Example #1
def GetUid(user, _getpwnam):
    """Retrieve the uid from the database.

    @type user: string
    @param user: The username to retrieve
    @return: The resolved uid

    """
    try:
        return _getpwnam(user).pw_uid
    except KeyError as err:
        raise errors.ConfigurationError("User '%s' not found (%s)" %
                                        (user, err))
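The _getpwnam argument is a dependency-injection seam: production callers pass the real account lookup while tests can pass a stub. A minimal usage sketch, assuming the standard library's pwd.getpwnam as the lookup function and arbitrary illustrative user names (neither is taken from the excerpt above):

import pwd

# Resolve a real account; pwd.getpwnam raises KeyError for unknown names,
# which GetUid turns into errors.ConfigurationError.
uid = GetUid("daemon", pwd.getpwnam)

# In a test, a stub object can stand in for the real lookup (4242 is an
# arbitrary value used only for this sketch).
class _FakePwEntry(object):
    pw_uid = 4242

assert GetUid("some-user", lambda name: _FakePwEntry()) == 4242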
Example #2
File: runtime.py Project: volans-/ganeti
def GetGid(group, _getgrnam):
    """Retrieve the gid from the database.

    @type group: string
    @param group: The group name to retrieve
    @return: The resolved gid

    """
    try:
        return _getgrnam(group).gr_gid
    except KeyError as err:
        raise errors.ConfigurationError("Group '%s' not found (%s)" %
                                        (group, err))
Example #3
File: ssconf.py Project: volans-/ganeti
    def _ReadFile(self, key, default=None):
        """Generic routine to read keys.

        This will read the file which holds the value requested. Errors
        will be changed into ConfigurationErrors.

        """
        filename = self.KeyToFilename(key)
        try:
            return ReadSsconfFile(filename)
        except EnvironmentError as err:
            if err.errno == errno.ENOENT and default is not None:
                return default
            raise errors.ConfigurationError("Can't read ssconf file %s: %s" %
                                            (filename, str(err)))
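The pattern here is that a missing file is tolerated only when the caller supplied a default; any other I/O failure is surfaced as a ConfigurationError. Below is a standalone sketch of the same pattern with a hypothetical read_or_default helper; the helper name and the ganeti errors import are assumptions, not part of the excerpt:

import errno

from ganeti import errors  # assumed import, matching the examples above

def read_or_default(filename, default=None):
    # Hypothetical helper mirroring _ReadFile: return the default only when
    # the file is absent and a default was given; wrap every other error.
    try:
        with open(filename) as fd:
            return fd.read()
    except EnvironmentError as err:
        if err.errno == errno.ENOENT and default is not None:
            return default
        raise errors.ConfigurationError("Can't read file %s: %s" %
                                        (filename, err))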
Example #4
    def Generate(self, existing, generate_one_fn, ec_id):
        """Generate a new resource of this type

    """
        assert callable(generate_one_fn)

        all_elems = self.GetReserved()
        all_elems.update(existing)
        retries = 64
        while retries > 0:
            retries -= 1
            new_resource = generate_one_fn()
            if new_resource is not None and new_resource not in all_elems:
                break
        else:
            raise errors.ConfigurationError("Not able to generate new resource"
                                            " (last tried: %s)" % new_resource)
        self.Reserve(ec_id, new_resource)
        return new_resource
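Generate keeps asking the caller-supplied generator for candidates until one is neither reserved nor present in the existing set, then reserves it under the given execution-context id. A usage sketch with a hypothetical candidate generator; the manager object, the in_use set and ec_id are assumptions standing in for a real reservation manager and its caller:

import random

def _random_mac():
    # Hypothetical generator: each call returns a fresh candidate value.
    return "aa:00:00:%02x:%02x:%02x" % (random.randint(0, 255),
                                        random.randint(0, 255),
                                        random.randint(0, 255))

# manager is assumed to expose the Generate/Reserve/GetReserved API shown
# above; in_use is the set of values already taken.
new_mac = manager.Generate(in_use, _random_mac, ec_id)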
Example #5
    def __init__(self):
        raise errors.ConfigurationError("No entries")
Example #6
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    (self.instance_uuid, self.instance_name) = \
      ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
                                self.instance_name)
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
    assert self.instance is not None
    cluster = self.cfg.GetClusterInfo()

    if (not self.cleanup and
        not self.instance.admin_state == constants.ADMINST_UP and
        not self.failover and self.fallback):
      self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
                      " switching to failover")
      self.failover = True

    disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
      if self.failover:
        text = "failovers"
      else:
        text = "migrations"
      invalid_disks = set(d.dev_type for d in disks
                             if d.dev_type not in constants.DTS_MIRRORED)
      raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                                 " %s" % (utils.CommaJoin(invalid_disks), text),
                                 errors.ECODE_STATE)

    # TODO allow heterogeneous disk types if all are mirrored in some way.
    if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
      CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

      if self.lu.op.iallocator:
        self._RunAllocator()
      else:
        # We set self.target_node_uuid as it is required by
        # BuildHooksEnv
        self.target_node_uuid = self.lu.op.target_node_uuid

      # Check that the target node is correct in terms of instance policy
      nodeinfo = self.cfg.GetNodeInfo(self.target_node_uuid)
      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)
      CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                             self.cfg, ignore=self.ignore_ipolicy)

      # self.target_node is already populated, either directly or by the
      # iallocator run
      target_node_uuid = self.target_node_uuid
      if self.target_node_uuid == self.instance.primary_node:
        raise errors.OpPrereqError(
          "Cannot migrate instance %s to its primary (%s)" %
          (self.instance.name,
           self.cfg.GetNodeName(self.instance.primary_node)),
          errors.ECODE_STATE)

      if len(self.lu.tasklets) == 1:
        # It is safe to release locks only when we're the only tasklet
        # in the LU
        ReleaseLocks(self.lu, locking.LEVEL_NODE,
                     keep=[self.instance.primary_node, self.target_node_uuid])

    elif utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
      templates = [d.dev_type for d in disks]
      secondary_node_uuids = \
        self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
      if not secondary_node_uuids:
        raise errors.ConfigurationError("No secondary node but using"
                                        " %s disk types" %
                                        utils.CommaJoin(set(templates)))
      self.target_node_uuid = target_node_uuid = secondary_node_uuids[0]
      if self.lu.op.iallocator or \
        (self.lu.op.target_node_uuid and
         self.lu.op.target_node_uuid != target_node_uuid):
        if self.failover:
          text = "failed over"
        else:
          text = "migrated"
        raise errors.OpPrereqError("Instances with disk types %s cannot"
                                   " be %s to arbitrary nodes"
                                   " (neither an iallocator nor a target"
                                   " node can be passed)" %
                                   (utils.CommaJoin(set(templates)), text),
                                   errors.ECODE_INVAL)
      nodeinfo = self.cfg.GetNodeInfo(target_node_uuid)
      group_info = self.cfg.GetNodeGroup(nodeinfo.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              group_info)
      CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                             self.cfg, ignore=self.ignore_ipolicy)

    else:
      raise errors.OpPrereqError("Instance mixes internal and external "
                                 "mirroring. This is not currently supported.")

    i_be = cluster.FillBE(self.instance)

    # check memory requirements on the secondary node
    if (not self.cleanup and
         (not self.failover or
           self.instance.admin_state == constants.ADMINST_UP)):
      self.tgt_free_mem = CheckNodeFreeMemory(
          self.lu, target_node_uuid,
          "migrating instance %s" % self.instance.name,
          i_be[constants.BE_MINMEM], self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.lu.LogInfo("Not checking memory on the secondary node as"
                      " instance will not be started")

    # check if failover must be forced instead of migration
    if (not self.cleanup and not self.failover and
        i_be[constants.BE_ALWAYS_FAILOVER]):
      self.lu.LogInfo("Instance configured to always failover; fallback"
                      " to failover")
      self.failover = True

    # check bridge existence
    CheckInstanceBridgesExist(self.lu, self.instance,
                              node_uuid=target_node_uuid)

    if not self.cleanup:
      CheckNodeNotDrained(self.lu, target_node_uuid)
      if not self.failover:
        result = self.rpc.call_instance_migratable(self.instance.primary_node,
                                                   self.instance)
        if result.fail_msg and self.fallback:
          self.lu.LogInfo("Can't migrate, instance offline, fallback to"
                          " failover")
          self.failover = True
        else:
          result.Raise("Can't migrate, please use failover",
                       prereq=True, ecode=errors.ECODE_STATE)

    assert not (self.failover and self.cleanup)

    if not self.failover:
      if self.lu.op.live is not None and self.lu.op.mode is not None:
        raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                                   " parameters are accepted",
                                   errors.ECODE_INVAL)
      if self.lu.op.live is not None:
        if self.lu.op.live:
          self.lu.op.mode = constants.HT_MIGRATION_LIVE
        else:
          self.lu.op.mode = constants.HT_MIGRATION_NONLIVE
        # reset the 'live' parameter to None so that repeated
        # invocations of CheckPrereq do not raise an exception
        self.lu.op.live = None
      elif self.lu.op.mode is None:
        # read the default value from the hypervisor
        i_hv = cluster.FillHV(self.instance, skip_globals=False)
        self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]

      self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
    else:
      # Failover is never live
      self.live = False

    if not (self.failover or self.cleanup):
      remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
          self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking instance on node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      instance_running = bool(remote_info.payload)
      if instance_running:
        self.current_mem = int(remote_info.payload["memory"])
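The 'live'/'mode' handling near the end of the excerpt boils down to a small decision function. The sketch below only summarizes that decision order under the assumption that the hypervisor's default mode is available to the caller; it is not part of the ganeti API:

def _resolve_migration_mode(live, mode, hv_default, failover):
    # Sketch of the decision order used in CheckPrereq above.
    if failover:
        return None           # failover is never live
    if live is not None and mode is not None:
        raise ValueError("only one of 'live' and 'mode' is accepted")
    if live is not None:
        return "live" if live else "non-live"
    if mode is None:
        return hv_default     # the hypervisor's HV_MIGRATION_MODE default
    return mode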