Code example #1
    def run(self, *args):
        global _used_luxi_calls
        assert _used_luxi_calls is None

        diff = (KNOWN_UNUSED_LUXI - luxi.REQ_ALL)
        assert not diff, "Non-existing LUXI calls listed as unused: %s" % diff

        _used_luxi_calls = set()
        try:
            # Run actual tests
            result = unittest.TextTestRunner.run(self, *args)

            diff = _used_luxi_calls & KNOWN_UNUSED_LUXI
            if diff:
                raise AssertionError(
                    "LUXI methods marked as unused were called: %s" %
                    utils.CommaJoin(diff))

            diff = (luxi.REQ_ALL - KNOWN_UNUSED_LUXI - _used_luxi_calls)
            if diff:
                raise AssertionError(
                    "The following LUXI methods were not used: %s" %
                    utils.CommaJoin(diff))
        finally:
            # Reset global variable
            _used_luxi_calls = None

        return result
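The runner above only inspects the set; the individual test cases are expected to add the name of each LUXI method they exercise to _used_luxi_calls. A minimal, hypothetical helper for doing so (not shown on this page, assumed convention) might look like:

def _RecordLuxiCall(method_name):
    """Record that a test exercised the given LUXI method (hypothetical)."""
    if _used_luxi_calls is not None:
        _used_luxi_calls.add(method_name)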
Code example #2
File: iallocator.py Project: sajalcody/ganeti
    def ValidateResult(self, ia, result):
        """Validates the result of an relocation request.

    """
        IARequestBase.ValidateResult(self, ia, result)

        node2group = dict((name, ndata["group"])
                          for (name, ndata) in ia.in_data["nodes"].items())

        fn = compat.partial(self._NodesToGroups, node2group,
                            ia.in_data["nodegroups"])

        instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
        request_groups = fn(
            ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
            ia.cfg.GetNodeNames([instance.primary_node]))
        result_groups = fn(result +
                           ia.cfg.GetNodeNames([instance.primary_node]))

        if ia.success and not set(result_groups).issubset(request_groups):
            raise errors.ResultValidationError(
                "Groups of nodes returned by"
                " iallocator (%s) differ from original"
                " groups (%s)" % (utils.CommaJoin(result_groups),
                                  utils.CommaJoin(request_groups)))
Code example #3
def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
    """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type inst_uuid: string
  @param inst_uuid: Instance UUID
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups
  @type primary_only: boolean
  @param primary_only: Whether to check node groups for only the primary node

  """
    inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)

    if not owned_groups.issuperset(inst_groups):
        raise errors.OpPrereqError(
            "Instance %s's node groups changed since"
            " locks were acquired, current groups are"
            " are '%s', owning groups '%s'; retry the"
            " operation" %
            (cfg.GetInstanceName(inst_uuid), utils.CommaJoin(inst_groups),
             utils.CommaJoin(owned_groups)), errors.ECODE_STATE)

    return inst_groups
Code example #4
    def test(self):
        self.assertEqual(utils.CommaJoin([]), "")
        self.assertEqual(utils.CommaJoin([1, 2, 3]), "1, 2, 3")
        self.assertEqual(utils.CommaJoin(["Hello"]), "Hello")
        self.assertEqual(utils.CommaJoin(["Hello", "World"]), "Hello, World")
        self.assertEqual(utils.CommaJoin(["Hello", "World", 99]),
                         "Hello, World, 99")
Code example #5
File: group.py Project: sajalcody/ganeti
    def CheckPrereq(self):
        """Check prerequisites.

    """
        assert self.needed_locks[locking.LEVEL_NODEGROUP]
        assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) == frozenset(
            self.op.node_uuids))

        expected_locks = (set([self.group_uuid]) |
                          self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
        actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
        if actual_locks != expected_locks:
            raise errors.OpExecError(
                "Nodes changed groups since locks were acquired,"
                " current groups are '%s', used to be '%s'" %
                (utils.CommaJoin(expected_locks),
                 utils.CommaJoin(actual_locks)))

        self.node_data = self.cfg.GetAllNodesInfo()
        self.group = self.cfg.GetNodeGroup(self.group_uuid)
        instance_data = self.cfg.GetAllInstancesInfo()

        if self.group is None:
            raise errors.OpExecError(
                "Could not retrieve group '%s' (UUID: %s)" %
                (self.op.group_name, self.group_uuid))

        (new_splits, previous_splits) = \
          self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
                                                 for uuid in self.op.node_uuids],
                                                self.node_data, instance_data)

        if new_splits:
            fmt_new_splits = utils.CommaJoin(
                utils.NiceSort(self.cfg.GetInstanceNames(new_splits)))

            if not self.op.force:
                raise errors.OpExecError(
                    "The following instances get split by this"
                    " change and --force was not given: %s" % fmt_new_splits)
            else:
                self.LogWarning(
                    "This operation will split the following instances: %s",
                    fmt_new_splits)

                if previous_splits:
                    self.LogWarning(
                        "In addition, these already-split instances continue"
                        " to be split across groups: %s",
                        utils.CommaJoin(
                            utils.NiceSort(
                                self.cfg.GetInstanceNames(previous_splits))))
Code example #6
File: docs_unittest.py Project: vanloswang/ganeti
    def _FindRapiMissing(self, handlers):
        used = frozenset(
            itertools.chain(*map(baserlib.GetResourceOpcodes, handlers)))

        unexpected = used & RAPI_OPCODE_EXCLUDE
        self.assertFalse(unexpected,
                         msg=("Found RAPI resources for excluded opcodes: %s" %
                              utils.CommaJoin(_GetOpIds(unexpected))))

        missing = (frozenset(opcodes.OP_MAPPING.values()) - used -
                   RAPI_OPCODE_EXCLUDE)
        self.assertFalse(missing,
                         msg=("Missing RAPI resources for opcodes: %s" %
                              utils.CommaJoin(_GetOpIds(missing))))
Code example #7
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    @type minor: int
    @param minor: the minor to assemble locally
    @type backend: string
    @param backend: path to the data device to use
    @type meta: string
    @param meta: path to the meta device to use
    @type size: int
    @param size: size in MiB

    """
    cmds = self._cmd_gen.GenLocalInitCmds(minor, backend, meta,
                                          size, self.params)

    for cmd in cmds:
      result = utils.RunCmd(cmd)
      if result.failed:
        base.ThrowError("drbd%d: can't attach local disk: %s",
                        minor, result.output)

    def _WaitForMinorSyncParams():
      """Call _SetMinorSyncParams and raise RetryAgain on errors.
      """
      if self._SetMinorSyncParams(minor, self.params):
        raise utils.RetryAgain()

    if self._NeedsLocalSyncerParams():
      # Retry because the disk config for the DRBD resource may still be
      # uninitialized.
      try:
        utils.Retry(_WaitForMinorSyncParams, 1.0, 5.0)
      except utils.RetryTimeout as e:
        base.ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                        (minor, utils.CommaJoin(e.args[0])))
Code example #8
File: instance.py Project: dimara/ganeti
    def Exec(self, feedback_fn):
        """Executes the opcode.

    """
        jobs = []
        if self.op.iallocator:
            op2inst = dict((op.instance_name, op) for op in self.op.instances)
            (allocatable, failed) = self.ia_result

            for (name, node_names) in allocatable:
                op = op2inst.pop(name)

                (op.pnode_uuid, op.pnode) = \
                  ExpandNodeUuidAndName(self.cfg, None, node_names[0])
                if len(node_names) > 1:
                    (op.snode_uuid, op.snode) = \
                      ExpandNodeUuidAndName(self.cfg, None, node_names[1])

                jobs.append([op])

            missing = set(op2inst.keys()) - set(failed)
            assert not missing, \
              "Iallocator did return incomplete result: %s" % \
              utils.CommaJoin(missing)
        else:
            jobs.extend([op] for op in self.op.instances)

        return ResultWithJobs(jobs, **self._ConstructPartialResult())
Code example #9
def CheckInstanceState(lu, instance, req_states, msg=None):
    """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
    if msg is None:
        msg = ("can't use instance from outside %s states" %
               utils.CommaJoin(req_states))
    if instance.admin_state not in req_states:
        raise errors.OpPrereqError(
            "Instance '%s' is marked to be %s, %s" %
            (instance.name, instance.admin_state, msg), errors.ECODE_STATE)

    if constants.ADMINST_UP not in req_states:
        pnode_uuid = instance.primary_node
        if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
            all_hvparams = lu.cfg.GetClusterInfo().hvparams
            ins_l = lu.rpc.call_instance_list([pnode_uuid],
                                              [instance.hypervisor],
                                              all_hvparams)[pnode_uuid]
            ins_l.Raise("Can't contact node %s for instance information" %
                        lu.cfg.GetNodeName(pnode_uuid),
                        prereq=True,
                        ecode=errors.ECODE_ENVIRON)
            if instance.name in ins_l.payload:
                raise errors.OpPrereqError(
                    "Instance %s is running, %s" % (instance.name, msg),
                    errors.ECODE_STATE)
        else:
            lu.LogWarning("Primary node offline, ignoring check that instance"
                          " is down")
Code example #10
def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
    """Make sure that none of the given paramters is global.

  If a global parameter is found, an L{errors.OpPrereqError} exception is
  raised. This is used to avoid setting global parameters for individual nodes.

  @type params: dictionary
  @param params: Parameters to check
  @type glob_pars: dictionary
  @param glob_pars: Forbidden parameters
  @type kind: string
  @param kind: Kind of parameters (e.g. "node")
  @type bad_levels: string
  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
      "instance")
  @type good_levels: strings
  @param good_levels: Level(s) at which the parameters are allowed (e.g.
      "cluster or group")

  """
    used_globals = glob_pars.intersection(params)
    if used_globals:
        msg = ("The following %s parameters are global and cannot"
               " be customized at %s level, please modify them at"
               " %s level: %s" %
               (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
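A hedged usage sketch: the call below shows how a logical unit might reject node-level overrides of cluster-global parameters. The argument names (op.ndparams, constants.NDC_GLOBALS) are assumptions for illustration, not taken from this page:

# Hypothetical call: reject node-level settings of cluster-global
# node parameters.
CheckParamsNotGlobal(op.ndparams, constants.NDC_GLOBALS, "node",
                     "node", "cluster or group")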
Code example #11
File: group.py Project: sajalcody/ganeti
    def _CheckIpolicy(self, cluster, owned_instance_names):
        """Sanity checks for the ipolicy.

    @type cluster: C{objects.Cluster}
    @param cluster: the cluster's configuration
    @type owned_instance_names: list of string
    @param owned_instance_names: list of instances

    """
        if self.op.ipolicy:
            self.new_ipolicy = GetUpdatedIPolicy(self.group.ipolicy,
                                                 self.op.ipolicy,
                                                 group_policy=True)

            new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
            CheckIpolicyVsDiskTemplates(new_ipolicy,
                                        cluster.enabled_disk_templates)
            instances = \
              dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
            gmi = ganeti.masterd.instance
            violations = \
                ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                       self.group),
                                             new_ipolicy, instances.values(),
                                             self.cfg)

            if violations:
                self.LogWarning(
                    "After the ipolicy change the following instances"
                    " violate them: %s", utils.CommaJoin(violations))
Code example #12
File: ht.py Project: vladimir-ipatov/ganeti-1
def TStrictDict(require_all, exclusive, items):
    """Strict dictionary check with specific keys.

  @type require_all: boolean
  @param require_all: Whether all keys in L{items} are required
  @type exclusive: boolean
  @param exclusive: Whether only keys listed in L{items} should be accepted
  @type items: dictionary
  @param items: Mapping from key (string) to verification function

  """
    descparts = ["Dictionary containing"]

    if exclusive:
        descparts.append(" none but the")

    if require_all:
        descparts.append(" required")

    if len(items) == 1:
        descparts.append(" key ")
    else:
        descparts.append(" keys ")

    descparts.append(
        utils.CommaJoin("\"%s\" (value %s)" % (key, value)
                        for (key, value) in items.items()))

    desc = WithDesc("".join(descparts))

    return desc(
        TAnd(TDict,
             compat.partial(_TStrictDictCheck, require_all, exclusive, items)))
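To see how the description parts assemble, a small hypothetical usage follows; using TString as the verification function is an assumption, and the exact rendering of the value depends on how WithDesc presents it:

# Hypothetical: a dictionary that must contain exactly the key "mode".
check = TStrictDict(True, True, {"mode": TString})
# With require_all=True and exclusive=True, the description built above
# would read roughly:
#   Dictionary containing none but the required key "mode" (value String)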
Code example #13
File: instance_utils.py Project: kawamuray/ganeti
def CheckTargetNodeIPolicy(lu,
                           ipolicy,
                           instance,
                           node,
                           cfg,
                           ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
    """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
    primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
    res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

    if res:
        msg = ("Instance does not meet target node group's (%s) instance"
               " policy: %s") % (node.group, utils.CommaJoin(res))
        if ignore:
            lu.LogWarning(msg)
        else:
            raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
Code example #14
File: drbd.py Project: vladimir-ipatov/ganeti-2.15.2
  def _AssembleLocal(self, minor, backend, meta, size):
    """Configure the local part of a DRBD device.

    @type minor: int
    @param minor: the minor to assemble locally
    @type backend: string
    @param backend: path to the data device to use
    @type meta: string
    @param meta: path to the meta device to use
    @type size: int
    @param size: size in MiB

    """
    cmds = self._cmd_gen.GenLocalInitCmds(minor, backend, meta,
                                          size, self.params)

    for cmd in cmds:
      result = utils.RunCmd(cmd)
      if result.failed:
        base.ThrowError("drbd%d: can't attach local disk: %s",
                        minor, result.output)

    # Syncer init is only needed for DRBD >= 8.4; from 8.4 on, the sync
    # parameters must be set after the local config, not in the net config.
    info = DRBD8.GetProcInfo()
    version = info.GetVersion()
    if version["k_minor"] >= 4:
      sync_errors = self._SetMinorSyncParams(minor, self.params)
      # Retry a second time, because the disk config for the DRBD resource
      # may still be uninitialized.
      if sync_errors:
        time.sleep(1)
        sync_errors = self._SetMinorSyncParams(minor, self.params)
        if sync_errors:
          base.ThrowError("drbd%d: can't set the synchronization parameters: %s" %
                          (minor, utils.CommaJoin(sync_errors)))
Code example #15
def ListNetworks(opts, args):
    """List Ip pools and their properties.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: networks to list, or empty for all
  @rtype: int
  @return: the desired exit code

  """
    desired_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)
    fmtoverride = {
        "group_list":
        (lambda data: utils.CommaJoin("%s (%s, %s, %s)" %
                                      (name, mode, link, vlan)
                                      for (name, mode, link, vlan) in data),
         False),
        "inst_list": (",".join, False),
        "tags": (",".join, False),
    }

    cl = GetClient()
    return GenericList(constants.QR_NETWORK,
                       desired_fields,
                       args,
                       None,
                       opts.separator,
                       not opts.no_headers,
                       verbose=opts.verbose,
                       format_override=fmtoverride,
                       cl=cl)
Code example #16
def _NetworkConflictCheck(lu, check_fn, action, instances):
  """Checks for network interface conflicts with a network.

  @type lu: L{LogicalUnit}
  @type check_fn: callable receiving one parameter (L{objects.NIC}) and
    returning boolean
  @param check_fn: Function checking for conflict
  @type action: string
  @param action: Part of error message (see code)
  @param instances: the instances to check
  @type instances: list of instance objects
  @raise errors.OpPrereqError: If conflicting IP addresses are found.

  """
  conflicts = []

  for instance in instances:
    instconflicts = [(idx, nic.ip)
                     for (idx, nic) in enumerate(instance.nics)
                     if check_fn(nic)]

    if instconflicts:
      conflicts.append((instance.name, instconflicts))

  if conflicts:
    lu.LogWarning("IP addresses from network '%s', which is about to %s"
                  " node group '%s', are in use: %s" %
                  (lu.network_name, action, lu.group.name,
                   utils.CommaJoin(("%s: %s" %
                                    (name, _FmtNetworkConflict(details)))
                                   for (name, details) in conflicts)))

    raise errors.OpPrereqError("Conflicting IP addresses found; "
                               " remove/modify the corresponding network"
                               " interfaces", errors.ECODE_STATE)
Code example #17
  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
      constants.OOB_POWER_ON,
      constants.OOB_POWER_OFF,
      constants.OOB_POWER_CYCLE,
      ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))
Code example #18
File: masterd.py Project: ribag/ganeti-experiments
def _SetWatcherPause(context, ec_id, until):
    """Creates or removes the watcher pause file.

  @type context: L{GanetiContext}
  @param context: Global Ganeti context
  @type until: None or int
  @param until: Unix timestamp saying until when the watcher shouldn't run

  """
    node_names = context.GetConfig(ec_id).GetNodeList()

    if until is None:
        logging.info("Received request to no longer pause watcher")
    else:
        if not ht.TNumber(until):
            raise TypeError("Duration must be numeric")

        if until < time.time():
            raise errors.GenericError(
                "Unable to set pause end time in the past")

        logging.info("Received request to pause watcher until %s", until)

    result = context.rpc.call_set_watcher_pause(node_names, until)

    errmsg = utils.CommaJoin("%s (%s)" % (node_name, nres.fail_msg)
                             for (node_name, nres) in result.items()
                             if nres.fail_msg and not nres.offline)
    if errmsg:
        raise errors.OpExecError(
            "Watcher pause was set where possible, but failed"
            " on the following node(s): %s" % errmsg)

    return until
Code example #19
def MigrateNode(opts, args):
    """Migrate all primary instance on a node.

  """
    cl = GetClient()
    force = opts.force
    selected_fields = ["name", "pinst_list"]

    qcl = GetClient()
    result = qcl.QueryNodes(names=args,
                            fields=selected_fields,
                            use_locking=False)
    qcl.Close()
    ((node, pinst), ) = result

    if not pinst:
        ToStdout("No primary instances on node %s, exiting." % node)
        return 0

    pinst = utils.NiceSort(pinst)

    if not (force or AskUser("Migrate instance(s) %s?" %
                             utils.CommaJoin(pinst))):
        return constants.EXIT_CONFIRMATION

    # this should be removed once --non-live is deprecated
    if not opts.live and opts.migration_mode is not None:
        raise errors.OpPrereqError(
            "Only one of the --non-live and "
            "--migration-mode options can be passed", errors.ECODE_INVAL)
    if not opts.live:  # --non-live passed
        mode = constants.HT_MIGRATION_NONLIVE
    else:
        mode = opts.migration_mode

    op = opcodes.OpNodeMigrate(node_name=args[0],
                               mode=mode,
                               iallocator=opts.iallocator,
                               target_node=opts.dst_node,
                               allow_runtime_changes=opts.allow_runtime_chgs,
                               ignore_ipolicy=opts.ignore_ipolicy)

    result = SubmitOrSend(op, opts, cl=cl)

    # Keep track of submitted jobs
    jex = JobExecutor(cl=cl, opts=opts)

    for (status, job_id) in result[constants.JOB_IDS_KEY]:
        jex.AddJobId(None, status, job_id)

    results = jex.GetResults()
    bad_cnt = len([row for row in results if not row[0]])
    if bad_cnt == 0:
        ToStdout("All instances migrated successfully.")
        rcode = constants.EXIT_SUCCESS
    else:
        ToStdout("There were %s errors during the node migration.", bad_cnt)
        rcode = constants.EXIT_FAILURE

    return rcode
Code example #20
File: group.py Project: sajalcody/ganeti
    def ExpandNames(self):
        # This raises errors.OpPrereqError on its own:
        self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

        if self.op.target_groups:
            self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                        self.op.target_groups)
        else:
            self.req_target_uuids = []

        if self.group_uuid in self.req_target_uuids:
            raise errors.OpPrereqError(
                "Group to be evacuated (%s) can not be used"
                " as a target group (targets are %s)" %
                (self.group_uuid, utils.CommaJoin(self.req_target_uuids)),
                errors.ECODE_INVAL)

        self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

        self.share_locks = ShareAll()
        self.needed_locks = {
            locking.LEVEL_INSTANCE: [],
            locking.LEVEL_NODEGROUP: [],
            locking.LEVEL_NODE: [],
        }
Code example #21
File: group.py Project: sajalcody/ganeti
    def CheckPrereq(self):
        """Check prerequisites.

    This checks that the given group name exists as a node group, that is
    empty (i.e., contains no nodes), and that is not the last group of the
    cluster.

    """
        # Verify that the group is empty.
        group_nodes = [
            node.uuid for node in self.cfg.GetAllNodesInfo().values()
            if node.group == self.group_uuid
        ]

        if group_nodes:
            raise errors.OpPrereqError(
                "Group '%s' not empty, has the following"
                " nodes: %s" % (self.op.group_name,
                                utils.CommaJoin(utils.NiceSort(group_nodes))),
                errors.ECODE_STATE)

        # Verify the cluster would not be left group-less.
        if len(self.cfg.GetNodeGroupList()) == 1:
            raise errors.OpPrereqError(
                "Group '%s' is the only group, cannot be"
                " removed" % self.op.group_name, errors.ECODE_STATE)
Code example #22
def CheckNodePVs(nresult, exclusive_storage):
    """Check node PVs.

  """
    pvlist_dict = nresult.get(constants.NV_PVLIST, None)
    if pvlist_dict is None:
        return (["Can't get PV list from node"], None)
    pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
    errlist = []
    # check that ':' is not present in PV names, since it's a
    # special character for lvcreate (denotes the range of PEs to
    # use on the PV)
    for pv in pvlist:
        if ":" in pv.name:
            errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
                           (pv.name, pv.vg_name))
    es_pvinfo = None
    if exclusive_storage:
        (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
        errlist.extend(errmsgs)
        shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
        if shared_pvs:
            for (pvname, lvlist) in shared_pvs:
                # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
                errlist.append("PV %s is shared among unrelated LVs (%s)" %
                               (pvname, utils.CommaJoin(lvlist)))
    return (errlist, es_pvinfo)
Code example #23
File: ht.py Project: vladimir-ipatov/ganeti-1
def TElemOf(target_list):
    """Builds a function that checks if a given value is a member of a list.

  """
    def fn(val):
        return val in target_list

    return WithDesc("OneOf %s" % (utils.CommaJoin(target_list), ))(fn)
Code example #24
def _NodeEvacDest(use_nodes, group, node_names):
    """Returns group or nodes depending on caller's choice.

  """
    if use_nodes:
        return utils.CommaJoin(node_names)
    else:
        return group
Code example #25
    def test(self):
        wrong = \
          set(itertools.ifilterfalse(self._BUILTIN_NAME_RE.match,
                itertools.ifilterfalse(self._VALID_NAME_RE.match,
                                       dir(constants))))
        wrong -= self._EXCEPTIONS
        self.assertFalse(wrong,
                         msg=("Invalid names exported from constants module: %s" %
                              utils.CommaJoin(sorted(wrong))))
Code example #26
def ComputeIPolicySpecViolation(ipolicy,
                                mem_size,
                                cpu_count,
                                disk_count,
                                nic_count,
                                disk_sizes,
                                spindle_use,
                                disk_template,
                                _compute_fn=_ComputeMinMaxSpec):
    """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @type disk_template: string
  @param disk_template: The disk template of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list of no violations are found

  """
    assert disk_count == len(disk_sizes)

    test_settings = [
        (constants.ISPEC_MEM_SIZE, "", mem_size),
        (constants.ISPEC_CPU_COUNT, "", cpu_count),
        (constants.ISPEC_NIC_COUNT, "", nic_count),
        (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]
    if disk_template != constants.DT_DISKLESS:
        # This check doesn't make sense for diskless instances
        test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
    ret = []
    allowed_dts = ipolicy[constants.IPOLICY_DTS]
    if disk_template not in allowed_dts:
        ret.append("Disk template %s is not allowed (allowed templates: %s)" %
                   (disk_template, utils.CommaJoin(allowed_dts)))

    min_errs = None
    for minmax in ipolicy[constants.ISPECS_MINMAX]:
        errs = filter(None, (_compute_fn(name, qualifier, minmax, value)
                             for (name, qualifier, value) in test_settings))
        if min_errs is None or len(errs) < len(min_errs):
            min_errs = errs
    assert min_errs is not None
    return ret + min_errs
Code example #27
def check_console_parameters(hvparams):
    if hvparams[constants.HV_SERIAL_CONSOLE]:
        serial_speed = hvparams[constants.HV_SERIAL_SPEED]
        valid_speeds = constants.VALID_SERIAL_SPEEDS
        if not serial_speed or serial_speed not in valid_speeds:
            raise errors.HypervisorError("Invalid serial console speed, must be"
                                         " one of: %s" %
                                         utils.CommaJoin(valid_speeds))
    return True
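A hedged usage sketch; the hvparams dictionary is hypothetical, and 115200 being listed in constants.VALID_SERIAL_SPEEDS is an assumption:

# Hypothetical parameters; check_console_parameters raises
# errors.HypervisorError if the speed is not a valid serial speed.
hvparams = {constants.HV_SERIAL_CONSOLE: True,
            constants.HV_SERIAL_SPEED: 115200}
check_console_parameters(hvparams)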
Code example #28
File: nodemaint.py Project: volans-/ganeti
    def ShutdownDRBD(drbd_running):
        """Shutdown active DRBD devices.

    """
        if drbd_running:
            logging.info(
                "Following DRBD minors should not be active,"
                " shutting them down: %s", utils.CommaJoin(drbd_running))
            for minor in drbd_running:
                drbd.DRBD8.ShutdownAll(minor)
Code example #29
File: __init__.py Project: volans-/ganeti
def _VerifyDisks(cl, uuid, nodes, instances):
    """Run a per-group "gnt-cluster verify-disks".

  """
    op = opcodes.OpGroupVerifyDisks(group_name=uuid,
                                    priority=constants.OP_PRIO_LOW)
    op.reason = [(constants.OPCODE_REASON_SRC_WATCHER,
                  "Verifying disks of group %s" % uuid, utils.EpochNano())]
    job_id = cl.SubmitJob([op])
    ((_, offline_disk_instances, _), ) = \
      cli.PollJob(job_id, cl=cl, feedback_fn=logging.debug)
    try:
        cl.ArchiveJob(job_id)
    except Exception:  # pylint: disable=W0703
        logging.exception("Error while archiving job %d", job_id)

    if not offline_disk_instances:
        # nothing to do
        logging.debug("Verify-disks reported no offline disks, nothing to do")
        return

    logging.debug("Will activate disks for instance(s) %s",
                  utils.CommaJoin(offline_disk_instances))

    # We submit only one job, and wait for it. Not optimal, but this puts less
    # load on the job queue.
    job = []
    for name in offline_disk_instances:
        try:
            inst = instances[name]
        except KeyError:
            logging.info("Can't find instance '%s', maybe it was ignored",
                         name)
            continue

        if inst.status in HELPLESS_STATES or _CheckForOfflineNodes(
                nodes, inst):
            logging.info(
                "Skipping instance '%s' because it is in a helpless state"
                " or has offline secondaries", name)
            continue

        op = opcodes.OpInstanceActivateDisks(instance_name=name)
        op.reason = [(constants.OPCODE_REASON_SRC_WATCHER,
                      "Activating disks for instance %s" % name,
                      utils.EpochNano())]
        job.append(op)

    if job:
        job_id = cli.SendJob(job, cl=cl)

        try:
            cli.PollJob(job_id, cl=cl, feedback_fn=logging.debug)
        except Exception:  # pylint: disable=W0703
            logging.exception("Error while activating disks")
Code example #30
File: docs_unittest.py Project: vanloswang/ganeti
    def _CheckManpage(self, script, mantext, commands):
        missing = []

        for cmd in commands:
            pattern = r"^(\| )?\*\*%s\*\*" % re.escape(cmd)
            if not re.findall(pattern, mantext, re.DOTALL | re.MULTILINE):
                missing.append(cmd)

        self.failIf(missing,
                    msg=("Manpage for '%s' missing documentation for %s" %
                         (script, utils.CommaJoin(missing))))