Пример #1
0
def _GetGroupData(qcl, uuid):
    """Retrieves instances and nodes per node group.

  @param qcl: Query client to use
  @param uuid: UUID of the node group whose instances and nodes to retrieve
  @return: tuple of two dictionaries, mapping node names to L{Node} objects
    and instance names to L{Instance} objects respectively

  """
    queries = [
        (constants.QR_INSTANCE, [
            "name", "status", "disks_active", "snodes", "pnode.group.uuid",
            "snodes.group.uuid"
        ], [qlang.OP_EQUAL, "pnode.group.uuid", uuid]),
        (constants.QR_NODE, ["name", "bootid",
                             "offline"], [qlang.OP_EQUAL, "group.uuid", uuid]),
    ]

    # List comprehensions instead of map(): under Python 3, map() returns a
    # one-shot iterator, which the assertion below would exhaust before the
    # values are extracted
    results_data = [
        qcl.Query(what, fields, qfilter).data
        for (what, fields, qfilter) in queries
    ]

    # Ensure results are tuples with two values
    assert compat.all(
        ht.TListOf(ht.TListOf(ht.TIsLength(2)))(d) for d in results_data)

    # Extract values ignoring result status
    (raw_instances, raw_nodes) = [[[v[1] for v in values] for values in res]
                                  for res in results_data]

    secondaries = {}
    instances = []

    # Load all instances
    for (name, status, disks_active, snodes, pnode_group_uuid,
         snodes_group_uuid) in raw_instances:
        if snodes and set([pnode_group_uuid]) != set(snodes_group_uuid):
            # Instances whose secondary nodes live in a different group than
            # the primary ("split" instances) are ignored
            logging.error(
                "Ignoring split instance '%s', primary group %s, secondary"
                " groups %s", name, pnode_group_uuid,
                utils.CommaJoin(snodes_group_uuid))
        else:
            instances.append(Instance(name, status, disks_active, snodes))

            for node in snodes:
                secondaries.setdefault(node, set()).add(name)

    # Load all nodes
    nodes = [
        Node(name, bootid, offline, secondaries.get(name, set()))
        for (name, bootid, offline) in raw_nodes
    ]

    return (dict((node.name, node) for node in nodes),
            dict((inst.name, inst) for inst in instances))
Пример #2
0
class _OpTestVerifyErrors(opcodes.OpCode):
  # Test-only opcode; each OP_PARAMS entry is (name, default, check, doc)
  OP_PARAMS = [
    ("debug_simulate_errors", False, ht.TBool, ""),
    ("error_codes", False, ht.TBool, ""),
    ("ignore_errors", [],
     ht.TListOf(ht.TElemOf(constants.CV_ALL_ECODES_STRINGS)), ""),
  ]
Пример #3
0
 def testListOf(self):
     """Checks TListOf against accepted and rejected inputs."""
     check = ht.TListOf(ht.TNonEmptyString)

     # Lists of non-empty strings (including the empty list) are accepted
     for value in ([], ["x"], ["Hello", "World"]):
         self.assertTrue(check(value))

     # Non-list values and lists containing invalid items are rejected
     for value in (None, False, range(3), ["x", None]):
         self.assertFalse(check(value))
    def testVerificationFails(self):
        """Tests that L{serializer.LoadAndVerifyJson} reports verification
        failures via L{errors.ParseError}.

        """
        self.assertRaises(errors.ParseError, serializer.LoadAndVerifyJson,
                          "{}", lambda _: False)

        verify_fn = ht.TListOf(ht.TNonEmptyString)
        try:
            serializer.LoadAndVerifyJson("{}", verify_fn)
        # "except E, err" is Python-2-only syntax and a syntax error under
        # Python 3; "except E as err" works on Python 2.6+ and 3.x
        except errors.ParseError as err:
            # The error message must name the failed check
            self.assertTrue(str(err).endswith(str(verify_fn)))
        else:
            self.fail("LoadAndVerifyJson did not raise ParseError")
Пример #5
0
def BuildJobDepCheck(relative):
    """Builds check for job dependencies (L{DEPEND_ATTR}).

  @type relative: bool
  @param relative: Whether to accept relative job IDs (negative)
  @rtype: callable

  """
    # Relative (negative) job IDs are only accepted when requested
    job_id = ht.TOr(ht.TJobId, ht.TRelativeJobId) if relative else ht.TJobId

    # One dependency is a (job ID, [finalized job status, ...]) pair
    job_dep = ht.TAnd(
        ht.TOr(ht.TListOf(ht.TAny), ht.TTuple),
        ht.TIsLength(2),
        ht.TItems([job_id,
                   ht.TListOf(ht.TElemOf(constants.JOBS_FINALIZED))]))

    # The whole attribute may be missing or a list of such pairs
    return ht.TMaybe(ht.TListOf(job_dep))
Пример #6
0
class IAReqMultiInstanceAlloc(IARequestBase):
    """A request to allocate multiple instances at once.

  """
    # pylint: disable=E1101
    MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
    REQ_PARAMS = [
        ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
    ]
    # Per-instance success entries: (instance name, [node name, ...]) pairs
    _MASUCCESS = ht.TListOf(
        ht.TAnd(ht.TIsLength(2),
                ht.TItems([ht.TNonEmptyString,
                           ht.TListOf(ht.TNonEmptyString)])))
    # Failure entries are plain instance names
    _MAFAILED = ht.TListOf(ht.TNonEmptyString)
    # Overall result is the (successes, failures) pair
    REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
                         ht.TItems([_MASUCCESS, _MAFAILED]))

    def GetRequest(self, cfg):
        """Builds the request dict by delegating to every sub-request."""
        sub_requests = [sub.GetRequest(cfg) for sub in self.instances]
        return {
            "instances": sub_requests,
        }
Пример #7
0
    def GetPostOpInput(self):
        """Replaces disks on an instance.

    Accepts parameters either from the request body or, for backwards
    compatibility, from query arguments.

    """
        static = {
            "instance_name": self.items[0],
        }

        if self.request_body:
            data = self.request_body
        elif self.queryargs:
            # Legacy interface, do not modify/extend
            data = {
                "remote_node":
                self._checkStringVariable("remote_node", default=None),
                "mode":
                self._checkStringVariable("mode", default=None),
                "disks":
                self._checkStringVariable("disks", default=None),
                "iallocator":
                self._checkStringVariable("iallocator", default=None),
            }
        else:
            data = {}

        # Parse disks; "except E, err" below was Python-2-only syntax and a
        # syntax error under Python 3, hence "except E as err"
        try:
            raw_disks = data.pop("disks")
        except KeyError:
            pass
        else:
            if raw_disks:
                if ht.TListOf(ht.TInt)(raw_disks):  # pylint: disable=E1102
                    data["disks"] = raw_disks
                else:
                    # Backwards compatibility for strings of the format "1, 2, 3"
                    try:
                        data["disks"] = [
                            int(part) for part in raw_disks.split(",")
                        ]
                    except (TypeError, ValueError) as err:
                        raise http.HttpBadRequest(
                            "Invalid disk index passed: %s" % err)
Пример #8
0
"""

from ganeti import constants
from ganeti import errors
from ganeti import ht

#: Length in bytes of the magic fourcc prepended by L{PackMagic} and
#: checked by L{UnpackMagic}
_FOURCC_LEN = 4

#: Items in the individual rows of the NodeDrbd query
_HTNodeDrbdItems = [
    ht.TString, ht.TInt, ht.TString, ht.TString, ht.TString, ht.TString
]
#: Type for the (top-level) result of NodeDrbd query
HTNodeDrbd = ht.TListOf(
    ht.TAnd(ht.TList, ht.TIsLength(len(_HTNodeDrbdItems)),
            ht.TItems(_HTNodeDrbdItems)))


def PackMagic(payload):
    """Prepend the confd magic fourcc to a payload.

  @param payload: data to be prefixed
  @return: the payload with the confd magic fourcc prepended

  """
    # Plain concatenation is equivalent to joining the two parts
    return constants.CONFD_MAGIC_FOURCC + payload


def UnpackMagic(payload):
    """Unpack and check the confd magic fourcc from a payload.

  """
    if len(payload) < _FOURCC_LEN:
Пример #9
0
  # TODO: Test dependencies on errors
  jobs = [
    [opcodes.OpTestDelay(duration=1)],
    [opcodes.OpTestDelay(duration=1,
                         depends=[(-1, [])])],
    [opcodes.OpTestDelay(duration=1,
                         depends=[(-2, [constants.JOB_STATUS_SUCCESS])])],
    [opcodes.OpTestDelay(duration=1,
                         depends=[])],
    [opcodes.OpTestDelay(duration=1,
                         depends=[(-2, [constants.JOB_STATUS_SUCCESS])])],
    ]

  # Function for checking result
  check_fn = ht.TListOf(ht.TAnd(ht.TIsLength(2),
                                ht.TItems([ht.TBool,
                                           ht.TOr(ht.TNonEmptyString,
                                                  ht.TJobId)])))

  cl = cli.GetClient()
  result = cl.SubmitManyJobs(jobs)
  if not check_fn(result):
    raise errors.OpExecError("Job submission doesn't match %s: %s" %
                             (check_fn, result))

  # Wait for jobs to finish
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result:
    jex.AddJobId(None, status, job_id)

  job_results = jex.GetResults()
Пример #10
0
from ganeti import utils
from ganeti import ht
from ganeti import ssh
from ganeti import pathutils
from ganeti.tools import common

#: Schema of the JSON input data accepted by this tool; see
#: L{ht.TStrictDict} for the meaning of the two leading boolean flags
_DATA_CHECK = ht.TStrictDict(
    False, True, {
        constants.SSHS_CLUSTER_NAME:
        ht.TNonEmptyString,
        constants.SSHS_NODE_DAEMON_CERTIFICATE:
        ht.TNonEmptyString,
        # (action, {name: [key, ...]}) pairs for public keys
        constants.SSHS_SSH_PUBLIC_KEYS:
        ht.TItems([
            ht.TElemOf(constants.SSHS_ACTIONS),
            ht.TDictOf(ht.TNonEmptyString, ht.TListOf(ht.TNonEmptyString))
        ]),
        # (action, {name: [key, ...]}) pairs for authorized keys
        constants.SSHS_SSH_AUTHORIZED_KEYS:
        ht.TItems([
            ht.TElemOf(constants.SSHS_ACTIONS),
            ht.TDictOf(ht.TNonEmptyString, ht.TListOf(ht.TNonEmptyString))
        ]),
        constants.SSHS_GENERATE:
        ht.TDictOf(ht.TNonEmptyString, ht.TString),
    })


class SshUpdateError(errors.GenericError):
    """Local exception class used by this tool for reporting errors.

  """
Пример #11
0
from ganeti import pathutils
from ganeti import utils
from ganeti import ht
from ganeti import ssh
from ganeti.tools import common


#: A single SSH key specification: (key type, public part, private part)
_SSH_KEY_LIST_ITEM = \
  ht.TAnd(ht.TIsLength(3),
          ht.TItems([
            ht.TSshKeyType,
            ht.Comment("public")(ht.TNonEmptyString),
            ht.Comment("private")(ht.TNonEmptyString),
          ]))

#: Checks a list of SSH key specifications
_SSH_KEY_LIST = ht.TListOf(_SSH_KEY_LIST_ITEM)

_DATA_CHECK = ht.TStrictDict(
    False, True, {
        constants.SSHS_CLUSTER_NAME:
        ht.TNonEmptyString,
        constants.SSHS_NODE_DAEMON_CERTIFICATE:
        ht.TNonEmptyString,
        constants.SSHS_SSH_HOST_KEY:
        _SSH_KEY_LIST,
        constants.SSHS_SSH_ROOT_KEY:
        _SSH_KEY_LIST,
        constants.SSHS_SSH_AUTHORIZED_KEYS:
        ht.TDictOf(ht.TNonEmptyString, ht.TListOf(ht.TNonEmptyString)),
        constants.SSHS_SSH_KEY_TYPE:
        ht.TSshKeyType,
Пример #12
0
def _GetGroupData(qcl, uuid):
    """Retrieves instances and nodes per node group.

  @param qcl: Query client to use
  @param uuid: UUID of the node group whose instances and nodes to retrieve
  @return: tuple of (node name to L{Node} mapping, instance name to
    L{Instance} mapping, set of names of currently locked instances)

  """
    # Query all locks; instance lock names have the form "instance/<name>"
    locks = qcl.Query(constants.QR_LOCK, ["name", "mode"], None)

    prefix = "instance/"
    prefix_len = len(prefix)

    locked_instances = set()

    # An instance is considered locked if its lock row has a truthy mode
    for [[_, name], [_, lock]] in locks.data:
        if name.startswith(prefix) and lock:
            locked_instances.add(name[prefix_len:])

    queries = [
        (constants.QR_INSTANCE, [
            "name", "status", "admin_state", "admin_state_source",
            "disks_active", "snodes", "pnode.group.uuid", "snodes.group.uuid",
            "disk_template"
        ], [qlang.OP_EQUAL, "pnode.group.uuid", uuid]),
        (constants.QR_NODE, ["name", "bootid",
                             "offline"], [qlang.OP_EQUAL, "group.uuid", uuid]),
    ]

    results_data = [
        qcl.Query(what, field, qfilter).data
        for (what, field, qfilter) in queries
    ]

    # Ensure results are tuples with two values
    assert compat.all(
        ht.TListOf(ht.TListOf(ht.TIsLength(2)))(d) for d in results_data)

    # Extract values ignoring result status
    (raw_instances, raw_nodes) = [[[v[1] for v in values] for values in res]
                                  for res in results_data]

    secondaries = {}
    instances = []

    # Load all instances
    for (name, status, config_state, config_state_source, disks_active, snodes,
         pnode_group_uuid, snodes_group_uuid, disk_template) in raw_instances:
        if snodes and set([pnode_group_uuid]) != set(snodes_group_uuid):
            # Instances whose secondaries live in another group than the
            # primary ("split" instances) are ignored
            logging.error(
                "Ignoring split instance '%s', primary group %s, secondary"
                " groups %s", name, pnode_group_uuid,
                utils.CommaJoin(snodes_group_uuid))
        else:
            instances.append(
                Instance(name, status, config_state, config_state_source,
                         disks_active, snodes, disk_template))

            for node in snodes:
                secondaries.setdefault(node, set()).add(name)

    # Load all nodes
    nodes = [
        Node(name, bootid, offline, secondaries.get(name, set()))
        for (name, bootid, offline) in raw_nodes
    ]

    return (dict((node.name, node) for node in nodes),
            dict((inst.name, inst) for inst in instances), locked_instances)
Пример #13
0
def _TestJobDependency(opts):
    """Tests job dependencies.

  @param opts: parsed command line options; C{opts.debug} controls extra
    output, and the whole object is handed to L{JobExecutor}
  @raise errors.OpExecError: if any dependency check fails

  """
    ToStdout("Testing job dependencies")

    # A plainly submitted opcode with a relative job dependency must be
    # rejected by the master
    try:
        cl = cli.GetClient()
        SubmitOpCode(opcodes.OpTestDelay(duration=0, depends=[(-1, None)]),
                     cl=cl)
    except errors.GenericError as err:
        if opts.debug:
            ToStdout("Ignoring error for 'wrong dependencies' test: %s", err)
    else:
        raise errors.OpExecError("Submitting plain opcode with relative job ID"
                                 " did not fail as expected")

    # TODO: Test dependencies on errors
    jobs = [
        [opcodes.OpTestDelay(duration=1)],
        [opcodes.OpTestDelay(duration=1, depends=[(-1, [])])],
        [
            opcodes.OpTestDelay(duration=1,
                                depends=[(-2, [constants.JOB_STATUS_SUCCESS])])
        ],
        [opcodes.OpTestDelay(duration=1, depends=[])],
        [
            opcodes.OpTestDelay(duration=1,
                                depends=[(-2, [constants.JOB_STATUS_SUCCESS])])
        ],
    ]

    # Function for checking result: list of (success, job ID or message)
    check_fn = ht.TListOf(
        ht.TAnd(ht.TIsLength(2),
                ht.TItems([ht.TBool,
                           ht.TOr(ht.TNonEmptyString, ht.TJobId)])))

    cl = cli.GetClient()
    result = cl.SubmitManyJobs(jobs)
    if not check_fn(result):
        raise errors.OpExecError("Job submission doesn't match %s: %s" %
                                 (check_fn, result))

    # Wait for jobs to finish
    jex = JobExecutor(cl=cl, opts=opts)

    for (status, job_id) in result:
        jex.AddJobId(None, status, job_id)

    job_results = jex.GetResults()
    if not compat.all(row[0] for row in job_results):
        raise errors.OpExecError(
            "At least one of the submitted jobs failed: %s" % job_results)

    # Get details about jobs
    data = cl.QueryJobs([job_id for (_, job_id) in result],
                        ["id", "opexec", "ops"])
    data_job_id = [job_id for (job_id, _, _) in data]
    data_opexec = [opexec for (_, opexec, _) in data]
    data_op = [[opcodes.OpCode.LoadOpCode(op) for op in ops]
               for (_, _, ops) in data]

    # Every submitted opcode has at most one dependency (see "jobs" above)
    assert compat.all(not op.depends or len(op.depends) == 1 for ops in data_op
                      for op in ops)

    # Check resolved job IDs in dependencies: relative IDs in "jobs" must
    # have been rewritten to the absolute IDs of the referenced jobs
    for (job_idx, res_jobdep) in [(1, data_job_id[0]), (2, data_job_id[0]),
                                  (4, data_job_id[2])]:
        if data_op[job_idx][0].depends[0][0] != res_jobdep:
            raise errors.OpExecError(
                "Job %s's opcode doesn't depend on correct job"
                " ID (%s)" % (job_idx, res_jobdep))

    # Check execution order: a dependent job must not start before the job
    # it depends on
    if not (data_opexec[0] <= data_opexec[1]
            and data_opexec[0] <= data_opexec[2]
            and data_opexec[2] <= data_opexec[4]):
        raise errors.OpExecError("Jobs did not run in correct order: %s" %
                                 data)

    assert len(jobs) == 5 and compat.all(len(ops) == 1 for ops in jobs)

    ToStdout("Job dependency tests were successful")
Пример #14
0
class IAReqInstanceAlloc(IARequestBase):
    """An instance allocation request.

  """
    # pylint: disable=E1101
    MODE = constants.IALLOCATOR_MODE_ALLOC
    REQ_PARAMS = [
        _INST_NAME,
        ("memory", ht.TNonNegativeInt),
        ("spindle_use", ht.TNonNegativeInt),
        ("disks", ht.TListOf(ht.TDict)),
        ("disk_template", ht.TString),
        ("group_name", ht.TMaybe(ht.TNonEmptyString)),
        ("os", ht.TString),
        ("tags", _STRING_LIST),
        ("nics", ht.TListOf(ht.TDict)),
        ("vcpus", ht.TInt),
        ("hypervisor", ht.TString),
        ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
    ]
    REQ_RESULT = ht.TList

    def RequiredNodes(self):
        """Calculates the required nodes based on the disk_template.

    @rtype: int
    @return: 2 for internally mirrored disk templates, 1 otherwise

    """
        if self.disk_template in constants.DTS_INT_MIRROR:
            return 2
        else:
            return 1

    def GetRequest(self, cfg):
        """Requests a new instance.

    The checks for the completeness of the opcode must have already been
    done.

    @param cfg: cluster configuration; unused in this request type but part
      of the common C{GetRequest} interface

    """
        # NOTE: mutates the request's disk dicts in place by recording the
        # disk template as each disk's type
        for d in self.disks:
            d[constants.IDISK_TYPE] = self.disk_template
        disk_space = gmi.ComputeDiskSize(self.disks)

        return {
            "name": self.name,
            "disk_template": self.disk_template,
            "group_name": self.group_name,
            "tags": self.tags,
            "os": self.os,
            "vcpus": self.vcpus,
            "memory": self.memory,
            "spindle_use": self.spindle_use,
            "disks": self.disks,
            "disk_space_total": disk_space,
            "nics": self.nics,
            "required_nodes": self.RequiredNodes(),
            "hypervisor": self.hypervisor,
        }

    def ValidateResult(self, ia, result):
        """Validates a single instance allocation request.

    @raise errors.ResultValidationError: if the allocation succeeded but the
      number of returned nodes does not match L{RequiredNodes}

    """
        IARequestBase.ValidateResult(self, ia, result)

        if ia.success and len(result) != self.RequiredNodes():
            raise errors.ResultValidationError(
                "iallocator returned invalid number"
                " of nodes (%s), required %s" %
                (len(result), self.RequiredNodes()))
Пример #15
0
import logging

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import outils
from ganeti import opcodes
from ganeti import serializer
from ganeti import utils

import ganeti.rpc.node as rpc
import ganeti.masterd.instance as gmi

#: Checks a list of plain strings
_STRING_LIST = ht.TListOf(ht.TString)
#: Checks a list of jobs, each job being a list of opcode dictionaries;
#: only failover, migrate and replace-disks opcodes are accepted
_JOB_LIST = ht.TListOf(
    ht.TListOf(
        ht.TStrictDict(
            True,
            False,
            {
                # pylint: disable=E1101
                # Class '...' has no 'OP_ID' member
                "OP_ID":
                ht.TElemOf([
                    opcodes.OpInstanceFailover.OP_ID,
                    opcodes.OpInstanceMigrate.OP_ID,
                    opcodes.OpInstanceReplaceDisks.OP_ID
                ]),
            })))
Пример #16
0
from ganeti import utils
from ganeti import serializer
from ganeti import ht
from ganeti import ssh
from ganeti import ssconf


#: A single SSH key specification: (key type, public part, private part)
_SSH_KEY_LIST_ITEM = \
  ht.TAnd(ht.TIsLength(3),
          ht.TItems([
            ht.TElemOf(constants.SSHK_ALL),
            ht.Comment("public")(ht.TNonEmptyString),
            ht.Comment("private")(ht.TNonEmptyString),
          ]))

#: Checks a list of SSH key specifications
_SSH_KEY_LIST = ht.TListOf(_SSH_KEY_LIST_ITEM)

#: Schema of the JSON input data accepted by this tool; see
#: L{ht.TStrictDict} for the meaning of the two leading boolean flags
_DATA_CHECK = ht.TStrictDict(
    False, True, {
        constants.SSHS_CLUSTER_NAME: ht.TNonEmptyString,
        constants.SSHS_NODE_DAEMON_CERTIFICATE: ht.TNonEmptyString,
        constants.SSHS_SSH_HOST_KEY: _SSH_KEY_LIST,
        constants.SSHS_SSH_ROOT_KEY: _SSH_KEY_LIST,
    })


class JoinError(errors.GenericError):
    """Local exception class used by this tool for reporting errors.

  """