class TestArchInfo(unittest.TestCase):
    EXP_TYPES = \
      ht.TAnd(ht.TIsLength(2),
              ht.TItems([
                ht.TNonEmptyString,
                ht.TNonEmptyString,
                ]))

    def setUp(self):
        self.assertTrue(runtime._arch is None)

    def tearDown(self):
        runtime._arch = None

    def testNotInitialized(self):
        self.assertRaises(errors.ProgrammerError, runtime.GetArchInfo)

    def testInitializeMultiple(self):
        runtime.InitArchInfo()

        self.assertRaises(errors.ProgrammerError, runtime.InitArchInfo)

    def testNormal(self):
        runtime.InitArchInfo()

        info = runtime.GetArchInfo()

        self.assertTrue(self.EXP_TYPES(info),
                        msg=("Doesn't match expected type description: %s" %
                             self.EXP_TYPES))
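
The EXP_TYPES check above only matches a two-item sequence of non-empty strings. Below is a minimal sketch of the same check rebuilt standalone; the sample tuples are invented and are not real runtime.GetArchInfo() output.

from ganeti import ht

# Illustration only: same structure as EXP_TYPES above; sample values invented.
check = ht.TAnd(ht.TIsLength(2),
                ht.TItems([ht.TNonEmptyString, ht.TNonEmptyString]))

assert check(("x86_64", "64bit"))   # two non-empty strings: accepted
assert not check(("x86_64",))       # wrong length: rejected
assert not check(("x86_64", ""))    # empty second item: rejected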
Example 2
    def testOr(self):
        fn = ht.TMaybe(ht.TAnd(ht.TString, ht.TIsLength(5)))
        self.assertTrue(fn("12345"))
        self.assertTrue(fn(None))
        self.assertFalse(fn(1))
        self.assertFalse(fn(""))
        self.assertFalse(fn("abc"))
Example 3
def _GetGroupData(qcl, uuid):
    """Retrieves instances and nodes per node group.

  """
    queries = [
        (constants.QR_INSTANCE, [
            "name", "status", "disks_active", "snodes", "pnode.group.uuid",
            "snodes.group.uuid"
        ], [qlang.OP_EQUAL, "pnode.group.uuid", uuid]),
        (constants.QR_NODE, ["name", "bootid",
                             "offline"], [qlang.OP_EQUAL, "group.uuid", uuid]),
    ]

    results = []
    for what, fields, qfilter in queries:
        results.append(qcl.Query(what, fields, qfilter))

    # Materialize the results: the data is iterated both by the assert below
    # and by the value extraction that follows
    results_data = list(map(operator.attrgetter("data"), results))

    # Ensure results are tuples with two values
    assert compat.all(
        map(ht.TListOf(ht.TListOf(ht.TIsLength(2))), results_data))

    # Extract values ignoring result status
    (raw_instances, raw_nodes) = [[map(compat.snd, values) for values in res]
                                  for res in results_data]

    secondaries = {}
    instances = []

    # Load all instances
    for (name, status, disks_active, snodes, pnode_group_uuid,
         snodes_group_uuid) in raw_instances:
        if snodes and set([pnode_group_uuid]) != set(snodes_group_uuid):
            logging.error(
                "Ignoring split instance '%s', primary group %s, secondary"
                " groups %s", name, pnode_group_uuid,
                utils.CommaJoin(snodes_group_uuid))
        else:
            instances.append(Instance(name, status, disks_active, snodes))

            for node in snodes:
                secondaries.setdefault(node, set()).add(name)

    # Load all nodes
    nodes = [
        Node(name, bootid, offline, secondaries.get(name, set()))
        for (name, bootid, offline) in raw_nodes
    ]

    return (dict((node.name, node) for node in nodes),
            dict((inst.name, inst) for inst in instances))
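
The assert in _GetGroupData relies on every query result row being a list of two-element (status, value) pairs. A hedged sketch of the shape that check accepts; the payload below is invented, real data comes from qcl.Query(...).data.

# Illustration only: the shape enforced by the assert above, with invented data.
row_check = ht.TListOf(ht.TListOf(ht.TIsLength(2)))

sample = [
    [[0, "inst1"], [0, "running"]],        # one row: (status, value) pairs
    [[0, "inst2"], [0, "ADMIN_down"]],
]
assert row_check(sample)
assert not row_check([[[0, "inst1", "x"]]])   # three-element pair: rejected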
Example 4
class IAReqMultiInstanceAlloc(IARequestBase):
    """An multi instance allocation request.

  """
    # pylint: disable=E1101
    MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
    REQ_PARAMS = [
        ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
    ]
    _MASUCCESS = \
      ht.TListOf(ht.TAnd(ht.TIsLength(2),
                         ht.TItems([ht.TNonEmptyString,
                                    ht.TListOf(ht.TNonEmptyString),
                                    ])))
    _MAFAILED = ht.TListOf(ht.TNonEmptyString)
    REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
                         ht.TItems([_MASUCCESS, _MAFAILED]))

    def GetRequest(self, cfg):
        return {
            "instances": [iareq.GetRequest(cfg) for iareq in self.instances],
        }
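
REQ_RESULT above describes a two-item result: a list of (non-empty string, list of non-empty strings) pairs for successful allocations and a flat list of names for failed ones. A minimal sketch of the same check rebuilt outside the class; all instance and node names are invented.

from ganeti import ht

# Illustration only: same structure as REQ_RESULT above, with invented names.
masuccess = ht.TListOf(ht.TAnd(ht.TIsLength(2),
                               ht.TItems([ht.TNonEmptyString,
                                          ht.TListOf(ht.TNonEmptyString)])))
mafailed = ht.TListOf(ht.TNonEmptyString)
result_check = ht.TAnd(ht.TList, ht.TIsLength(2),
                       ht.TItems([masuccess, mafailed]))

assert result_check([[["inst1", ["node1", "node2"]]], ["inst2"]])
assert not result_check([[], [], []])   # wrong top-level length: rejected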
Example 5
def BuildJobDepCheck(relative):
    """Builds check for job dependencies (L{DEPEND_ATTR}).

  @type relative: bool
  @param relative: Whether to accept relative job IDs (negative)
  @rtype: callable

  """
    if relative:
        job_id = ht.TOr(ht.TJobId, ht.TRelativeJobId)
    else:
        job_id = ht.TJobId

    job_dep = \
      ht.TAnd(ht.TOr(ht.TListOf(ht.TAny), ht.TTuple),
              ht.TIsLength(2),
              ht.TItems([job_id,
                         ht.TListOf(ht.TElemOf(constants.JOBS_FINALIZED))]))

    return ht.TMaybe(ht.TListOf(job_dep))
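
A minimal usage sketch of the check returned by BuildJobDepCheck, assuming the surrounding module already imports ht and constants; the dependency entries below are invented.

# Illustration only: invented dependency specs run through the returned check.
check_relative = BuildJobDepCheck(True)

assert check_relative(None)                                     # no dependencies
assert check_relative([[-1, [constants.JOB_STATUS_SUCCESS]]])   # relative job ID
assert not check_relative([[-1, ["not-a-status"]]])             # unknown status

check_absolute = BuildJobDepCheck(False)
assert not check_absolute([[-1, [constants.JOB_STATUS_SUCCESS]]])  # relative IDs rejected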
Example 6
"""

from ganeti import constants
from ganeti import errors
from ganeti import ht

_FOURCC_LEN = 4

#: Items in the individual rows of the NodeDrbd query
_HTNodeDrbdItems = [
    ht.TString, ht.TInt, ht.TString, ht.TString, ht.TString, ht.TString
]
#: Type for the (top-level) result of NodeDrbd query
HTNodeDrbd = ht.TListOf(
    ht.TAnd(ht.TList, ht.TIsLength(len(_HTNodeDrbdItems)),
            ht.TItems(_HTNodeDrbdItems)))


def PackMagic(payload):
    """Prepend the confd magic fourcc to a payload.

  """
    return "".join([constants.CONFD_MAGIC_FOURCC, payload])


def UnpackMagic(payload):
    """Unpack and check the confd magic fourcc from a payload.

  """
    if len(payload) < _FOURCC_LEN:
Example 7
  # TODO: Test dependencies on errors
  jobs = [
    [opcodes.OpTestDelay(duration=1)],
    [opcodes.OpTestDelay(duration=1,
                         depends=[(-1, [])])],
    [opcodes.OpTestDelay(duration=1,
                         depends=[(-2, [constants.JOB_STATUS_SUCCESS])])],
    [opcodes.OpTestDelay(duration=1,
                         depends=[])],
    [opcodes.OpTestDelay(duration=1,
                         depends=[(-2, [constants.JOB_STATUS_SUCCESS])])],
    ]

  # Function for checking result
  check_fn = ht.TListOf(ht.TAnd(ht.TIsLength(2),
                                ht.TItems([ht.TBool,
                                           ht.TOr(ht.TNonEmptyString,
                                                  ht.TJobId)])))

  cl = cli.GetClient()
  result = cl.SubmitManyJobs(jobs)
  if not check_fn(result):
    raise errors.OpExecError("Job submission doesn't match %s: %s" %
                             (check_fn, result))

  # Wait for jobs to finish
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result:
    jex.AddJobId(None, status, job_id)
Example 8
import optparse
import sys
import logging

from ganeti import cli
from ganeti import constants
from ganeti import errors
from ganeti import pathutils
from ganeti import utils
from ganeti import ht
from ganeti import ssh
from ganeti.tools import common


_SSH_KEY_LIST_ITEM = \
  ht.TAnd(ht.TIsLength(3),
          ht.TItems([
            ht.TSshKeyType,
            ht.Comment("public")(ht.TNonEmptyString),
            ht.Comment("private")(ht.TNonEmptyString),
          ]))

_SSH_KEY_LIST = ht.TListOf(_SSH_KEY_LIST_ITEM)

_DATA_CHECK = ht.TStrictDict(
    False, True, {
        constants.SSHS_CLUSTER_NAME:
        ht.TNonEmptyString,
        constants.SSHS_NODE_DAEMON_CERTIFICATE:
        ht.TNonEmptyString,
        constants.SSHS_SSH_HOST_KEY:
Example 9
def _GetGroupData(qcl, uuid):
    """Retrieves instances and nodes per node group.

  """
    locks = qcl.Query(constants.QR_LOCK, ["name", "mode"], None)

    prefix = "instance/"
    prefix_len = len(prefix)

    locked_instances = set()

    for [[_, name], [_, lock]] in locks.data:
        if name.startswith(prefix) and lock:
            locked_instances.add(name[prefix_len:])

    queries = [
        (constants.QR_INSTANCE, [
            "name", "status", "admin_state", "admin_state_source",
            "disks_active", "snodes", "pnode.group.uuid", "snodes.group.uuid",
            "disk_template"
        ], [qlang.OP_EQUAL, "pnode.group.uuid", uuid]),
        (constants.QR_NODE, ["name", "bootid",
                             "offline"], [qlang.OP_EQUAL, "group.uuid", uuid]),
    ]

    results_data = [
        qcl.Query(what, field, qfilter).data
        for (what, field, qfilter) in queries
    ]

    # Ensure results are tuples with two values
    assert compat.all(
        ht.TListOf(ht.TListOf(ht.TIsLength(2)))(d) for d in results_data)

    # Extract values ignoring result status
    (raw_instances, raw_nodes) = [[[v[1] for v in values] for values in res]
                                  for res in results_data]

    secondaries = {}
    instances = []

    # Load all instances
    for (name, status, config_state, config_state_source, disks_active, snodes,
         pnode_group_uuid, snodes_group_uuid, disk_template) in raw_instances:
        if snodes and set([pnode_group_uuid]) != set(snodes_group_uuid):
            logging.error(
                "Ignoring split instance '%s', primary group %s, secondary"
                " groups %s", name, pnode_group_uuid,
                utils.CommaJoin(snodes_group_uuid))
        else:
            instances.append(
                Instance(name, status, config_state, config_state_source,
                         disks_active, snodes, disk_template))

            for node in snodes:
                secondaries.setdefault(node, set()).add(name)

    # Load all nodes
    nodes = [
        Node(name, bootid, offline, secondaries.get(name, set()))
        for (name, bootid, offline) in raw_nodes
    ]

    return (dict((node.name, node) for node in nodes),
            dict((inst.name, inst) for inst in instances), locked_instances)
Example 10
def _TestJobDependency(opts):
    """Tests job dependencies.

  """
    ToStdout("Testing job dependencies")

    try:
        cl = cli.GetClient()
        SubmitOpCode(opcodes.OpTestDelay(duration=0, depends=[(-1, None)]),
                     cl=cl)
    except errors.GenericError as err:
        if opts.debug:
            ToStdout("Ignoring error for 'wrong dependencies' test: %s", err)
    else:
        raise errors.OpExecError("Submitting plain opcode with relative job ID"
                                 " did not fail as expected")

    # TODO: Test dependencies on errors
    jobs = [
        [opcodes.OpTestDelay(duration=1)],
        [opcodes.OpTestDelay(duration=1, depends=[(-1, [])])],
        [
            opcodes.OpTestDelay(duration=1,
                                depends=[(-2, [constants.JOB_STATUS_SUCCESS])])
        ],
        [opcodes.OpTestDelay(duration=1, depends=[])],
        [
            opcodes.OpTestDelay(duration=1,
                                depends=[(-2, [constants.JOB_STATUS_SUCCESS])])
        ],
    ]

    # Function for checking result
    check_fn = ht.TListOf(
        ht.TAnd(ht.TIsLength(2),
                ht.TItems([ht.TBool,
                           ht.TOr(ht.TNonEmptyString, ht.TJobId)])))

    cl = cli.GetClient()
    result = cl.SubmitManyJobs(jobs)
    if not check_fn(result):
        raise errors.OpExecError("Job submission doesn't match %s: %s" %
                                 (check_fn, result))

    # Wait for jobs to finish
    jex = JobExecutor(cl=cl, opts=opts)

    for (status, job_id) in result:
        jex.AddJobId(None, status, job_id)

    job_results = jex.GetResults()
    if not compat.all(row[0] for row in job_results):
        raise errors.OpExecError(
            "At least one of the submitted jobs failed: %s" % job_results)

    # Get details about jobs
    data = cl.QueryJobs([job_id for (_, job_id) in result],
                        ["id", "opexec", "ops"])
    data_job_id = [job_id for (job_id, _, _) in data]
    data_opexec = [opexec for (_, opexec, _) in data]
    data_op = [[opcodes.OpCode.LoadOpCode(op) for op in ops]
               for (_, _, ops) in data]

    assert compat.all(not op.depends or len(op.depends) == 1 for ops in data_op
                      for op in ops)

    # Check resolved job IDs in dependencies
    for (job_idx, res_jobdep) in [(1, data_job_id[0]), (2, data_job_id[0]),
                                  (4, data_job_id[2])]:
        if data_op[job_idx][0].depends[0][0] != res_jobdep:
            raise errors.OpExecError(
                "Job %s's opcode doesn't depend on correct job"
                " ID (%s)" % (job_idx, res_jobdep))

    # Check execution order
    if not (data_opexec[0] <= data_opexec[1]
            and data_opexec[0] <= data_opexec[2]
            and data_opexec[2] <= data_opexec[4]):
        raise errors.OpExecError("Jobs did not run in correct order: %s" %
                                 data)

    assert len(jobs) == 5 and compat.all(len(ops) == 1 for ops in jobs)

    ToStdout("Job dependency tests were successful")
Example 11
        ht.TStrictDict(
            True,
            False,
            {
                # pylint: disable=E1101
                # Class '...' has no 'OP_ID' member
                "OP_ID":
                ht.TElemOf([
                    opcodes.OpInstanceFailover.OP_ID,
                    opcodes.OpInstanceMigrate.OP_ID,
                    opcodes.OpInstanceReplaceDisks.OP_ID
                ]),
            })))

_NEVAC_MOVED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(3),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TNonEmptyString,
                                ht.TListOf(ht.TNonEmptyString),
                                ])))
_NEVAC_FAILED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(2),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TMaybeString,
                                ])))
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                        ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))

_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)
Example 12
        [opcodes.OpTestDelay(duration=1)],
        [opcodes.OpTestDelay(duration=1, depends=[(-1, [])])],
        [
            opcodes.OpTestDelay(duration=1,
                                depends=[(-2, [constants.JOB_STATUS_SUCCESS])])
        ],
        [opcodes.OpTestDelay(duration=1, depends=[])],
        [
            opcodes.OpTestDelay(duration=1,
                                depends=[(-2, [constants.JOB_STATUS_SUCCESS])])
        ],
    ]

    # Function for checking result
    check_fn = ht.TListOf(
        ht.TAnd(ht.TIsLength(2),
                ht.TItems([ht.TBool,
                           ht.TOr(ht.TNonEmptyString, ht.TJobId)])))

    cl = cli.GetClient()
    result = cl.SubmitManyJobs(jobs)
    if not check_fn(result):
        raise errors.OpExecError("Job submission doesn't match %s: %s" %
                                 (check_fn, result))

    # Wait for jobs to finish
    jex = JobExecutor(cl=cl, opts=opts)

    for (status, job_id) in result:
        jex.AddJobId(None, status, job_id)
Example 13
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import utils
from ganeti.cmdlib.common import AnnotateDiskParams, \
  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled, \
  ComputeIPolicySpecViolation

#: Type description for changes as returned by L{ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names,
                         os_type, status, minmem, maxmem, vcpus, nics,
                         disk_template, disks, bep, hvp, hypervisor_name,
                         tags):
    """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
Example 14
    def testIsLength(self):
        fn = ht.TIsLength(10)
        self.assertTrue(fn(range(10)))
        self.assertFalse(fn(range(1)))
        self.assertFalse(fn(range(100)))
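
TIsLength only compares len(value) against the expected size, so any sized object passes or fails purely on its length; a minimal sketch, assuming the usual "from ganeti import ht".

# Illustration only: TIsLength cares about length, not container type.
fn = ht.TIsLength(3)
assert fn([1, 2, 3])
assert fn("abc")
assert fn({"a": 1, "b": 2, "c": 3})
assert not fn([])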