Code example #1
def TestInstStatusCollector():
    """Test the Xen instance status collector.

  """
    enabled_hypervisors = qa_config.GetEnabledHypervisors()

    is_xen = (constants.HT_XEN_PVM in enabled_hypervisors
              or constants.HT_XEN_HVM in enabled_hypervisors)
    if not is_xen:
        return

    # Execute on master on an empty cluster
    AssertCommand([MON_COLLECTOR, "inst-status-xen"])

    # Execute on cluster with instances
    node1 = qa_config.AcquireNode()
    node2 = qa_config.AcquireNode()
    template = qa_config.GetDefaultDiskTemplate()

    instance = CreateInstanceByDiskTemplate([node1, node2], template)
    AssertCommand([MON_COLLECTOR, "inst-status-xen"], node=node1)
    AssertCommand([MON_COLLECTOR, "inst-status-xen"], node=node2)
    RemoveInstance(instance)

    node1.Release()
    node2.Release()
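
Note that this example releases its nodes only on the success path; most of the examples below wrap the test body in try/finally instead. A minimal sketch of that pattern (the helper name and test body are illustrative, not part of the QA library):

def _RunWithAcquiredNode(test_fn):
    # Acquire a node from the QA pool; it stays marked as in use until released.
    node = qa_config.AcquireNode()
    try:
        test_fn(node)
    finally:
        # Release even if the test body raises, so the pool is not leaked.
        node.Release()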
Code example #2
def RunExclusiveStorageTests():
    """Test exclusive storage."""
    if not qa_config.TestEnabled("cluster-exclusive-storage"):
        return

    node = qa_config.AcquireNode()
    try:
        old_es = qa_cluster.TestSetExclStorCluster(False)
        qa_node.TestExclStorSingleNode(node)

        qa_cluster.TestSetExclStorCluster(True)
        qa_cluster.TestExclStorSharedPv(node)

        if qa_config.TestEnabled("instance-add-plain-disk"):
            # Make sure that the cluster doesn't have any pre-existing problem
            qa_cluster.AssertClusterVerify()

            # Create and allocate instances
            instance1 = qa_instance.TestInstanceAddWithPlainDisk([node])
            try:
                instance2 = qa_instance.TestInstanceAddWithPlainDisk([node])
                try:
                    # cluster-verify checks that disks are allocated correctly
                    qa_cluster.AssertClusterVerify()

                    # Remove instances
                    qa_instance.TestInstanceRemove(instance2)
                    qa_instance.TestInstanceRemove(instance1)
                finally:
                    instance2.Release()
            finally:
                instance1.Release()

        if qa_config.TestEnabled("instance-add-drbd-disk"):
            snode = qa_config.AcquireNode()
            try:
                qa_cluster.TestSetExclStorCluster(False)
                instance = qa_instance.TestInstanceAddWithDrbdDisk(
                    [node, snode])
                try:
                    qa_cluster.TestSetExclStorCluster(True)
                    exp_err = [constants.CV_EINSTANCEUNSUITABLENODE]
                    qa_cluster.AssertClusterVerify(fail=True, errors=exp_err)
                    qa_instance.TestInstanceRemove(instance)
                finally:
                    instance.Release()
            finally:
                snode.Release()
        qa_cluster.TestSetExclStorCluster(old_es)
    finally:
        node.Release()
Code example #3
File: ganeti-qa.py Project: nh2/ganeti-test-1
def RunExportImportTests(instance, inodes):
  """Tries to export and import the instance.

  @type inodes: list of nodes
  @param inodes: current nodes of the instance

  """
  # FIXME: export explicitly bails out on file based storage. Other non-lvm
  # based storage types are untested, though. Also note that import could still
  # work, but is deeply embedded into the "export" case.
  if (qa_config.TestEnabled("instance-export") and
      instance.disk_template not in constants.DTS_FILEBASED):
    RunTest(qa_instance.TestInstanceExportNoTarget, instance)

    pnode = inodes[0]
    expnode = qa_config.AcquireNode(exclude=pnode)
    try:
      name = RunTest(qa_instance.TestInstanceExport, instance, expnode)

      RunTest(qa_instance.TestBackupList, expnode)

      if qa_config.TestEnabled("instance-import"):
        newinst = qa_config.AcquireInstance()
        try:
          RunTest(qa_instance.TestInstanceImport, newinst, pnode,
                  expnode, name)
          # Check if starting the instance works
          RunTest(qa_instance.TestInstanceStartup, newinst)
          RunTest(qa_instance.TestInstanceRemove, newinst)
        finally:
          newinst.Release()
    finally:
      expnode.Release()

  # FIXME: inter-cluster-instance-move crashes on file based instances :/
  # See Issue 414.
  if (qa_config.TestEnabled([qa_rapi.Enabled, "inter-cluster-instance-move"])
      and (instance.disk_template not in constants.DTS_FILEBASED)):
    newinst = qa_config.AcquireInstance()
    try:
      tnode = qa_config.AcquireNode(exclude=inodes)
      try:
        RunTest(qa_rapi.TestInterClusterInstanceMove, instance, newinst,
                inodes, tnode)
      finally:
        tnode.Release()
    finally:
      newinst.Release()
Code example #4
def TestClusterMasterFailoverWithDrainedQueue():
    """gnt-cluster master-failover with drained queue"""
    master = qa_config.GetMasterNode()
    failovermaster = qa_config.AcquireNode(exclude=master)

    # Ensure queue is not drained
    for node in [master, failovermaster]:
        _AssertDrainFile(node, fail=True)

    # Drain queue on failover master
    AssertCommand(["touch", _NodeQueueDrainFile(failovermaster)],
                  node=failovermaster)

    cmd = ["gnt-cluster", "master-failover"]
    try:
        _AssertDrainFile(failovermaster)
        AssertCommand(cmd, node=failovermaster)
        _AssertDrainFile(master, fail=True)
        _AssertDrainFile(failovermaster, fail=True)

        # Back to original master node
        AssertCommand(cmd, node=master)
    finally:
        failovermaster.Release()

    # Ensure queue is not drained
    for node in [master, failovermaster]:
        _AssertDrainFile(node, fail=True)
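
The drain-file helpers used above are not shown on this page. A plausible sketch, assuming the drain marker is a per-node file whose existence is checked with test -f (the real helpers and the pathutils constant may differ):

def _NodeQueueDrainFile(node):
    # Assumed: resolve the job-queue drain marker path for the given node.
    return qa_utils.MakeNodePath(node, pathutils.JOB_QUEUE_DRAIN_FILE)

def _AssertDrainFile(node, **kwargs):
    # Succeeds when the drain file exists; pass fail=True to assert its absence.
    AssertCommand(["test", "-f", _NodeQueueDrainFile(node)], node=node, **kwargs)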
Code example #5
def TestLiveRepair():
    """Test node evacuate failover upon diagnosis.

  """
    _SetUp('live-repair')
    n = random.randint(10000, 99999)
    node = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
    UploadData(
        node.primary, 'echo \'' + serializer.DumpJson({
            "status": "live-repair",
            "command": "repair",
            "details": str(n)
        }).strip() + '\'', 0755,
        '/etc/ganeti/node-diagnose-commands/live-repair')
    UploadData(
        node.primary, """#!/usr/bin/python
import sys
import json

n = json.loads(sys.stdin.read())['details']
with open('/tmp/' + n, 'w') as f:
  f.write(n)
print 'file written'
""", 0755, '/etc/ganeti/node-repair-commands/repair')
    _AssertRepairCommand()
    tag = _AssertRepairTagAddition(node)
    if str(n) != AssertCommand(["cat", "/tmp/" + str(n)], node=node)[1]:
        raise qa_error.Error('Repair command was unsuccessful')
    node.Release()
    _TearDown(node, tag, [
        '/etc/ganeti/node-diagnose-commands/live-repair',
        '/etc/ganeti/node-repair-commands/repair'
    ], False)
Code example #6
def _AcquireAllNodes():
    """Generator for acquiring all nodes in the QA config.

  """
    exclude = []
    try:
        while True:
            node = qa_config.AcquireNode(exclude=exclude)
            exclude.append(node)
            yield node
    except qa_error.OutOfNodesError:
        pass
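
Callers of this generator are responsible for releasing every node it yields. A minimal usage sketch (the wrapper below is illustrative, not part of the QA library):

def _ForEachAcquiredNode(fn):
    # Acquire every free node, apply fn, and always release what was taken.
    acquired = []
    try:
        for node in _AcquireAllNodes():
            acquired.append(node)
            fn(node)
    finally:
        for node in acquired:
            node.Release()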
Code example #7
def RunHardwareFailureTests(instance, inodes):
    """Test cluster internal hardware failure recovery.

  """
    RunTestIf("instance-failover", qa_instance.TestInstanceFailover, instance)
    RunTestIf(["instance-failover", qa_rapi.Enabled],
              qa_rapi.TestRapiInstanceFailover, instance)

    RunTestIf("instance-migrate", qa_instance.TestInstanceMigrate, instance)
    RunTestIf(["instance-migrate", qa_rapi.Enabled],
              qa_rapi.TestRapiInstanceMigrate, instance)

    if qa_config.TestEnabled("instance-replace-disks"):
        # We just need alternative secondary nodes, hence "- 1"
        othernodes = qa_config.AcquireManyNodes(len(inodes) - 1,
                                                exclude=inodes)
        try:
            RunTestIf(qa_rapi.Enabled, qa_rapi.TestRapiInstanceReplaceDisks,
                      instance)
            RunTest(qa_instance.TestReplaceDisks, instance, inodes, othernodes)
        finally:
            qa_config.ReleaseManyNodes(othernodes)
        del othernodes

    if qa_config.TestEnabled("instance-recreate-disks"):
        try:
            acquirednodes = qa_config.AcquireManyNodes(len(inodes),
                                                       exclude=inodes)
            othernodes = acquirednodes
        except qa_error.OutOfNodesError:
            if len(inodes) > 1:
                # If the cluster is not big enough, let's reuse some of the nodes, but
                # with different roles. In this way, we can test a DRBD instance even on
                # a 3-node cluster.
                acquirednodes = [qa_config.AcquireNode(exclude=inodes)]
                othernodes = acquirednodes + inodes[:-1]
            else:
                raise
        try:
            RunTest(qa_instance.TestRecreateDisks, instance, inodes,
                    othernodes)
        finally:
            qa_config.ReleaseManyNodes(acquirednodes)

    if len(inodes) >= 2:
        RunTestIf("node-evacuate", qa_node.TestNodeEvacuate, inodes[0],
                  inodes[1])
        RunTestIf("node-failover", qa_node.TestNodeFailover, inodes[0],
                  inodes[1])
        RunTestIf("node-migrate", qa_node.TestNodeMigrate, inodes[0],
                  inodes[1])
Code example #8
def TestClusterMasterFailover():
    """gnt-cluster master-failover"""
    master = qa_config.GetMasterNode()
    failovermaster = qa_config.AcquireNode(exclude=master)

    cmd = ["gnt-cluster", "master-failover"]
    node_list_cmd = ["gnt-node", "list"]
    try:
        AssertCommand(cmd, node=failovermaster)
        AssertCommand(node_list_cmd, node=failovermaster)
        # Back to original master node
        AssertCommand(cmd, node=master)
        AssertCommand(node_list_cmd, node=master)
    finally:
        failovermaster.Release()
Code example #9
def TestNodeEvacuate(node, node2):
  """gnt-node evacuate"""
  node3 = qa_config.AcquireNode(exclude=[node, node2])
  try:
    if qa_utils.GetNodeInstances(node3, secondaries=True):
      raise qa_error.UnusableNodeError("Evacuation node has at least one"
                                       " secondary instance. This test requires"
                                       " it to have no secondary instances.")

    # Evacuate all secondary instances
    AssertCommand(["gnt-node", "evacuate", "-f",
                   "--new-secondary=%s" % node3.primary, node2.primary])

    # ... and back again.
    AssertCommand(["gnt-node", "evacuate", "-f",
                   "--new-secondary=%s" % node2.primary, node3.primary])
  finally:
    node3.Release()
Code example #10
def TestOutOfBand():
  """gnt-node power"""
  master = qa_config.GetMasterNode()

  node = qa_config.AcquireNode(exclude=master)

  master_name = master.primary
  node_name = node.primary
  full_node_name = qa_utils.ResolveNodeName(node)

  (oob_path, verify_path,
   data_path, exit_code_path) = _CreateOobScriptStructure()

  try:
    AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                   "oob_program=%s" % oob_path])

    # No data, exit 0
    _UpdateOobFile(exit_code_path, "0")

    AssertCommand(["gnt-node", "power", "on", node_name])
    _AssertOobCall(verify_path, "power-on %s" % full_node_name)

    AssertCommand(["gnt-node", "power", "-f", "off", node_name])
    _AssertOobCall(verify_path, "power-off %s" % full_node_name)

    # Power off on master without options should fail
    AssertCommand(["gnt-node", "power", "-f", "off", master_name], fail=True)
    # With force master it should still fail
    AssertCommand(["gnt-node", "power", "-f", "--ignore-status", "off",
                   master_name],
                  fail=True)

    # Verify we can't transform back to online when not yet powered on
    AssertCommand(["gnt-node", "modify", "-O", "no", node_name],
                  fail=True)
    # Now reset state
    AssertCommand(["gnt-node", "modify", "-O", "no", "--node-powered", "yes",
                   node_name])

    AssertCommand(["gnt-node", "power", "-f", "cycle", node_name])
    _AssertOobCall(verify_path, "power-cycle %s" % full_node_name)

    # These commands should fail as they expect output which isn't provided yet,
    # but they should nevertheless have called the oob helper
    AssertCommand(["gnt-node", "power", "status", node_name],
                  fail=True)
    _AssertOobCall(verify_path, "power-status %s" % full_node_name)

    AssertCommand(["gnt-node", "health", node_name],
                  fail=True)
    _AssertOobCall(verify_path, "health %s" % full_node_name)

    AssertCommand(["gnt-node", "health"], fail=True)

    # Correct Data, exit 0
    _UpdateOobFile(data_path, serializer.DumpJson({"powered": True}))

    AssertCommand(["gnt-node", "power", "status", node_name])
    _AssertOobCall(verify_path, "power-status %s" % full_node_name)

    _UpdateOobFile(data_path, serializer.DumpJson([["temp", "OK"],
                                                   ["disk0", "CRITICAL"]]))

    AssertCommand(["gnt-node", "health", node_name])
    _AssertOobCall(verify_path, "health %s" % full_node_name)

    AssertCommand(["gnt-node", "health"])

    # These commands should fail as they expect no data, regardless of exit 0
    AssertCommand(["gnt-node", "power", "on", node_name], fail=True)
    _AssertOobCall(verify_path, "power-on %s" % full_node_name)

    try:
      AssertCommand(["gnt-node", "power", "-f", "off", node_name], fail=True)
      _AssertOobCall(verify_path, "power-off %s" % full_node_name)
    finally:
      AssertCommand(["gnt-node", "modify", "-O", "no", node_name])

    AssertCommand(["gnt-node", "power", "-f", "cycle", node_name], fail=True)
    _AssertOobCall(verify_path, "power-cycle %s" % full_node_name)

    # Data, exit 1 (all should fail)
    _UpdateOobFile(exit_code_path, "1")

    AssertCommand(["gnt-node", "power", "on", node_name], fail=True)
    _AssertOobCall(verify_path, "power-on %s" % full_node_name)

    try:
      AssertCommand(["gnt-node", "power", "-f", "off", node_name], fail=True)
      _AssertOobCall(verify_path, "power-off %s" % full_node_name)
    finally:
      AssertCommand(["gnt-node", "modify", "-O", "no", node_name])

    AssertCommand(["gnt-node", "power", "-f", "cycle", node_name], fail=True)
    _AssertOobCall(verify_path, "power-cycle %s" % full_node_name)

    AssertCommand(["gnt-node", "power", "status", node_name],
                  fail=True)
    _AssertOobCall(verify_path, "power-status %s" % full_node_name)

    AssertCommand(["gnt-node", "health", node_name],
                  fail=True)
    _AssertOobCall(verify_path, "health %s" % full_node_name)

    AssertCommand(["gnt-node", "health"], fail=True)

    # No data, exit 1 (all should fail)
    _UpdateOobFile(data_path, "")
    AssertCommand(["gnt-node", "power", "on", node_name], fail=True)
    _AssertOobCall(verify_path, "power-on %s" % full_node_name)

    try:
      AssertCommand(["gnt-node", "power", "-f", "off", node_name], fail=True)
      _AssertOobCall(verify_path, "power-off %s" % full_node_name)
    finally:
      AssertCommand(["gnt-node", "modify", "-O", "no", node_name])

    AssertCommand(["gnt-node", "power", "-f", "cycle", node_name], fail=True)
    _AssertOobCall(verify_path, "power-cycle %s" % full_node_name)

    AssertCommand(["gnt-node", "power", "status", node_name],
                  fail=True)
    _AssertOobCall(verify_path, "power-status %s" % full_node_name)

    AssertCommand(["gnt-node", "health", node_name],
                  fail=True)
    _AssertOobCall(verify_path, "health %s" % full_node_name)

    AssertCommand(["gnt-node", "health"], fail=True)

    # Different OOB script for node
    verify_path2 = qa_utils.UploadData(master.primary, "")
    oob_script = ("#!/bin/sh\n"
                  "echo \"$@\" > %s\n") % verify_path2
    oob_path2 = qa_utils.UploadData(master.primary, oob_script, mode=0700)

    try:
      AssertCommand(["gnt-node", "modify", "--node-parameters",
                     "oob_program=%s" % oob_path2, node_name])
      AssertCommand(["gnt-node", "power", "on", node_name])
      _AssertOobCall(verify_path2, "power-on %s" % full_node_name)
    finally:
      AssertCommand(["gnt-node", "modify", "--node-parameters",
                     "oob_program=default", node_name])
      AssertCommand(["rm", "-f", oob_path2, verify_path2])
  finally:
    AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                   "oob_program="])
    AssertCommand(["rm", "-f", oob_path, verify_path, data_path,
                   exit_code_path])
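
The assertions above rely on the dummy oob_program writing its arguments to the verify file. A plausible sketch of _AssertOobCall under that assumption (the real helper may differ in signature and error handling):

def _AssertOobCall(verify_path, expected_args):
  """Read back the arguments the dummy oob_program recorded and compare."""
  master = qa_config.GetMasterNode()
  verify_output_cmd = utils.ShellQuoteArgs(["cat", verify_path])
  output = qa_utils.GetCommandOutput(master.primary, verify_output_cmd)
  AssertEqual(expected_args, output.strip())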
Code example #11
def RunQa():
    """Main QA body.

  """
    RunTestBlock(RunEnvTests)
    SetupCluster()

    RunTestBlock(RunClusterTests)
    RunTestBlock(RunOsTests)

    RunTestIf("tags", qa_tags.TestClusterTags)

    RunTestBlock(RunCommonNodeTests)
    RunTestBlock(RunGroupListTests)
    RunTestBlock(RunGroupRwTests)
    RunTestBlock(RunNetworkTests)
    RunTestBlock(RunFilterTests)

    # The master shouldn't be readded or put offline; "delay" needs a non-master
    # node to test
    pnode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
    try:
        RunTestIf("node-readd", qa_node.TestNodeReadd, pnode)
        RunTestIf("node-modify", qa_node.TestNodeModify, pnode)
        RunTestIf("delay", qa_cluster.TestDelay, pnode)
    finally:
        pnode.Release()

    # Make sure the cluster is clean before running instance tests
    qa_cluster.AssertClusterVerify()

    pnode = qa_config.AcquireNode()
    try:
        RunTestIf("tags", qa_tags.TestNodeTags, pnode)

        if qa_rapi.Enabled():
            RunTest(qa_rapi.TestNode, pnode)

            if (qa_config.TestEnabled("instance-add-plain-disk")
                    and qa_config.IsTemplateSupported(constants.DT_PLAIN)):
                # Normal instance allocation via RAPI
                for use_client in [True, False]:
                    rapi_instance = RunTest(qa_rapi.TestRapiInstanceAdd, pnode,
                                            use_client)
                    try:
                        if qa_config.TestEnabled(
                                "instance-plain-rapi-common-tests"):
                            RunCommonInstanceTests(rapi_instance, [pnode])
                        RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance,
                                use_client)
                    finally:
                        rapi_instance.Release()
                    del rapi_instance

                # Multi-instance allocation
                rapi_instance_one, rapi_instance_two = \
                  RunTest(qa_rapi.TestRapiInstanceMultiAlloc, pnode)

                try:
                    RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_one,
                            True)
                    RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_two,
                            True)
                finally:
                    rapi_instance_one.Release()
                    rapi_instance_two.Release()
    finally:
        pnode.Release()

    config_list = [
        ("default-instance-tests", lambda: None, lambda _: None),
        (IsExclusiveStorageInstanceTestEnabled,
         lambda: qa_cluster.TestSetExclStorCluster(True),
         qa_cluster.TestSetExclStorCluster),
    ]
    for (conf_name, setup_conf_f, restore_conf_f) in config_list:
        if qa_config.TestEnabled(conf_name):
            oldconf = setup_conf_f()
            RunTestBlock(RunInstanceTests)
            restore_conf_f(oldconf)

    pnode = qa_config.AcquireNode()
    try:
        if qa_config.TestEnabled(
            ["instance-add-plain-disk", "instance-export"]):
            for shutdown in [False, True]:
                instance = RunTest(qa_instance.TestInstanceAddWithPlainDisk,
                                   [pnode])
                try:
                    expnode = qa_config.AcquireNode(exclude=pnode)
                    try:
                        if shutdown:
                            # Stop instance before exporting and removing it
                            RunTest(qa_instance.TestInstanceShutdown, instance)
                        RunTest(qa_instance.TestInstanceExportWithRemove,
                                instance, expnode)
                        RunTest(qa_instance.TestBackupList, expnode)
                    finally:
                        expnode.Release()
                finally:
                    instance.Release()
                del expnode
                del instance
            qa_cluster.AssertClusterVerify()

    finally:
        pnode.Release()

    if qa_rapi.Enabled():
        RunTestIf("filters", qa_rapi.TestFilters)

    RunTestIf("cluster-upgrade", qa_cluster.TestUpgrade)

    RunTestBlock(RunExclusiveStorageTests)
    RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"],
              TestIPolicyPlainInstance)

    RunTestBlock(RunCustomSshPortTests)

    RunTestIf("instance-add-restricted-by-disktemplates",
              qa_instance.TestInstanceCreationRestrictedByDiskTemplates)

    # Test removing instance with offline drbd secondary
    if qa_config.TestEnabled(
        ["instance-remove-drbd-offline", "instance-add-drbd-disk"]):
        # Make sure the master is not put offline
        snode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
        try:
            pnode = qa_config.AcquireNode(exclude=snode)
            try:
                instance = qa_instance.TestInstanceAddWithDrbdDisk(
                    [pnode, snode])
                set_offline = lambda node: qa_node.MakeNodeOffline(node, "yes")
                set_online = lambda node: qa_node.MakeNodeOffline(node, "no")
                RunTest(qa_instance.TestRemoveInstanceOfflineNode, instance,
                        snode, set_offline, set_online)
            finally:
                pnode.Release()
        finally:
            snode.Release()
        qa_cluster.AssertClusterVerify()

    RunTestBlock(RunMonitoringTests)

    RunPerformanceTests()

    RunTestIf("cluster-destroy", qa_node.TestNodeRemoveAll)

    RunTestIf("cluster-destroy", qa_cluster.TestClusterDestroy)
Code example #12
def RunInstanceTests():
    """Create and exercise instances."""

    requested_conversions = qa_config.get("convert-disk-templates", [])
    supported_conversions = \
        set(requested_conversions).difference(constants.DTS_NOT_CONVERTIBLE_TO)
    for (test_name, templ, create_fun, num_nodes) in \
        qa_instance.available_instance_tests:
        if (qa_config.TestEnabled(test_name)
                and qa_config.IsTemplateSupported(templ)):
            inodes = qa_config.AcquireManyNodes(num_nodes)
            try:
                instance = RunTest(create_fun, inodes)
                try:
                    RunTestIf("instance-user-down",
                              qa_instance.TestInstanceUserDown, instance)
                    RunTestIf("instance-communication",
                              qa_instance.TestInstanceCommunication, instance,
                              qa_config.GetMasterNode())
                    RunTestIf("cluster-epo", qa_cluster.TestClusterEpo)
                    RunDaemonTests(instance)
                    for node in inodes:
                        RunTestIf("haskell-confd", qa_node.TestNodeListDrbd,
                                  node, templ == constants.DT_DRBD8)
                    if len(inodes) > 1:
                        RunTestIf("group-rwops",
                                  qa_group.TestAssignNodesIncludingSplit,
                                  constants.INITIAL_NODE_GROUP_NAME,
                                  inodes[0].primary, inodes[1].primary)
                    # This test will run once but it will cover all the supported
                    # user-provided disk template conversions
                    if qa_config.TestEnabled("instance-convert-disk"):
                        if (len(supported_conversions) > 1
                                and instance.disk_template
                                in supported_conversions):
                            RunTest(qa_instance.TestInstanceShutdown, instance)
                            RunTest(
                                qa_instance.TestInstanceConvertDiskTemplate,
                                instance, supported_conversions)
                            RunTest(qa_instance.TestInstanceStartup, instance)
                            # At this point we clear the set because the
                            # requested conversions have been tested
                            supported_conversions.clear()
                        else:
                            test_desc = "Converting instance of template %s" % templ
                            ReportTestSkip(test_desc, "conversion feature")
                    RunTestIf("instance-modify-disks",
                              qa_instance.TestInstanceModifyDisks, instance)
                    RunCommonInstanceTests(instance, inodes)
                    if qa_config.TestEnabled("instance-modify-primary"):
                        othernode = qa_config.AcquireNode()
                        RunTest(qa_instance.TestInstanceModifyPrimaryAndBack,
                                instance, inodes[0], othernode)
                        othernode.Release()
                    RunGroupListTests()
                    RunExportImportTests(instance, inodes)
                    RunHardwareFailureTests(instance, inodes)
                    RunRepairDiskSizes()
                    RunTestIf(["rapi", "instance-data-censorship"],
                              qa_rapi.TestInstanceDataCensorship, instance,
                              inodes)
                    RunTest(qa_instance.TestInstanceRemove, instance)
                finally:
                    instance.Release()
                del instance
            finally:
                qa_config.ReleaseManyNodes(inodes)
            qa_cluster.AssertClusterVerify()
        else:
            test_desc = "Creating instances of template %s" % templ
            if not qa_config.TestEnabled(test_name):
                ReportTestSkip(test_desc, test_name)
            else:
                ReportTestSkip(test_desc, "disk template %s" % templ)
Code example #13
def TestIPolicyPlainInstance():
    """Test instance policy interaction with instances"""
    params = [
        "memory-size", "cpu-count", "disk-count", "disk-size", "nic-count"
    ]
    if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
        print "Template %s not supported" % constants.DT_PLAIN
        return

    # This test assumes that the group policy is empty
    (_, old_specs) = qa_cluster.TestClusterSetISpecs()
    # We also assume there is only one min/max bound
    assert len(old_specs[constants.ISPECS_MINMAX]) == 1
    node = qa_config.AcquireNode()
    try:
        # Log of policy changes, list of tuples:
        # (full_change, incremental_change, policy_violated)
        history = []
        instance = qa_instance.TestInstanceAddWithPlainDisk([node])
        try:
            policyerror = [constants.CV_EINSTANCEPOLICY]
            for par in params:
                (iminval,
                 imaxval) = qa_instance.GetInstanceSpec(instance.name, par)
                # Some specs must be a multiple of 4
                new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4,
                                          imaxval + 4)
                history.append((None, new_spec, True))
                if iminval > 0:
                    # Some specs must be a multiple of 4
                    if iminval >= 4:
                        upper = iminval - 4
                    else:
                        upper = iminval - 1
                    new_spec = _BuildSpecDict(par, 0, upper, upper)
                    history.append((None, new_spec, True))
                history.append((old_specs, None, False))

            # Test with two instance specs
            double_specs = copy.deepcopy(old_specs)
            double_specs[constants.ISPECS_MINMAX] = \
                double_specs[constants.ISPECS_MINMAX] * 2
            (par1, par2) = params[0:2]
            (_, imaxval1) = qa_instance.GetInstanceSpec(instance.name, par1)
            (_, imaxval2) = qa_instance.GetInstanceSpec(instance.name, par2)
            old_minmax = old_specs[constants.ISPECS_MINMAX][0]
            history.extend([
                (double_specs, None, False),
                # The first min/max limit is being violated
                (None,
                 _BuildDoubleSpecDict(0, par1, imaxval1 + 4, imaxval1 + 4,
                                      imaxval1 + 4), False),
                # Both min/max limits are being violated
                (None,
                 _BuildDoubleSpecDict(1, par2, imaxval2 + 4, None,
                                      imaxval2 + 4), True),
                # The second min/max limit is being violated
                (None,
                 _BuildDoubleSpecDict(0, par1,
                                      old_minmax[constants.ISPECS_MIN][par1],
                                      old_specs[constants.ISPECS_STD][par1],
                                      old_minmax[constants.ISPECS_MAX][par1]),
                 False),
                (old_specs, None, False),
            ])

            # Apply the changes, and check policy violations after each change
            qa_cluster.AssertClusterVerify()
            for (new_specs, diff_specs, failed) in history:
                qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                                diff_specs=diff_specs)
                if failed:
                    qa_cluster.AssertClusterVerify(warnings=policyerror)
                else:
                    qa_cluster.AssertClusterVerify()

            qa_instance.TestInstanceRemove(instance)
        finally:
            instance.Release()

        # Now we replay the same policy changes, and we expect that the instance
        # cannot be created for the cases where we had a policy violation above
        for (new_specs, diff_specs, failed) in history:
            qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                            diff_specs=diff_specs)
            if failed:
                qa_instance.TestInstanceAddWithPlainDisk([node], fail=True)
            # Instance creation with no policy violation has been tested already
    finally:
        node.Release()
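
_BuildSpecDict assembles a single-bound ispecs override for one parameter; given how old_specs is indexed above, its shape is likely the following (a sketch, not necessarily the project's exact helper):

def _BuildSpecDict(par, mn, st, mx):
    # One min/max pair plus a standard value, all for the same parameter.
    return {
        constants.ISPECS_MINMAX: [{
            constants.ISPECS_MIN: {par: mn},
            constants.ISPECS_MAX: {par: mx},
        }],
        constants.ISPECS_STD: {par: st},
    }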
Code example #14
def Workload(client):
    """ The actual RAPI workload used for tests.

  @type client: C{GanetiRapiClientWrapper}
  @param client: A wrapped RAPI client.

  """

    # First just the simple information retrievals
    TestGetters(client)

    # Then the only remaining function which is parameter-free
    Finish(client, client.RedistributeConfig)

    # Try changing the cluster parameters
    TestClusterParameterModification(client)

    TestTags(client, client.GetClusterTags, client.AddClusterTags,
             client.DeleteClusterTags)

    # Generously assume the master is present
    node = qa_config.AcquireNode()
    TestTags(client, client.GetNodeTags, client.AddNodeTags,
             client.DeleteNodeTags, node.primary)
    node.Release()

    # Instance tests

    # First remove all instances the QA might have created
    RemoveAllInstances(client)

    nodes = qa_config.AcquireManyNodes(2)
    instances = qa_config.AcquireManyInstances(2)
    TestSingleInstance(client, instances[0].name, instances[1].name,
                       nodes[0].primary, nodes[1].primary)
    qa_config.ReleaseManyInstances(instances)
    qa_config.ReleaseManyNodes(nodes)

    # Test all the queries which involve resources that do not have functions
    # of their own
    TestQueries(client, "lock")
    TestQueries(client, "job")
    TestQueries(client, "export")

    node = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
    TestNodeOperations(client, node.primary)
    TestQueryFiltering(client, node.primary)
    node.Release()

    nodes = qa_config.AcquireManyNodes(2)
    TestGroupOperations(client, nodes[0].primary, nodes[1].primary)
    qa_config.ReleaseManyNodes(nodes)

    TestNetworks(client)

    nodes = qa_config.AcquireManyNodes(3)
    instance = qa_config.AcquireInstance()
    TestInstanceMigrations(client, nodes[0].primary, nodes[1].primary,
                           nodes[2].primary, instance.name)
    instance.Release()
    qa_config.ReleaseManyNodes(nodes)

    nodes = qa_config.AcquireManyNodes(2)
    instances = qa_config.AcquireManyInstances(2)
    TestInstanceMoves(client, nodes[0], nodes[1], instances[0], instances[1])
    TestJobCancellation(client, nodes[0].primary, nodes[1].primary,
                        instances[0].name, instances[1].name)
    qa_config.ReleaseManyInstances(instances)
    qa_config.ReleaseManyNodes(nodes)