Example #1
def TestInstStatusCollector():
    """Test the Xen instance status collector.

  """
    enabled_hypervisors = qa_config.GetEnabledHypervisors()

    is_xen = (constants.HT_XEN_PVM in enabled_hypervisors
              or constants.HT_XEN_HVM in enabled_hypervisors)
    if not is_xen:
        return

    # Execute on master on an empty cluster
    AssertCommand([MON_COLLECTOR, "inst-status-xen"])

    # Execute on a cluster with instances
    node1 = qa_config.AcquireNode()
    node2 = qa_config.AcquireNode()
    template = qa_config.GetDefaultDiskTemplate()

    instance = CreateInstanceByDiskTemplate([node1, node2], template)
    AssertCommand([MON_COLLECTOR, "inst-status-xen"], node=node1)
    AssertCommand([MON_COLLECTOR, "inst-status-xen"], node=node2)
    RemoveInstance(instance)

    node1.Release()
    node2.Release()
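
Example #1 releases its nodes only on the success path; the later examples wrap each acquisition in try/finally. Below is a minimal sketch of the same flow with guaranteed cleanup (the function name is hypothetical; it assumes the same qa_config, CreateInstanceByDiskTemplate, RemoveInstance and AssertCommand helpers used above):

def TestInstStatusCollectorWithCleanup():
    """Variant of the collector test that always releases its resources."""
    node1 = qa_config.AcquireNode()
    try:
        node2 = qa_config.AcquireNode(exclude=[node1])
        try:
            template = qa_config.GetDefaultDiskTemplate()
            instance = CreateInstanceByDiskTemplate([node1, node2], template)
            try:
                AssertCommand([MON_COLLECTOR, "inst-status-xen"], node=node1)
                AssertCommand([MON_COLLECTOR, "inst-status-xen"], node=node2)
            finally:
                RemoveInstance(instance)
        finally:
            node2.Release()
    finally:
        node1.Release()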
Example #2
def RunExclusiveStorageTests():
    """Test exclusive storage."""
    if not qa_config.TestEnabled("cluster-exclusive-storage"):
        return

    node = qa_config.AcquireNode()
    try:
        old_es = qa_cluster.TestSetExclStorCluster(False)
        qa_node.TestExclStorSingleNode(node)

        qa_cluster.TestSetExclStorCluster(True)
        qa_cluster.TestExclStorSharedPv(node)

        if qa_config.TestEnabled("instance-add-plain-disk"):
            # Make sure that the cluster doesn't have any pre-existing problem
            qa_cluster.AssertClusterVerify()

            # Create and allocate instances
            instance1 = qa_instance.TestInstanceAddWithPlainDisk([node])
            try:
                instance2 = qa_instance.TestInstanceAddWithPlainDisk([node])
                try:
                    # cluster-verify checks that disks are allocated correctly
                    qa_cluster.AssertClusterVerify()

                    # Remove instances
                    qa_instance.TestInstanceRemove(instance2)
                    qa_instance.TestInstanceRemove(instance1)
                finally:
                    instance2.Release()
            finally:
                instance1.Release()

        if qa_config.TestEnabled("instance-add-drbd-disk"):
            snode = qa_config.AcquireNode()
            try:
                qa_cluster.TestSetExclStorCluster(False)
                instance = qa_instance.TestInstanceAddWithDrbdDisk(
                    [node, snode])
                try:
                    qa_cluster.TestSetExclStorCluster(True)
                    exp_err = [constants.CV_EINSTANCEUNSUITABLENODE]
                    qa_cluster.AssertClusterVerify(fail=True, errors=exp_err)
                    qa_instance.TestInstanceRemove(instance)
                finally:
                    instance.Release()
            finally:
                snode.Release()
        qa_cluster.TestSetExclStorCluster(old_es)
    finally:
        node.Release()
Example #3
    def testAcquireNodeOrder(self):
        # Mark all nodes as added (master excluded)
        for node in self.config["nodes"]:
            if node != self.config.GetMasterNode():
                node.MarkAdded()

        nodecount = len(self.config["nodes"])

        for iterations in [0, 1, 3, 100, 127, 7964]:
            acquired = []

            for i in range(iterations):
                node = qa_config.AcquireNode(_cfg=self.config)
                self.assertTrue(node.use_count > 0)
                self.assertEqual(node.use_count, (i // nodecount + 1))
                acquired.append((node.use_count, node.primary, node))

            # Check if returned nodes were in correct order
            key_fn = lambda item: (item[0], utils.NiceSortKey(item[1]),
                                   item[2])
            self.assertEqual(acquired, sorted(acquired, key=key_fn))

            # Release previously acquired nodes
            qa_config.ReleaseManyNodes(map(operator.itemgetter(2), acquired))

            # Check if nodes were actually released
            for node in self.config["nodes"]:
                self.assertEqual(node.use_count, 0)
                self.assertTrue(node.added
                                or node == self.config.GetMasterNode())
Example #4
    def testAcquireNodeTooMany(self):
        # Mark all nodes as added (master excluded)
        for node in self.config["nodes"]:
            if node != self.config.GetMasterNode():
                node.MarkAdded()

        nodecount = len(self.config["nodes"])

        self.assertTrue(nodecount > 1)

        acquired = []

        for _ in range(nodecount):
            node = qa_config.AcquireNode(exclude=acquired, _cfg=self.config)
            if node == self.config.GetMasterNode():
                self.assertFalse(node.added)
            else:
                self.assertTrue(node.added)
            self.assertEqual(node.use_count, 1)
            acquired.append(node)

        self.assertRaises(qa_error.OutOfNodesError,
                          qa_config.AcquireNode,
                          exclude=acquired,
                          _cfg=self.config)
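
Example #4 above pins down the contract that qa_config.AcquireNode raises qa_error.OutOfNodesError once all nodes are in use or excluded. A short sketch (hypothetical helper) of turning that exception into an optional result, the same fallback idea Example #9 below applies when reusing nodes:

def TryAcquireNode(exclude=None):
    """Returns an acquired node, or None if the node pool is exhausted."""
    try:
        return qa_config.AcquireNode(exclude=exclude)
    except qa_error.OutOfNodesError:
        return None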
Example #5
def RunExportImportTests(instance, inodes):
    """Tries to export and import the instance.

  @type inodes: list of nodes
  @param inodes: current nodes of the instance

  """
    # FIXME: export explicitly bails out on file-based storage. Other
    # non-LVM-based storage types are untested, though. Also note that import
    # could still work, but it is deeply embedded into the "export" case.
    if qa_config.TestEnabled("instance-export"):
        RunTest(qa_instance.TestInstanceExportNoTarget, instance)

        pnode = inodes[0]
        expnode = qa_config.AcquireNode(exclude=pnode)
        try:
            name = RunTest(qa_instance.TestInstanceExport, instance, expnode)

            RunTest(qa_instance.TestBackupList, expnode)

            if qa_config.TestEnabled("instance-import"):
                newinst = qa_config.AcquireInstance()
                try:
                    RunTest(qa_instance.TestInstanceImport, newinst, pnode,
                            expnode, name)
                    # Check if starting the instance works
                    RunTest(qa_instance.TestInstanceStartup, newinst)
                    RunTest(qa_instance.TestInstanceRemove, newinst)
                finally:
                    newinst.Release()
        finally:
            expnode.Release()

    # FIXME: inter-cluster-instance-move crashes on file-based instances :/
    # See Issue 414.
    if (qa_config.TestEnabled([qa_rapi.Enabled,
                               "inter-cluster-instance-move"])):
        newinst = qa_config.AcquireInstance()
        try:
            tnode = qa_config.AcquireNode(exclude=inodes)
            try:
                RunTest(qa_rapi.TestInterClusterInstanceMove, instance,
                        newinst, inodes, tnode)
            finally:
                tnode.Release()
        finally:
            newinst.Release()
Example #6
  def testAcquireNodeNoneAdded(self):
    self.assertFalse(compat.any(n.added for n in self.config["nodes"]))

    # First call must return master node
    node = qa_config.AcquireNode(_cfg=self.config)
    self.assertEqual(node, self.config.GetMasterNode())

    # Next call with exclusion list fails
    self.assertRaises(qa_error.OutOfNodesError, qa_config.AcquireNode,
                      exclude=[node], _cfg=self.config)
Example #7
def _AcquireAllNodes():
  """Generator for acquiring all nodes in the QA config.

  """
  exclude = []
  try:
    while True:
      node = qa_config.AcquireNode(exclude=exclude)
      exclude.append(node)
      yield node
  except qa_error.OutOfNodesError:
    pass
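
A minimal usage sketch for the generator above (the function name is hypothetical; it assumes the AssertCommand and ReleaseManyNodes helpers from the earlier examples). Whatever the generator yields must still be released by the caller:

def RunOnAllNodes():
  """Runs a trivial check on every node the QA config can hand out."""
  nodes = list(_AcquireAllNodes())
  try:
    for node in nodes:
      AssertCommand(["gnt-node", "list", node.primary], node=node)
  finally:
    qa_config.ReleaseManyNodes(nodes)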
Example #8
def RunInstanceTestsFull(create_fun, inodes, supported_conversions, templ):
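    """Creates an instance via create_fun on inodes and runs the full
    instance test suite against it, covering the disk template conversions
    listed in supported_conversions for template templ.

    """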
    instance = RunTest(create_fun, inodes)
    try:
        RunTestIf("instance-user-down", qa_instance.TestInstanceUserDown,
                  instance)
        RunTestIf("instance-communication",
                  qa_instance.TestInstanceCommunication, instance,
                  qa_config.GetMasterNode())
        RunTestIf("cluster-epo", qa_cluster.TestClusterEpo)
        RunDaemonTests(instance)
        for node in inodes:
            RunTestIf("haskell-confd", qa_node.TestNodeListDrbd, node,
                      templ == constants.DT_DRBD8)
        if len(inodes) > 1:
            RunTestIf("group-rwops", qa_group.TestAssignNodesIncludingSplit,
                      constants.INITIAL_NODE_GROUP_NAME, inodes[0].primary,
                      inodes[1].primary)
        # This test will run once but it will cover all the supported
        # user-provided disk template conversions
        if qa_config.TestEnabled("instance-convert-disk"):
            if (len(supported_conversions) > 1
                    and instance.disk_template in supported_conversions):
                RunTest(qa_instance.TestInstanceShutdown, instance)
                RunTest(qa_instance.TestInstanceConvertDiskTemplate, instance,
                        supported_conversions)
                RunTest(qa_instance.TestInstanceStartup, instance)
                # At this point we clear the set because the requested
                # conversions have been tested
                supported_conversions.clear()
            else:
                test_desc = "Converting instance of template %s" % templ
                ReportTestSkip(test_desc, "conversion feature")
        RunTestIf("instance-modify-disks", qa_instance.TestInstanceModifyDisks,
                  instance)
        RunCommonInstanceTests(instance, inodes)
        if qa_config.TestEnabled("instance-modify-primary"):
            othernode = qa_config.AcquireNode()
            RunTest(qa_instance.TestInstanceModifyPrimaryAndBack, instance,
                    inodes[0], othernode)
            othernode.Release()
        RunGroupListTests()
        RunExportImportTests(instance, inodes)
        RunHardwareFailureTests(instance, inodes)
        RunRepairDiskSizes()
        RunTestIf(["rapi", "instance-data-censorship"],
                  qa_rapi.TestInstanceDataCensorship, instance, inodes)
        RunTest(qa_instance.TestInstanceRemove, instance)
    finally:
        instance.Release()
    del instance

    qa_cluster.AssertClusterVerify()
Example #9
def RunHardwareFailureTests(instance, inodes):
    """Test cluster internal hardware failure recovery.

  """
    RunTestIf("instance-failover", qa_instance.TestInstanceFailover, instance)
    RunTestIf(["instance-failover", qa_rapi.Enabled],
              qa_rapi.TestRapiInstanceFailover, instance)

    RunTestIf("instance-migrate", qa_instance.TestInstanceMigrate, instance)
    RunTestIf(["instance-migrate", qa_rapi.Enabled],
              qa_rapi.TestRapiInstanceMigrate, instance)

    if qa_config.TestEnabled("instance-replace-disks"):
        # We just need alternative secondary nodes, hence "- 1"
        othernodes = qa_config.AcquireManyNodes(len(inodes) - 1,
                                                exclude=inodes)
        try:
            RunTestIf(qa_rapi.Enabled, qa_rapi.TestRapiInstanceReplaceDisks,
                      instance)
            RunTest(qa_instance.TestReplaceDisks, instance, inodes, othernodes)
        finally:
            qa_config.ReleaseManyNodes(othernodes)
        del othernodes

    if qa_config.TestEnabled("instance-recreate-disks"):
        try:
            acquirednodes = qa_config.AcquireManyNodes(len(inodes),
                                                       exclude=inodes)
            othernodes = acquirednodes
        except qa_error.OutOfNodesError:
            if len(inodes) > 1:
                # If the cluster is not big enough, let's reuse some of the nodes, but
                # with different roles. In this way, we can test a DRBD instance even on
                # a 3-node cluster.
                acquirednodes = [qa_config.AcquireNode(exclude=inodes)]
                othernodes = acquirednodes + inodes[:-1]
            else:
                raise
        try:
            RunTest(qa_instance.TestRecreateDisks, instance, inodes,
                    othernodes)
        finally:
            qa_config.ReleaseManyNodes(acquirednodes)

    if len(inodes) >= 2:
        RunTestIf("node-evacuate", qa_node.TestNodeEvacuate, inodes[0],
                  inodes[1])
        RunTestIf("node-failover", qa_node.TestNodeFailover, inodes[0],
                  inodes[1])
        RunTestIf("node-migrate", qa_node.TestNodeMigrate, inodes[0],
                  inodes[1])
Example #10
def TestNodeEvacuate(node, node2):
    """gnt-node evacuate"""
    node3 = qa_config.AcquireNode(exclude=[node, node2])
    try:
        if qa_utils.GetNodeInstances(node3, secondaries=True):
            raise qa_error.UnusableNodeError(
                "Evacuation node has at least one"
                " secondary instance. This test requires"
                " it to have no secondary instances.")

        # Evacuate all secondary instances
        AssertCommand([
            "gnt-node", "evacuate", "-f",
            "--new-secondary=%s" % node3.primary, node2.primary
        ])

        # ... and back again.
        AssertCommand([
            "gnt-node", "evacuate", "-f",
            "--new-secondary=%s" % node2.primary, node3.primary
        ])
    finally:
        node3.Release()
Example #11
def TestIPolicyPlainInstance():
    """Test instance policy interaction with instances"""
    params = [
        "memory-size", "cpu-count", "disk-count", "disk-size", "nic-count"
    ]
    if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
        print("Template %s not supported" % constants.DT_PLAIN)
        return

    # This test assumes that the group policy is empty
    (_, old_specs) = qa_cluster.TestClusterSetISpecs()
    # We also assume there is only one min/max bound
    assert len(old_specs[constants.ISPECS_MINMAX]) == 1
    node = qa_config.AcquireNode()
    try:
        # Log of policy changes, list of tuples:
        # (full_change, incremental_change, policy_violated)
        history = []
        instance = qa_instance.TestInstanceAddWithPlainDisk([node])
        try:
            policyerror = [constants.CV_EINSTANCEPOLICY]
            for par in params:
                (iminval,
                 imaxval) = qa_instance.GetInstanceSpec(instance.name, par)
                # Some specs must be multiples of 4
                new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4,
                                          imaxval + 4)
                history.append((None, new_spec, True))
                if iminval > 0:
                    # Some specs must be multiples of 4
                    if iminval >= 4:
                        upper = iminval - 4
                    else:
                        upper = iminval - 1
                    new_spec = _BuildSpecDict(par, 0, upper, upper)
                    history.append((None, new_spec, True))
                history.append((old_specs, None, False))

            # Test with two instance specs
            double_specs = copy.deepcopy(old_specs)
            double_specs[constants.ISPECS_MINMAX] = \
                double_specs[constants.ISPECS_MINMAX] * 2
            (par1, par2) = params[0:2]
            (_, imaxval1) = qa_instance.GetInstanceSpec(instance.name, par1)
            (_, imaxval2) = qa_instance.GetInstanceSpec(instance.name, par2)
            old_minmax = old_specs[constants.ISPECS_MINMAX][0]
            history.extend([
                (double_specs, None, False),
                # The first min/max limit is being violated
                (None,
                 _BuildDoubleSpecDict(0, par1, imaxval1 + 4, imaxval1 + 4,
                                      imaxval1 + 4), False),
                # Both min/max limits are being violated
                (None,
                 _BuildDoubleSpecDict(1, par2, imaxval2 + 4, None,
                                      imaxval2 + 4), True),
                # The second min/max limit is being violated
                (None,
                 _BuildDoubleSpecDict(0, par1,
                                      old_minmax[constants.ISPECS_MIN][par1],
                                      old_specs[constants.ISPECS_STD][par1],
                                      old_minmax[constants.ISPECS_MAX][par1]),
                 False),
                (old_specs, None, False),
            ])

            # Apply the changes, and check policy violations after each change
            qa_cluster.AssertClusterVerify()
            for (new_specs, diff_specs, failed) in history:
                qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                                diff_specs=diff_specs)
                if failed:
                    qa_cluster.AssertClusterVerify(warnings=policyerror)
                else:
                    qa_cluster.AssertClusterVerify()

            qa_instance.TestInstanceRemove(instance)
        finally:
            instance.Release()

        # Now we replay the same policy changes, and we expect that the instance
        # cannot be created for the cases where we had a policy violation above
        for (new_specs, diff_specs, failed) in history:
            qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                            diff_specs=diff_specs)
            if failed:
                qa_instance.TestInstanceAddWithPlainDisk([node], fail=True)
            # Instance creation with no policy violation has been tested already
    finally:
        node.Release()
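
The _BuildSpecDict and _BuildDoubleSpecDict helpers are referenced above but not shown on this page. Below is a plausible reconstruction, assuming the ispecs dictionary layout accepted by qa_cluster.TestClusterSetISpecs; the actual implementation may differ:

def _BuildSpecDict(par, mn, st, mx):
    """Builds a diff spec setting min/std/max of a single parameter."""
    return {
        constants.ISPECS_MINMAX: [{
            constants.ISPECS_MIN: {par: mn},
            constants.ISPECS_MAX: {par: mx},
        }],
        constants.ISPECS_STD: {par: st},
    }


def _BuildDoubleSpecDict(index, par, mn, st, mx):
    """Like _BuildSpecDict, but only touches one of two min/max entries."""
    new_spec = {
        constants.ISPECS_MINMAX: [{}, {}],
    }
    if st is not None:
        new_spec[constants.ISPECS_STD] = {par: st}
    new_spec[constants.ISPECS_MINMAX][index] = {
        constants.ISPECS_MIN: {par: mn},
        constants.ISPECS_MAX: {par: mx},
    }
    return new_spec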
Example #12
def RunQa():
    """Main QA body.

  """
    RunTestBlock(RunEnvTests)
    SetupCluster()

    RunTestBlock(RunClusterTests)
    RunTestBlock(RunOsTests)

    RunTestIf("tags", qa_tags.TestClusterTags)

    RunTestBlock(RunCommonNodeTests)
    RunTestBlock(RunGroupListTests)
    RunTestBlock(RunGroupRwTests)
    RunTestBlock(RunNetworkTests)
    RunTestBlock(RunFilterTests)

    # The master shouldn't be readded or put offline; "delay" needs a non-master
    # node to test
    pnode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
    try:
        RunTestIf("node-readd", qa_node.TestNodeReadd, pnode)
        RunTestIf("node-modify", qa_node.TestNodeModify, pnode)
        RunTestIf("delay", qa_cluster.TestDelay, pnode)
    finally:
        pnode.Release()

    # Make sure the cluster is clean before running instance tests
    qa_cluster.AssertClusterVerify()

    pnode = qa_config.AcquireNode()
    try:
        RunTestIf("tags", qa_tags.TestNodeTags, pnode)

        if qa_rapi.Enabled():
            RunTest(qa_rapi.TestNode, pnode)

            if (qa_config.TestEnabled("instance-add-plain-disk")
                    and qa_config.IsTemplateSupported(constants.DT_PLAIN)):
                # Normal instance allocation via RAPI
                for use_client in [True, False]:
                    rapi_instance = RunTest(qa_rapi.TestRapiInstanceAdd, pnode,
                                            use_client)
                    try:
                        if qa_config.TestEnabled(
                                "instance-plain-rapi-common-tests"):
                            RunCommonInstanceTests(rapi_instance, [pnode])
                        RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance,
                                use_client)
                    finally:
                        rapi_instance.Release()
                    del rapi_instance

                # Multi-instance allocation
                rapi_instance_one, rapi_instance_two = \
                  RunTest(qa_rapi.TestRapiInstanceMultiAlloc, pnode)

                try:
                    RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_one,
                            True)
                    RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_two,
                            True)
                finally:
                    rapi_instance_one.Release()
                    rapi_instance_two.Release()
    finally:
        pnode.Release()

    config_list = [
        ("default-instance-tests", lambda: None, lambda _: None),
        (IsExclusiveStorageInstanceTestEnabled,
         lambda: qa_cluster.TestSetExclStorCluster(True),
         qa_cluster.TestSetExclStorCluster),
    ]
    for (conf_name, setup_conf_f, restore_conf_f) in config_list:
        if qa_config.TestEnabled(conf_name):
            oldconf = setup_conf_f()
            RunTestBlock(RunInstanceTests)
            restore_conf_f(oldconf)

    pnode = qa_config.AcquireNode()
    try:
        if qa_config.TestEnabled(
            ["instance-add-plain-disk", "instance-export"]):
            for shutdown in [False, True]:
                instance = RunTest(qa_instance.TestInstanceAddWithPlainDisk,
                                   [pnode])
                try:
                    expnode = qa_config.AcquireNode(exclude=pnode)
                    try:
                        if shutdown:
                            # Stop instance before exporting and removing it
                            RunTest(qa_instance.TestInstanceShutdown, instance)
                        RunTest(qa_instance.TestInstanceExportWithRemove,
                                instance, expnode)
                        RunTest(qa_instance.TestBackupList, expnode)
                    finally:
                        expnode.Release()
                finally:
                    instance.Release()
                del expnode
                del instance
            qa_cluster.AssertClusterVerify()

    finally:
        pnode.Release()

    if qa_rapi.Enabled():
        RunTestIf("filters", qa_rapi.TestFilters)

    RunTestIf("cluster-upgrade", qa_cluster.TestUpgrade)

    RunTestBlock(RunExclusiveStorageTests)
    RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"],
              TestIPolicyPlainInstance)

    RunTestBlock(RunCustomSshPortTests)

    RunTestIf("instance-add-restricted-by-disktemplates",
              qa_instance.TestInstanceCreationRestrictedByDiskTemplates)

    RunTestIf("instance-add-osparams", qa_instance.TestInstanceAddOsParams)
    RunTestIf("instance-add-osparams", qa_instance.TestSecretOsParams)

    # Test removing an instance with an offline DRBD secondary
    if qa_config.TestEnabled(
        ["instance-remove-drbd-offline", "instance-add-drbd-disk"]):
        # Make sure the master is not put offline
        snode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
        try:
            pnode = qa_config.AcquireNode(exclude=snode)
            try:
                instance = qa_instance.TestInstanceAddWithDrbdDisk(
                    [pnode, snode])
                set_offline = lambda node: qa_node.MakeNodeOffline(node, "yes")
                set_online = lambda node: qa_node.MakeNodeOffline(node, "no")
                RunTest(qa_instance.TestRemoveInstanceOfflineNode, instance,
                        snode, set_offline, set_online)
            finally:
                pnode.Release()
        finally:
            snode.Release()
        qa_cluster.AssertClusterVerify()

    RunTestBlock(RunMonitoringTests)

    RunPerformanceTests()

    RunTestIf("cluster-destroy", qa_node.TestNodeRemoveAll)

    RunTestIf("cluster-destroy", qa_cluster.TestClusterDestroy)
Example #13
def TestOutOfBand():
    """gnt-node power"""
    master = qa_config.GetMasterNode()

    node = qa_config.AcquireNode(exclude=master)

    master_name = master.primary
    node_name = node.primary
    full_node_name = qa_utils.ResolveNodeName(node)

    (oob_path, verify_path, data_path,
     exit_code_path) = _CreateOobScriptStructure()

    try:
        AssertCommand([
            "gnt-cluster", "modify", "--node-parameters",
            "oob_program=%s" % oob_path
        ])

        # No data, exit 0
        _UpdateOobFile(exit_code_path, "0")

        AssertCommand(["gnt-node", "power", "on", node_name])
        _AssertOobCall(verify_path, "power-on %s" % full_node_name)

        AssertCommand(["gnt-node", "power", "-f", "off", node_name])
        _AssertOobCall(verify_path, "power-off %s" % full_node_name)

        # Powering off the master without options should fail
        AssertCommand(["gnt-node", "power", "-f", "off", master_name],
                      fail=True)
        # Even with --ignore-status it should still fail
        AssertCommand(
            ["gnt-node", "power", "-f", "--ignore-status", "off", master_name],
            fail=True)

        # Verify we can't set the node back to online when not yet powered on
        AssertCommand(["gnt-node", "modify", "-O", "no", node_name], fail=True)
        # Now reset state
        AssertCommand([
            "gnt-node", "modify", "-O", "no", "--node-powered", "yes",
            node_name
        ])

        AssertCommand(["gnt-node", "power", "-f", "cycle", node_name])
        _AssertOobCall(verify_path, "power-cycle %s" % full_node_name)

        # These commands should fail as they expect output which isn't
        # provided yet, but they should have called the OOB helper nevertheless
        AssertCommand(["gnt-node", "power", "status", node_name], fail=True)
        _AssertOobCall(verify_path, "power-status %s" % full_node_name)

        AssertCommand(["gnt-node", "health", node_name], fail=True)
        _AssertOobCall(verify_path, "health %s" % full_node_name)

        AssertCommand(["gnt-node", "health"], fail=True)

        # Correct data, exit 0
        _UpdateOobFile(data_path, serializer.DumpJson({"powered": True}))

        AssertCommand(["gnt-node", "power", "status", node_name])
        _AssertOobCall(verify_path, "power-status %s" % full_node_name)

        _UpdateOobFile(
            data_path,
            serializer.DumpJson([["temp", "OK"], ["disk0", "CRITICAL"]]))

        AssertCommand(["gnt-node", "health", node_name])
        _AssertOobCall(verify_path, "health %s" % full_node_name)

        AssertCommand(["gnt-node", "health"])

        # These commands should fail as they expect no data despite exit code 0
        AssertCommand(["gnt-node", "power", "on", node_name], fail=True)
        _AssertOobCall(verify_path, "power-on %s" % full_node_name)

        try:
            AssertCommand(["gnt-node", "power", "-f", "off", node_name],
                          fail=True)
            _AssertOobCall(verify_path, "power-off %s" % full_node_name)
        finally:
            AssertCommand(["gnt-node", "modify", "-O", "no", node_name])

        AssertCommand(["gnt-node", "power", "-f", "cycle", node_name],
                      fail=True)
        _AssertOobCall(verify_path, "power-cycle %s" % full_node_name)

        # Data, exit 1 (all should fail)
        _UpdateOobFile(exit_code_path, "1")

        AssertCommand(["gnt-node", "power", "on", node_name], fail=True)
        _AssertOobCall(verify_path, "power-on %s" % full_node_name)

        try:
            AssertCommand(["gnt-node", "power", "-f", "off", node_name],
                          fail=True)
            _AssertOobCall(verify_path, "power-off %s" % full_node_name)
        finally:
            AssertCommand(["gnt-node", "modify", "-O", "no", node_name])

        AssertCommand(["gnt-node", "power", "-f", "cycle", node_name],
                      fail=True)
        _AssertOobCall(verify_path, "power-cycle %s" % full_node_name)

        AssertCommand(["gnt-node", "power", "status", node_name], fail=True)
        _AssertOobCall(verify_path, "power-status %s" % full_node_name)

        AssertCommand(["gnt-node", "health", node_name], fail=True)
        _AssertOobCall(verify_path, "health %s" % full_node_name)

        AssertCommand(["gnt-node", "health"], fail=True)

        # No data, exit 1 (all should fail)
        _UpdateOobFile(data_path, "")
        AssertCommand(["gnt-node", "power", "on", node_name], fail=True)
        _AssertOobCall(verify_path, "power-on %s" % full_node_name)

        try:
            AssertCommand(["gnt-node", "power", "-f", "off", node_name],
                          fail=True)
            _AssertOobCall(verify_path, "power-off %s" % full_node_name)
        finally:
            AssertCommand(["gnt-node", "modify", "-O", "no", node_name])

        AssertCommand(["gnt-node", "power", "-f", "cycle", node_name],
                      fail=True)
        _AssertOobCall(verify_path, "power-cycle %s" % full_node_name)

        AssertCommand(["gnt-node", "power", "status", node_name], fail=True)
        _AssertOobCall(verify_path, "power-status %s" % full_node_name)

        AssertCommand(["gnt-node", "health", node_name], fail=True)
        _AssertOobCall(verify_path, "health %s" % full_node_name)

        AssertCommand(["gnt-node", "health"], fail=True)

        # Different OOB script for node
        verify_path2 = qa_utils.UploadData(master.primary, "")
        oob_script = ("#!/bin/sh\n" "echo \"$@\" > %s\n") % verify_path2
        oob_path2 = qa_utils.UploadData(master.primary, oob_script, mode=0o700)

        try:
            AssertCommand([
                "gnt-node", "modify", "--node-parameters",
                "oob_program=%s" % oob_path2, node_name
            ])
            AssertCommand(["gnt-node", "power", "on", node_name])
            _AssertOobCall(verify_path2, "power-on %s" % full_node_name)
        finally:
            AssertCommand([
                "gnt-node", "modify", "--node-parameters",
                "oob_program=default", node_name
            ])
            AssertCommand(["rm", "-f", oob_path2, verify_path2])
    finally:
        AssertCommand(
            ["gnt-cluster", "modify", "--node-parameters", "oob_program="])
        AssertCommand(
            ["rm", "-f", oob_path, verify_path, data_path, exit_code_path])