def RunCustomSshPortTests():
  """Test accessing nodes with custom SSH ports.

  This requires removing nodes, adding them to a new group, and then undoing
  the change.
  """
  if not qa_config.TestEnabled("group-custom-ssh-port"):
    return

  std_port = netutils.GetDaemonPort(constants.SSH)
  port = 211
  master = qa_config.GetMasterNode()
  with qa_config.AcquireManyNodesCtx(1, exclude=master) as nodes:
    # If the node(s) can be reached over IPv6, skip the whole test.
    for node in nodes:
      if qa_utils.UsesIPv6Connection(node.primary, std_port):
        print("Node %s is likely to be reached using IPv6,"
              " skipping the test" % (node.primary, ))
        return

    for node in nodes:
      qa_node.NodeRemove(node)
    with qa_iptables.RulesContext() as r:
      with qa_group.NewGroupCtx() as group:
        qa_group.ModifyGroupSshPort(r, group, nodes, port)

        for node in nodes:
          qa_node.NodeAdd(node, group=group)

        # Make sure that the cluster doesn't have any pre-existing problem
        qa_cluster.AssertClusterVerify()

        # Create and allocate instances
        instance1 = qa_instance.TestInstanceAddWithPlainDisk(nodes)
        try:
          instance2 = qa_instance.TestInstanceAddWithPlainDisk(nodes)
          try:
            # cluster-verify checks that disks are allocated correctly
            qa_cluster.AssertClusterVerify()

            # Remove instances
            qa_instance.TestInstanceRemove(instance2)
            qa_instance.TestInstanceRemove(instance1)
          finally:
            instance2.Release()
        finally:
          instance1.Release()

        for node in nodes:
          qa_node.NodeRemove(node)

    for node in nodes:
      qa_node.NodeAdd(node)

  qa_cluster.AssertClusterVerify()
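

# For context: qa_group.ModifyGroupSshPort above is relied on to both set the
# group-level "ssh_port" node parameter and install port redirects so the QA
# runner keeps SSH access to the re-added nodes. A minimal sketch of the idea,
# assuming the "ssh_port" node parameter and a RedirectPort helper on the
# iptables rules context (the helper name and redirect details are an
# assumption, not the definitive implementation):
def _ModifyGroupSshPortSketch(ipt_rules, group, nodes, ssh_port):
  std_port = netutils.GetDaemonPort(constants.SSH)
  qa_utils.AssertCommand(["gnt-group", "modify",
                          "--node-parameters=ssh_port=%s" % ssh_port,
                          group])
  for node in nodes:
    # Redirect connections aimed at the custom port to the real SSH daemon.
    ipt_rules.RedirectPort(node.primary, "localhost", ssh_port, std_port)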


def RunExclusiveStorageTests():
  """Test exclusive storage."""
  if not qa_config.TestEnabled("cluster-exclusive-storage"):
    return

  node = qa_config.AcquireNode()
  try:
    old_es = qa_cluster.TestSetExclStorCluster(False)
    qa_node.TestExclStorSingleNode(node)

    qa_cluster.TestSetExclStorCluster(True)
    qa_cluster.TestExclStorSharedPv(node)

    if qa_config.TestEnabled("instance-add-plain-disk"):
      # Make sure that the cluster doesn't have any pre-existing problem
      qa_cluster.AssertClusterVerify()

      # Create and allocate instances
      instance1 = qa_instance.TestInstanceAddWithPlainDisk([node])
      try:
        instance2 = qa_instance.TestInstanceAddWithPlainDisk([node])
        try:
          # cluster-verify checks that disks are allocated correctly
          qa_cluster.AssertClusterVerify()

          # Remove instances
          qa_instance.TestInstanceRemove(instance2)
          qa_instance.TestInstanceRemove(instance1)
        finally:
          instance2.Release()
      finally:
        instance1.Release()

    if qa_config.TestEnabled("instance-add-drbd-disk"):
      snode = qa_config.AcquireNode()
      try:
        qa_cluster.TestSetExclStorCluster(False)
        instance = qa_instance.TestInstanceAddWithDrbdDisk([node, snode])
        try:
          qa_cluster.TestSetExclStorCluster(True)
          exp_err = [constants.CV_EINSTANCEUNSUITABLENODE]
          qa_cluster.AssertClusterVerify(fail=True, errors=exp_err)
          qa_instance.TestInstanceRemove(instance)
        finally:
          instance.Release()
      finally:
        snode.Release()

    qa_cluster.TestSetExclStorCluster(old_es)
  finally:
    node.Release()
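

# For context: qa_cluster.TestSetExclStorCluster above toggles the
# cluster-wide "exclusive_storage" node parameter and returns the previous
# value so the caller can restore it. A minimal sketch of the underlying
# operation, assuming the standard gnt-cluster CLI (the real helper also
# verifies the resulting cluster state):
def _SetExclusiveStorageSketch(newvalue):
  qa_utils.AssertCommand(["gnt-cluster", "modify", "--node-parameters",
                          "exclusive_storage=%s" % newvalue])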


def RunPerformanceTests():
  """Run the performance-related QA tests."""
  if not qa_config.TestEnabled("performance"):
    ReportTestSkip("performance related tests", "performance")
    return

  # For reproducible performance, run the performance tests with the watcher
  # paused.
  qa_utils.AssertCommand(["gnt-cluster", "watcher", "pause", "4h"])

  if qa_config.TestEnabled("jobqueue-performance"):
    RunTest(qa_performance.TestParallelMaxInstanceCreationPerformance)
    RunTest(qa_performance.TestParallelNodeCountInstanceCreationPerformance)

    instances = qa_performance.CreateAllInstances()

    RunTest(qa_performance.TestParallelModify, instances)
    RunTest(qa_performance.TestParallelInstanceOSOperations, instances)
    RunTest(qa_performance.TestParallelInstanceQueries, instances)

    qa_performance.RemoveAllInstances(instances)

    RunTest(qa_performance.TestJobQueueSubmissionPerformance)

  if qa_config.TestEnabled("parallel-performance"):
    if qa_config.IsTemplateSupported(constants.DT_DRBD8):
      RunTest(qa_performance.TestParallelDRBDInstanceCreationPerformance)
    if qa_config.IsTemplateSupported(constants.DT_PLAIN):
      RunTest(qa_performance.TestParallelPlainInstanceCreationPerformance)

  # Preparations need to be made only if some of these tests are enabled
  if qa_config.IsTemplateSupported(constants.DT_DRBD8) and \
     qa_config.TestEnabled(qa_config.Either(list(PARALLEL_TEST_DICT))):
    inodes = qa_config.AcquireManyNodes(2)
    try:
      instance = qa_instance.TestInstanceAddWithDrbdDisk(inodes)
      try:
        for (test_name, test_fn) in PARALLEL_TEST_DICT.items():
          RunTestIf(test_name, test_fn, instance)
      finally:
        instance.Release()
      qa_instance.TestInstanceRemove(instance)
    finally:
      qa_config.ReleaseManyNodes(inodes)

  qa_utils.AssertCommand(["gnt-cluster", "watcher", "continue"])
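

# For reference: PARALLEL_TEST_DICT, used by RunPerformanceTests above, is
# expected to map QA test names to qa_performance functions that exercise a
# single DRBD instance in parallel. A minimal sketch of the shape (the exact
# set of entries here is an assumption):
PARALLEL_TEST_DICT = {
  "parallel-failover": qa_performance.TestParallelInstanceFailover,
  "parallel-migration": qa_performance.TestParallelInstanceMigration,
  }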


def TestIPolicyPlainInstance():
  """Test instance policy interaction with instances"""
  params = ["memory-size", "cpu-count", "disk-count", "disk-size",
            "nic-count"]
  if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
    print("Template %s not supported" % constants.DT_PLAIN)
    return

  # This test assumes that the group policy is empty
  (_, old_specs) = qa_cluster.TestClusterSetISpecs()

  # We also assume that there is only one min/max bound
  assert len(old_specs[constants.ISPECS_MINMAX]) == 1
  node = qa_config.AcquireNode()
  try:
    # Log of policy changes, list of tuples:
    # (full_change, incremental_change, policy_violated)
    history = []

    instance = qa_instance.TestInstanceAddWithPlainDisk([node])
    try:
      policyerror = [constants.CV_EINSTANCEPOLICY]
      for par in params:
        (iminval, imaxval) = qa_instance.GetInstanceSpec(instance.name, par)
        # Some specs must be multiples of 4
        new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4, imaxval + 4)
        history.append((None, new_spec, True))
        if iminval > 0:
          # Some specs must be multiples of 4
          if iminval >= 4:
            upper = iminval - 4
          else:
            upper = iminval - 1
          new_spec = _BuildSpecDict(par, 0, upper, upper)
          history.append((None, new_spec, True))
        history.append((old_specs, None, False))

      # Test with two instance specs
      double_specs = copy.deepcopy(old_specs)
      double_specs[constants.ISPECS_MINMAX] = \
        double_specs[constants.ISPECS_MINMAX] * 2
      (par1, par2) = params[0:2]
      (_, imaxval1) = qa_instance.GetInstanceSpec(instance.name, par1)
      (_, imaxval2) = qa_instance.GetInstanceSpec(instance.name, par2)
      old_minmax = old_specs[constants.ISPECS_MINMAX][0]
      history.extend([
        (double_specs, None, False),
        # The first min/max limit is being violated
        (None,
         _BuildDoubleSpecDict(0, par1, imaxval1 + 4, imaxval1 + 4,
                              imaxval1 + 4),
         False),
        # Both min/max limits are being violated
        (None,
         _BuildDoubleSpecDict(1, par2, imaxval2 + 4, None, imaxval2 + 4),
         True),
        # The second min/max limit is being violated
        (None,
         _BuildDoubleSpecDict(0, par1,
                              old_minmax[constants.ISPECS_MIN][par1],
                              old_specs[constants.ISPECS_STD][par1],
                              old_minmax[constants.ISPECS_MAX][par1]),
         False),
        (old_specs, None, False),
        ])

      # Apply the changes, and check policy violations after each change
      qa_cluster.AssertClusterVerify()
      for (new_specs, diff_specs, failed) in history:
        qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                        diff_specs=diff_specs)
        if failed:
          qa_cluster.AssertClusterVerify(warnings=policyerror)
        else:
          qa_cluster.AssertClusterVerify()

      qa_instance.TestInstanceRemove(instance)
    finally:
      instance.Release()

    # Now we replay the same policy changes, and we expect that the instance
    # cannot be created for the cases where we had a policy violation above
    for (new_specs, diff_specs, failed) in history:
      qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                      diff_specs=diff_specs)
      if failed:
        qa_instance.TestInstanceAddWithPlainDisk([node], fail=True)
      # Instance creation with no policy violation has been tested already
  finally:
    node.Release()
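

# For reference: the _BuildSpecDict and _BuildDoubleSpecDict helpers used by
# TestIPolicyPlainInstance build the dictionaries consumed by
# qa_cluster.TestClusterSetISpecs. A minimal sketch, assuming the
# ISPECS_MINMAX/ISPECS_STD layout used above (one min/max bound for
# _BuildSpecDict, two for _BuildDoubleSpecDict with only the entry at
# `index` constraining the parameter):
def _BuildSpecDict(par, mn, st, mx):
  # A single min/max bound plus a standard value for one parameter.
  return {
    constants.ISPECS_MINMAX: [{
      constants.ISPECS_MIN: {par: mn},
      constants.ISPECS_MAX: {par: mx},
      }],
    constants.ISPECS_STD: {par: st},
    }


def _BuildDoubleSpecDict(index, par, mn, st, mx):
  # Two min/max bounds; the standard value is optional (pass st=None to
  # leave it unchanged), and only the bound at `index` constrains `par`.
  new_spec = {
    constants.ISPECS_MINMAX: [{}, {}],
    }
  if st is not None:
    new_spec[constants.ISPECS_STD] = {par: st}
  new_spec[constants.ISPECS_MINMAX][index] = {
    constants.ISPECS_MIN: {par: mn},
    constants.ISPECS_MAX: {par: mx},
    }
  return new_spec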