def RunHardwareFailureTests(instance, inodes):
  """Test cluster internal hardware failure recovery.

  """
  RunTestIf("instance-failover", qa_instance.TestInstanceFailover, instance)
  RunTestIf(["instance-failover", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceFailover, instance)

  RunTestIf("instance-migrate", qa_instance.TestInstanceMigrate, instance)
  RunTestIf(["instance-migrate", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceMigrate, instance)

  if qa_config.TestEnabled("instance-replace-disks"):
    # We just need alternative secondary nodes, hence "- 1"
    othernodes = qa_config.AcquireManyNodes(len(inodes) - 1, exclude=inodes)
    try:
      RunTestIf(qa_rapi.Enabled, qa_rapi.TestRapiInstanceReplaceDisks,
                instance)
      RunTest(qa_instance.TestReplaceDisks,
              instance, inodes, othernodes)
    finally:
      qa_config.ReleaseManyNodes(othernodes)
    del othernodes

  if qa_config.TestEnabled("instance-recreate-disks"):
    try:
      acquirednodes = qa_config.AcquireManyNodes(len(inodes), exclude=inodes)
      othernodes = acquirednodes
    except qa_error.OutOfNodesError:
      if len(inodes) > 1:
        # If the cluster is not big enough, let's reuse some of the nodes,
        # but with different roles. In this way, we can test a DRBD instance
        # even on a 3-node cluster.
        acquirednodes = [qa_config.AcquireNode(exclude=inodes)]
        othernodes = acquirednodes + inodes[:-1]
      else:
        raise
    try:
      RunTest(qa_instance.TestRecreateDisks,
              instance, inodes, othernodes)
    finally:
      qa_config.ReleaseManyNodes(acquirednodes)

  if len(inodes) >= 2:
    RunTestIf("node-evacuate", qa_node.TestNodeEvacuate, inodes[0], inodes[1])
    RunTestIf("node-failover", qa_node.TestNodeFailover, inodes[0], inodes[1])
    RunTestIf("node-migrate", qa_node.TestNodeMigrate, inodes[0], inodes[1])

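# A worked example of the "instance-recreate-disks" fallback above, assuming
# a 3-node cluster and a DRBD instance (node names are hypothetical):
#
#   inodes = ["node-A", "node-B"]            # current primary/secondary
#   # AcquireManyNodes(2) raises OutOfNodesError: only node-C is free
#   acquirednodes = ["node-C"]
#   othernodes = acquirednodes + inodes[:-1]  # -> ["node-C", "node-A"]
#
# node-C takes over one disk role while node-A switches roles, which is
# enough node material for TestRecreateDisks even on a 3-node cluster.
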
def RunExclusiveStorageTests():
  """Test exclusive storage."""
  if not qa_config.TestEnabled("cluster-exclusive-storage"):
    return

  node = qa_config.AcquireNode()
  try:
    old_es = qa_cluster.TestSetExclStorCluster(False)
    qa_node.TestExclStorSingleNode(node)

    qa_cluster.TestSetExclStorCluster(True)
    qa_cluster.TestExclStorSharedPv(node)

    if qa_config.TestEnabled("instance-add-plain-disk"):
      # Make sure that the cluster doesn't have any pre-existing problem
      qa_cluster.AssertClusterVerify()

      # Create and allocate instances
      instance1 = qa_instance.TestInstanceAddWithPlainDisk([node])
      try:
        instance2 = qa_instance.TestInstanceAddWithPlainDisk([node])
        try:
          # cluster-verify checks that disks are allocated correctly
          qa_cluster.AssertClusterVerify()

          # Remove instances
          qa_instance.TestInstanceRemove(instance2)
          qa_instance.TestInstanceRemove(instance1)
        finally:
          instance2.Release()
      finally:
        instance1.Release()

    if qa_config.TestEnabled("instance-add-drbd-disk"):
      snode = qa_config.AcquireNode()
      try:
        qa_cluster.TestSetExclStorCluster(False)
        instance = qa_instance.TestInstanceAddWithDrbdDisk([node, snode])
        try:
          qa_cluster.TestSetExclStorCluster(True)
          exp_err = [constants.CV_EINSTANCEUNSUITABLENODE]
          qa_cluster.AssertClusterVerify(fail=True, errors=exp_err)
          qa_instance.TestInstanceRemove(instance)
        finally:
          instance.Release()
      finally:
        snode.Release()

    qa_cluster.TestSetExclStorCluster(old_es)
  finally:
    node.Release()

def RunPerformanceTests():
  if not qa_config.TestEnabled("performance"):
    ReportTestSkip("performance related tests", "performance")
    return

  if qa_config.TestEnabled("jobqueue-performance"):
    RunTest(qa_performance.TestParallelMaxInstanceCreationPerformance)
    RunTest(qa_performance.TestParallelNodeCountInstanceCreationPerformance)

    instances = qa_performance.CreateAllInstances()

    RunTest(qa_performance.TestParallelModify, instances)
    RunTest(qa_performance.TestParallelInstanceOSOperations, instances)
    RunTest(qa_performance.TestParallelInstanceQueries, instances)

    qa_performance.RemoveAllInstances(instances)

    RunTest(qa_performance.TestJobQueueSubmissionPerformance)

  if qa_config.TestEnabled("parallel-performance"):
    if qa_config.IsTemplateSupported(constants.DT_DRBD8):
      RunTest(qa_performance.TestParallelDRBDInstanceCreationPerformance)
    if qa_config.IsTemplateSupported(constants.DT_PLAIN):
      RunTest(qa_performance.TestParallelPlainInstanceCreationPerformance)

  if qa_config.IsTemplateSupported(constants.DT_DRBD8):
    inodes = qa_config.AcquireManyNodes(2)
    try:
      instance = qa_instance.TestInstanceAddWithDrbdDisk(inodes)
      try:
        RunTest(qa_performance.TestParallelInstanceFailover, instance)
        RunTest(qa_performance.TestParallelInstanceMigration, instance)
        RunTest(qa_performance.TestParallelInstanceReplaceDisks, instance)
        RunTest(qa_performance.TestParallelInstanceReboot, instance)
        RunTest(qa_performance.TestParallelInstanceReinstall, instance)
        RunTest(qa_performance.TestParallelInstanceRename, instance)
      finally:
        qa_instance.TestInstanceRemove(instance)
        instance.Release()
    finally:
      qa_config.ReleaseManyNodes(inodes)

def RunExportImportTests(instance, inodes):
  """Tries to export and import the instance.

  @type inodes: list of nodes
  @param inodes: current nodes of the instance

  """
  # FIXME: export explicitly bails out on file based storage. Other non-LVM
  # based storage types are untested, though. Also note that import could
  # still work, but is deeply embedded into the "export" case.
  if (qa_config.TestEnabled("instance-export") and
      instance.disk_template not in constants.DTS_FILEBASED):
    RunTest(qa_instance.TestInstanceExportNoTarget, instance)

    pnode = inodes[0]
    expnode = qa_config.AcquireNode(exclude=pnode)
    try:
      name = RunTest(qa_instance.TestInstanceExport, instance, expnode)

      RunTest(qa_instance.TestBackupList, expnode)

      if qa_config.TestEnabled("instance-import"):
        newinst = qa_config.AcquireInstance()
        try:
          RunTest(qa_instance.TestInstanceImport, newinst, pnode,
                  expnode, name)
          # Check if starting the instance works
          RunTest(qa_instance.TestInstanceStartup, newinst)
          RunTest(qa_instance.TestInstanceRemove, newinst)
        finally:
          newinst.Release()
    finally:
      expnode.Release()

  # FIXME: inter-cluster-instance-move crashes on file based instances :/
  # See Issue 414.
  if (qa_config.TestEnabled([qa_rapi.Enabled, "inter-cluster-instance-move"])
      and (instance.disk_template not in constants.DTS_FILEBASED)):
    newinst = qa_config.AcquireInstance()
    try:
      tnode = qa_config.AcquireNode(exclude=inodes)
      try:
        RunTest(qa_rapi.TestInterClusterInstanceMove, instance, newinst,
                inodes, tnode)
      finally:
        tnode.Release()
    finally:
      newinst.Release()

def SetupCluster(rapi_user):
  """Initializes the cluster.

  @param rapi_user: Login user for RAPI
  @return: Login secret for RAPI

  """
  rapi_secret = utils.GenerateSecret()
  RunTestIf("create-cluster", qa_cluster.TestClusterInit,
            rapi_user, rapi_secret)
  if not qa_config.TestEnabled("create-cluster"):
    # If the cluster is already in place, we assume that exclusive-storage is
    # already set according to the configuration
    qa_config.SetExclusiveStorage(qa_config.get("exclusive-storage", False))
    if qa_rapi.Enabled():
      # To support RAPI on an existing cluster we have to find out the secret
      rapi_secret = qa_rapi.LookupRapiSecret(rapi_user)

  qa_group.ConfigureGroups()

  # Test on empty cluster
  RunTestIf("node-list", qa_node.TestNodeList)
  RunTestIf("instance-list", qa_instance.TestInstanceList)
  RunTestIf("job-list", qa_job.TestJobList)

  RunTestIf("create-cluster", qa_node.TestNodeAddAll)
  if not qa_config.TestEnabled("create-cluster"):
    # consider the nodes are already there
    qa_node.MarkNodeAddedAll()

  RunTestIf("test-jobqueue", qa_cluster.TestJobqueue)
  RunTestIf("test-jobqueue", qa_job.TestJobCancellation)

  # enable the watcher (unconditionally)
  RunTest(qa_daemon.TestResumeWatcher)

  RunTestIf("node-list", qa_node.TestNodeList)

  # Test listing fields
  RunTestIf("node-list", qa_node.TestNodeListFields)
  RunTestIf("instance-list", qa_instance.TestInstanceListFields)
  RunTestIf("job-list", qa_job.TestJobListFields)
  RunTestIf("instance-export", qa_instance.TestBackupListFields)

  RunTestIf("node-info", qa_node.TestNodeInfo)

  return rapi_secret

def SetupRapi():
  """Sets up the RAPI certificate and usernames for the client.

  """
  if not Enabled():
    return (None, None)

  # pylint: disable=W0603
  # due to global usage
  global _rapi_username
  global _rapi_password

  _rapi_username = qa_config.get("rapi-user", "ganeti-qa")

  if qa_config.TestEnabled("create-cluster") and \
     qa_config.get("rapi-files-location") is None:
    # For a new cluster, we have to invent a secret and a user, unless it has
    # been provided separately
    _rapi_password = _CreateRapiUser(_rapi_username)
  else:
    _EnsureRapiFilesPresence()
    _rapi_password = _GetRapiSecret(_rapi_username)

  # Once a username and password have been set, we can fetch the certs and
  # get all we need for a working RAPI client.
  ReloadCertificates(ensure_presence=False)

def IsExclusiveStorageInstanceTestEnabled():
  test_name = "exclusive-storage-instance-tests"
  if qa_config.TestEnabled(test_name):
    vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
    vgscmd = utils.ShellQuoteArgs([
      "vgs", "--noheadings", "-o", "pv_count", vgname,
      ])
    nodes = qa_config.GetConfig()["nodes"]
    for node in nodes:
      try:
        pvnum = int(qa_utils.GetCommandOutput(node.primary, vgscmd))
      except Exception, e:
        msg = ("Cannot get the number of PVs on %s, needed by '%s': %s" %
               (node.primary, test_name, e))
        raise qa_error.Error(msg)
      if pvnum < 2:
        raise qa_error.Error("Node %s does not have enough PVs (%s) to run"
                             " '%s'" % (node.primary, pvnum, test_name))
    res = True
  else:
    res = False
  return res

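# A minimal sketch of the parsing relied on above (the sample output is
# hypothetical): "vgs --noheadings -o pv_count <vg>" prints a single
# whitespace-padded count, and int() tolerates the padding, so no explicit
# strip() is needed.
#
#   >>> int("  3\n")   # typical GetCommandOutput() result for 3 PVs
#   3
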
def RunOsTests():
  """Runs all tests related to gnt-os.

  """
  os_enabled = ["os", qa_config.NoVirtualCluster]

  if qa_config.TestEnabled(qa_rapi.Enabled):
    rapi_getos = qa_rapi.GetOperatingSystems
  else:
    rapi_getos = None

  for fn in [
    qa_os.TestOsList,
    qa_os.TestOsDiagnose,
    ]:
    RunTestIf(os_enabled, fn)

  for fn in [
    qa_os.TestOsValid,
    qa_os.TestOsInvalid,
    qa_os.TestOsPartiallyValid,
    ]:
    RunTestIf(os_enabled, fn, rapi_getos)

  for fn in [
    qa_os.TestOsModifyValid,
    qa_os.TestOsModifyInvalid,
    qa_os.TestOsStatesNonExisting,
    ]:
    RunTestIf(os_enabled, fn)

def Enabled():
  """Return whether remote API tests should be run.

  """
  # TODO: Implement RAPI tests for virtual clusters
  return (qa_config.TestEnabled("rapi") and
          not qa_config.UseVirtualCluster())

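# Usage sketch: Enabled is deliberately a callable so that it can be mixed
# with plain test names wherever qa_config.TestEnabled() accepts a condition
# (callables are evaluated as predicates, lists are AND-ed), e.g. as done
# elsewhere in the QA code:
#
#   RunTestIf(["instance-migrate", qa_rapi.Enabled],
#             qa_rapi.TestRapiInstanceMigrate, instance)
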
def RunPerformanceTests():
  if not qa_config.TestEnabled("performance"):
    ReportTestSkip("performance related tests", "performance")
    return

  # For reproducible performance, run performance tests with the watcher
  # paused.
  qa_utils.AssertCommand(["gnt-cluster", "watcher", "pause", "4h"])

  if qa_config.TestEnabled("jobqueue-performance"):
    RunTest(qa_performance.TestParallelMaxInstanceCreationPerformance)
    RunTest(qa_performance.TestParallelNodeCountInstanceCreationPerformance)

    instances = qa_performance.CreateAllInstances()

    RunTest(qa_performance.TestParallelModify, instances)
    RunTest(qa_performance.TestParallelInstanceOSOperations, instances)
    RunTest(qa_performance.TestParallelInstanceQueries, instances)

    qa_performance.RemoveAllInstances(instances)

    RunTest(qa_performance.TestJobQueueSubmissionPerformance)

  if qa_config.TestEnabled("parallel-performance"):
    if qa_config.IsTemplateSupported(constants.DT_DRBD8):
      RunTest(qa_performance.TestParallelDRBDInstanceCreationPerformance)
    if qa_config.IsTemplateSupported(constants.DT_PLAIN):
      RunTest(qa_performance.TestParallelPlainInstanceCreationPerformance)

  # Preparations need to be made only if some of these tests are enabled
  if qa_config.IsTemplateSupported(constants.DT_DRBD8) and \
     qa_config.TestEnabled(qa_config.Either(PARALLEL_TEST_DICT.keys())):
    inodes = qa_config.AcquireManyNodes(2)
    try:
      instance = qa_instance.TestInstanceAddWithDrbdDisk(inodes)
      try:
        for (test_name, test_fn) in PARALLEL_TEST_DICT.items():
          RunTestIf(test_name, test_fn, instance)
      finally:
        instance.Release()
      qa_instance.TestInstanceRemove(instance)
    finally:
      qa_config.ReleaseManyNodes(inodes)

  qa_utils.AssertCommand(["gnt-cluster", "watcher", "continue"])

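# PARALLEL_TEST_DICT is referenced above but not defined in this excerpt. A
# plausible sketch, assuming it maps QA test names to the per-instance
# parallel tests listed in the earlier variant of RunPerformanceTests (the
# key names are assumptions):
#
#   PARALLEL_TEST_DICT = {
#     "parallel-failover": qa_performance.TestParallelInstanceFailover,
#     "parallel-migration": qa_performance.TestParallelInstanceMigration,
#     "parallel-replace-disks":
#       qa_performance.TestParallelInstanceReplaceDisks,
#     "parallel-reboot": qa_performance.TestParallelInstanceReboot,
#     "parallel-reinstall": qa_performance.TestParallelInstanceReinstall,
#     "parallel-rename": qa_performance.TestParallelInstanceRename,
#     }
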
def RunCustomSshPortTests():
  """Test accessing nodes with custom SSH ports.

  This requires removing nodes, adding them to a new group, and then undoing
  the change.
  """
  if not qa_config.TestEnabled("group-custom-ssh-port"):
    return

  std_port = netutils.GetDaemonPort(constants.SSH)
  port = 211
  master = qa_config.GetMasterNode()
  with qa_config.AcquireManyNodesCtx(1, exclude=master) as nodes:
    # Checks if the node(s) could be contacted through IPv6.
    # If yes, better skip the whole test.
    for node in nodes:
      if qa_utils.UsesIPv6Connection(node.primary, std_port):
        print ("Node %s is likely to be reached using IPv6, "
               "skipping the test" % (node.primary, ))
        return

    for node in nodes:
      qa_node.NodeRemove(node)
    with qa_iptables.RulesContext() as r:
      with qa_group.NewGroupCtx() as group:
        qa_group.ModifyGroupSshPort(r, group, nodes, port)

        for node in nodes:
          qa_node.NodeAdd(node, group=group)

        # Make sure that the cluster doesn't have any pre-existing problem
        qa_cluster.AssertClusterVerify()

        # Create and allocate instances
        instance1 = qa_instance.TestInstanceAddWithPlainDisk(nodes)
        try:
          instance2 = qa_instance.TestInstanceAddWithPlainDisk(nodes)
          try:
            # cluster-verify checks that disks are allocated correctly
            qa_cluster.AssertClusterVerify()

            # Remove instances
            qa_instance.TestInstanceRemove(instance2)
            qa_instance.TestInstanceRemove(instance1)
          finally:
            instance2.Release()
        finally:
          instance1.Release()

        for node in nodes:
          qa_node.NodeRemove(node)

    for node in nodes:
      qa_node.NodeAdd(node)

    qa_cluster.AssertClusterVerify()

def SetupCluster():
  """Initializes the cluster.

  """
  RunTestIf("create-cluster", qa_cluster.TestClusterInit)
  if not qa_config.TestEnabled("create-cluster"):
    # If the cluster is already in place, we assume that exclusive-storage is
    # already set according to the configuration
    qa_config.SetExclusiveStorage(qa_config.get("exclusive-storage", False))

  qa_rapi.SetupRapi()

  qa_group.ConfigureGroups()

  # Test on empty cluster
  RunTestIf("node-list", qa_node.TestNodeList)
  RunTestIf("instance-list", qa_instance.TestInstanceList)
  RunTestIf("job-list", qa_job.TestJobList)

  RunTestIf("create-cluster", qa_node.TestNodeAddAll)
  if not qa_config.TestEnabled("create-cluster"):
    # consider the nodes are already there
    qa_node.MarkNodeAddedAll()

  RunTestIf("test-jobqueue", qa_cluster.TestJobqueue)
  RunTestIf("test-jobqueue", qa_job.TestJobCancellation)

  # enable the watcher (unconditionally)
  RunTest(qa_daemon.TestResumeWatcher)

  RunTestIf("node-list", qa_node.TestNodeList)

  # Test listing fields
  RunTestIf("node-list", qa_node.TestNodeListFields)
  RunTestIf("instance-list", qa_instance.TestInstanceListFields)
  RunTestIf("job-list", qa_job.TestJobListFields)
  RunTestIf("instance-export", qa_instance.TestBackupListFields)

  RunTestIf("node-info", qa_node.TestNodeInfo)

def RunTestIf(testnames, fn, *args, **kwargs):
  """Runs a test conditionally.

  @param testnames: either a single test name in the configuration
    file, or a list of testnames (which will be AND-ed together)

  """
  if qa_config.TestEnabled(testnames):
    RunTest(fn, *args, **kwargs)
  else:
    desc = _DescriptionOf(fn)
    ReportTestSkip(desc, testnames)

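# Usage sketch, mirroring calls elsewhere in this file: a plain string tests
# a single config flag, while a list AND-s several conditions; callables such
# as qa_rapi.Enabled are also accepted and evaluated as predicates.
#
#   RunTestIf("instance-migrate", qa_instance.TestInstanceMigrate, instance)
#   RunTestIf(["instance-migrate", qa_rapi.Enabled],
#             qa_rapi.TestRapiInstanceMigrate, instance)
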
def TestUpgrade():
  """Test gnt-cluster upgrade.

  This tests the 'gnt-cluster upgrade' command by flipping between the
  current and a different version of Ganeti. To also cover subtle points in
  the configuration up/down grades, instances are left over both upgrades.

  """
  this_version = qa_config.get("dir-version")
  other_version = qa_config.get("other-dir-version")
  if this_version is None or other_version is None:
    print qa_utils.FormatInfo("Test not run, as versions not specified")
    return

  inst_creates = []
  upgrade_instances = qa_config.get("upgrade-instances", [])
  live_instances = []
  for (test_name, templ, cf, n) in qa_instance.available_instance_tests:
    if (qa_config.TestEnabled(test_name) and
        qa_config.IsTemplateSupported(templ) and
        templ in upgrade_instances):
      inst_creates.append((cf, n))

  for (cf, n) in inst_creates:
    nodes = qa_config.AcquireManyNodes(n)
    live_instances.append(cf(nodes))

  AssertCommand(["gnt-cluster", "upgrade", "--to", other_version])
  AssertCommand(["gnt-cluster", "verify"])

  for instance in live_instances:
    qa_instance.TestInstanceRemove(instance)
    instance.Release()
  live_instances = []
  for (cf, n) in inst_creates:
    nodes = qa_config.AcquireManyNodes(n)
    live_instances.append(cf(nodes))

  AssertCommand(["gnt-cluster", "upgrade", "--to", this_version])
  AssertCommand(["gnt-cluster", "verify"])

  for instance in live_instances:
    qa_instance.TestInstanceRemove(instance)
    instance.Release()

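# A hypothetical QA-config fragment consumed by TestUpgrade (the key names
# come from the code above; the values are made up for illustration only):
#
#   "dir-version": "2.16",
#   "other-dir-version": "2.15",
#   "upgrade-instances": ["drbd", "plain"],
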
def TestGroupModify():
  """gnt-group modify"""
  # This test assumes LVM to be enabled, thus it should skip if
  # this is not the case
  if not qa_config.IsStorageTypeSupported(constants.ST_LVM_VG):
    return
  (group1, ) = qa_utils.GetNonexistentGroups(1)

  AssertCommand(["gnt-group", "add", group1])

  try:
    _TestGroupModifyIPolicy(group1)
    AssertCommand(["gnt-group", "modify", "--alloc-policy", "unallocable",
                   "--node-parameters", "oob_program=/bin/false", group1])
    AssertCommand(["gnt-group", "modify",
                   "--alloc-policy", "notvalid", group1], fail=True)
    AssertCommand(["gnt-group", "modify",
                   "--node-parameters", "spindle_count=10", group1])
    if qa_config.TestEnabled("htools"):
      AssertCommand(["hbal", "-L", "-G", group1])
    AssertCommand(["gnt-group", "modify",
                   "--node-parameters", "spindle_count=default", group1])
  finally:
    AssertCommand(["gnt-group", "remove", group1])

def RunInstanceTests():
  """Create and exercise instances."""

  requested_conversions = qa_config.get("convert-disk-templates", [])
  supported_conversions = \
      set(requested_conversions).difference(constants.DTS_NOT_CONVERTIBLE_TO)
  for (test_name, templ, create_fun, num_nodes) in \
      qa_instance.available_instance_tests:
    if (qa_config.TestEnabled(test_name) and
        qa_config.IsTemplateSupported(templ)):
      inodes = qa_config.AcquireManyNodes(num_nodes)
      try:
        instance = RunTest(create_fun, inodes)
        try:
          RunTestIf("instance-user-down", qa_instance.TestInstanceUserDown,
                    instance)
          RunTestIf("instance-communication",
                    qa_instance.TestInstanceCommunication,
                    instance,
                    qa_config.GetMasterNode())
          RunTestIf("cluster-epo", qa_cluster.TestClusterEpo)
          RunDaemonTests(instance)
          for node in inodes:
            RunTestIf("haskell-confd", qa_node.TestNodeListDrbd, node,
                      templ == constants.DT_DRBD8)
          if len(inodes) > 1:
            RunTestIf("group-rwops", qa_group.TestAssignNodesIncludingSplit,
                      constants.INITIAL_NODE_GROUP_NAME,
                      inodes[0].primary, inodes[1].primary)
          # This test will run once but it will cover all the supported
          # user-provided disk template conversions
          if qa_config.TestEnabled("instance-convert-disk"):
            if (len(supported_conversions) > 1 and
                instance.disk_template in supported_conversions):
              RunTest(qa_instance.TestInstanceShutdown, instance)
              RunTest(qa_instance.TestInstanceConvertDiskTemplate, instance,
                      supported_conversions)
              RunTest(qa_instance.TestInstanceStartup, instance)
              # At this point we clear the set because the requested
              # conversions have been tested
              supported_conversions.clear()
            else:
              test_desc = "Converting instance of template %s" % templ
              ReportTestSkip(test_desc, "conversion feature")
          RunTestIf("instance-modify-disks",
                    qa_instance.TestInstanceModifyDisks, instance)
          RunCommonInstanceTests(instance, inodes)
          if qa_config.TestEnabled("instance-modify-primary"):
            othernode = qa_config.AcquireNode()
            RunTest(qa_instance.TestInstanceModifyPrimaryAndBack,
                    instance, inodes[0], othernode)
            othernode.Release()
          RunGroupListTests()
          RunExportImportTests(instance, inodes)
          RunHardwareFailureTests(instance, inodes)
          RunRepairDiskSizes()
          RunTestIf(["rapi", "instance-data-censorship"],
                    qa_rapi.TestInstanceDataCensorship, instance, inodes)
          RunTest(qa_instance.TestInstanceRemove, instance)
        finally:
          instance.Release()
        del instance
      finally:
        qa_config.ReleaseManyNodes(inodes)
      qa_cluster.AssertClusterVerify()
    else:
      test_desc = "Creating instances of template %s" % templ
      if not qa_config.TestEnabled(test_name):
        ReportTestSkip(test_desc, test_name)
      else:
        ReportTestSkip(test_desc, "disk template %s" % templ)

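# Shape of qa_instance.available_instance_tests as unpacked above and in
# TestUpgrade -- one hypothetical entry (the actual contents live in
# qa_instance and are not shown in this excerpt):
#
#   ("instance-add-drbd-disk",                  # QA config test name
#    constants.DT_DRBD8,                        # disk template exercised
#    qa_instance.TestInstanceAddWithDrbdDisk,   # creation function
#    2)                                         # number of nodes to acquire
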
def TestInstanceCreationRestrictedByDiskTemplates():
  """Test adding instances for disabled disk templates."""
  if qa_config.TestEnabled("cluster-exclusive-storage"):
    # These tests are valid only for non-exclusive storage
    return

  enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
  nodes = qa_config.AcquireManyNodes(2)

  # Setup the cluster with the enabled_disk_templates
  AssertCommand(["gnt-cluster", "modify",
                 "--enabled-disk-templates=%s" %
                 ",".join(enabled_disk_templates),
                 "--ipolicy-disk-templates=%s" %
                 ",".join(enabled_disk_templates)],
                fail=False)

  # Test instance creation for enabled disk templates
  for disk_template in enabled_disk_templates:
    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=False)
    TestInstanceRemove(instance)
    instance.Release()

  # Test that instance creation fails for disabled disk templates
  disabled_disk_templates = list(constants.DISK_TEMPLATES
                                 - set(enabled_disk_templates))
  for disk_template in disabled_disk_templates:
    instance = CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)

  # Test instance creation after disabling currently enabled disk templates
  if len(enabled_disk_templates) > 1:
    # Partition the disk templates, enable them separately and check if the
    # disabled ones cannot be used by instances.
    middle = len(enabled_disk_templates) / 2
    templates1 = enabled_disk_templates[:middle]
    templates2 = enabled_disk_templates[middle:]

    for (enabled, disabled) in [(templates1, templates2),
                                (templates2, templates1)]:
      AssertCommand(["gnt-cluster", "modify",
                     "--enabled-disk-templates=%s" % ",".join(enabled),
                     "--ipolicy-disk-templates=%s" % ",".join(enabled)],
                    fail=False)
      for disk_template in disabled:
        CreateInstanceByDiskTemplate(nodes, disk_template, fail=True)
  elif len(enabled_disk_templates) == 1:
    # If only one disk template is enabled in the QA config, we have to
    # enable some other templates in order to test if disabling the only
    # enabled disk template prohibits creating instances of that template.
    other_disk_templates = list(
      set([constants.DT_DISKLESS, constants.DT_BLOCK]) -
      set(enabled_disk_templates))
    AssertCommand(["gnt-cluster", "modify",
                   "--enabled-disk-templates=%s" %
                   ",".join(other_disk_templates),
                   "--ipolicy-disk-templates=%s" %
                   ",".join(other_disk_templates)],
                  fail=False)
    CreateInstanceByDiskTemplate(nodes, enabled_disk_templates[0], fail=True)
  else:
    raise qa_error.Error("Please enable at least one disk template"
                         " in your QA setup.")

  # Restore initially enabled disk templates
  AssertCommand(["gnt-cluster", "modify",
                 "--enabled-disk-templates=%s" %
                 ",".join(enabled_disk_templates),
                 "--ipolicy-disk-templates=%s" %
                 ",".join(enabled_disk_templates)],
                fail=False)

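# Worked example of the partition logic above (template names illustrative):
# with enabled_disk_templates = ["plain", "drbd", "file"], middle is
# 3 / 2 == 1 under Python 2 integer division, so templates1 == ["plain"] and
# templates2 == ["drbd", "file"]; each half is then enabled in turn while
# instance creation with the templates of the other half must fail.
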
def TestInstanceModify(instance):
  """gnt-instance modify"""
  default_hv = qa_config.GetDefaultHypervisor()

  # Assume /sbin/init exists on all systems
  test_kernel = "/sbin/init"
  test_initrd = test_kernel

  orig_maxmem = qa_config.get(constants.BE_MAXMEM)
  orig_minmem = qa_config.get(constants.BE_MINMEM)
  #orig_bridge = qa_config.get("bridge", "xen-br0")

  args = [
    ["-B", "%s=128" % constants.BE_MINMEM],
    ["-B", "%s=128" % constants.BE_MAXMEM],
    ["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
                            constants.BE_MAXMEM, orig_maxmem)],
    ["-B", "%s=2" % constants.BE_VCPUS],
    ["-B", "%s=1" % constants.BE_VCPUS],
    ["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER,
                      constants.VALUE_DEFAULT)],

    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],

    # TODO: bridge tests
    #["--bridge", "xen-br1"],
    #["--bridge", orig_bridge],
    ]

  if default_hv == constants.HT_XEN_PVM:
    args.extend([
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)],
      ["-H", "no_%s" % (constants.HV_INITRD_PATH, )],
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_XEN_HVM:
    args.extend([
      ["-H", "%s=acn" % constants.HV_BOOT_ORDER],
      ["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_KVM and \
      qa_config.TestEnabled("instance-device-hotplug"):
    args.extend([
      ["--net", "-1:add", "--hotplug"],
      ["--net", "-1:modify,mac=aa:bb:cc:dd:ee:ff", "--hotplug", "--force"],
      ["--net", "-1:remove", "--hotplug"],
      ["--disk", "-1:add,size=1G", "--hotplug"],
      ["--disk", "-1:remove", "--hotplug"],
      ])

  for alist in args:
    AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])

  # check no-modify
  AssertCommand(["gnt-instance", "modify", instance.name], fail=True)

  # Marking offline while instance is running must fail...
  AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
                fail=True)

  # ...while making it online is ok, and should work
  AssertCommand(["gnt-instance", "modify", "--online", instance.name])

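# For reference, the first args entry above expands to a command line like
# the following (the instance name is hypothetical):
#
#   gnt-instance modify -B minmem=128 instance1.example.com
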
def RunCommonInstanceTests(instance, inst_nodes):
  """Runs a few tests that are common to all disk types.

  """
  RunTestIf("instance-shutdown", qa_instance.TestInstanceShutdown, instance)
  RunTestIf(["instance-shutdown", "instance-console", qa_rapi.Enabled],
            qa_rapi.TestRapiStoppedInstanceConsole, instance)
  RunTestIf(["instance-shutdown", "instance-modify"],
            qa_instance.TestInstanceStoppedModify, instance)
  RunTestIf("instance-shutdown", qa_instance.TestInstanceStartup, instance)

  # Test shutdown/start via RAPI
  RunTestIf(["instance-shutdown", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceShutdown, instance)
  RunTestIf(["instance-shutdown", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceStartup, instance)

  RunTestIf("instance-list", qa_instance.TestInstanceList)

  RunTestIf("instance-info", qa_instance.TestInstanceInfo, instance)

  RunTestIf("instance-modify", qa_instance.TestInstanceModify, instance)
  RunTestIf(["instance-modify", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceModify, instance)

  RunTestIf("instance-console", qa_instance.TestInstanceConsole, instance)
  RunTestIf(["instance-console", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceConsole, instance)

  RunTestIf("instance-device-names", qa_instance.TestInstanceDeviceNames,
            instance)
  DOWN_TESTS = qa_config.Either([
    "instance-reinstall",
    "instance-rename",
    "instance-grow-disk",
    ])

  # shutdown instance for any 'down' tests
  RunTestIf(DOWN_TESTS, qa_instance.TestInstanceShutdown, instance)

  # now run the 'down' state tests
  RunTestIf("instance-reinstall", qa_instance.TestInstanceReinstall, instance)
  RunTestIf(["instance-reinstall", qa_rapi.Enabled],
            qa_rapi.TestRapiInstanceReinstall, instance)

  if qa_config.TestEnabled("instance-rename"):
    tgt_instance = qa_config.AcquireInstance()
    try:
      rename_source = instance.name
      rename_target = tgt_instance.name
      # perform instance rename to the same name
      RunTest(qa_instance.TestInstanceRenameAndBack,
              rename_source, rename_source)
      RunTestIf(qa_rapi.Enabled, qa_rapi.TestRapiInstanceRenameAndBack,
                rename_source, rename_source)
      if rename_target is not None:
        # perform instance rename to a different name, if we have one
        # configured
        RunTest(qa_instance.TestInstanceRenameAndBack,
                rename_source, rename_target)
        RunTestIf(qa_rapi.Enabled, qa_rapi.TestRapiInstanceRenameAndBack,
                  rename_source, rename_target)
    finally:
      tgt_instance.Release()

  RunTestIf(["instance-grow-disk"], qa_instance.TestInstanceGrowDisk,
            instance)

  # and now start the instance again
  RunTestIf(DOWN_TESTS, qa_instance.TestInstanceStartup, instance)

  RunTestIf("instance-reboot", qa_instance.TestInstanceReboot, instance)

  RunTestIf("tags", qa_tags.TestInstanceTags, instance)

  if instance.disk_template == constants.DT_DRBD8:
    RunTestIf("cluster-verify",
              qa_cluster.TestClusterVerifyDisksBrokenDRBD, instance,
              inst_nodes)
  RunTestIf("cluster-verify", qa_cluster.TestClusterVerify)

  RunTestIf(qa_rapi.Enabled, qa_rapi.TestInstance, instance)

  # Lists instances, too
  RunTestIf("node-list", qa_node.TestNodeList)

  # Some jobs have been run, let's test listing them
  RunTestIf("job-list", qa_job.TestJobList)

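# Condition semantics used above, as a sketch: a plain list passed to
# RunTestIf is AND-ed, while qa_config.Either() wraps a list so that
# TestEnabled() treats it as OR (compare the "if some of these tests are
# enabled" usage in RunPerformanceTests). Here the instance is shut down if
# *any* of the three 'down' tests is enabled, and started again afterwards.
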
def RunQa():
  """Main QA body.

  """
  RunTestBlock(RunEnvTests)
  SetupCluster()

  RunTestBlock(RunClusterTests)
  RunTestBlock(RunOsTests)

  RunTestIf("tags", qa_tags.TestClusterTags)

  RunTestBlock(RunCommonNodeTests)
  RunTestBlock(RunGroupListTests)
  RunTestBlock(RunGroupRwTests)
  RunTestBlock(RunNetworkTests)
  RunTestBlock(RunFilterTests)

  # The master shouldn't be readded or put offline; "delay" needs a
  # non-master node to test
  pnode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
  try:
    RunTestIf("node-readd", qa_node.TestNodeReadd, pnode)
    RunTestIf("node-modify", qa_node.TestNodeModify, pnode)
    RunTestIf("delay", qa_cluster.TestDelay, pnode)
  finally:
    pnode.Release()

  # Make sure the cluster is clean before running instance tests
  qa_cluster.AssertClusterVerify()

  pnode = qa_config.AcquireNode()
  try:
    RunTestIf("tags", qa_tags.TestNodeTags, pnode)

    if qa_rapi.Enabled():
      RunTest(qa_rapi.TestNode, pnode)

      if (qa_config.TestEnabled("instance-add-plain-disk")
          and qa_config.IsTemplateSupported(constants.DT_PLAIN)):
        # Normal instance allocation via RAPI
        for use_client in [True, False]:
          rapi_instance = RunTest(qa_rapi.TestRapiInstanceAdd, pnode,
                                  use_client)
          try:
            if qa_config.TestEnabled("instance-plain-rapi-common-tests"):
              RunCommonInstanceTests(rapi_instance, [pnode])
            RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance,
                    use_client)
          finally:
            rapi_instance.Release()
          del rapi_instance

        # Multi-instance allocation
        rapi_instance_one, rapi_instance_two = \
          RunTest(qa_rapi.TestRapiInstanceMultiAlloc, pnode)

        try:
          RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_one, True)
          RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_two, True)
        finally:
          rapi_instance_one.Release()
          rapi_instance_two.Release()
  finally:
    pnode.Release()

  config_list = [
    ("default-instance-tests", lambda: None, lambda _: None),
    (IsExclusiveStorageInstanceTestEnabled,
     lambda: qa_cluster.TestSetExclStorCluster(True),
     qa_cluster.TestSetExclStorCluster),
    ]
  for (conf_name, setup_conf_f, restore_conf_f) in config_list:
    if qa_config.TestEnabled(conf_name):
      oldconf = setup_conf_f()
      RunTestBlock(RunInstanceTests)
      restore_conf_f(oldconf)

  pnode = qa_config.AcquireNode()
  try:
    if qa_config.TestEnabled(["instance-add-plain-disk",
                              "instance-export"]):
      for shutdown in [False, True]:
        instance = RunTest(qa_instance.TestInstanceAddWithPlainDisk, [pnode])
        try:
          expnode = qa_config.AcquireNode(exclude=pnode)
          try:
            if shutdown:
              # Stop instance before exporting and removing it
              RunTest(qa_instance.TestInstanceShutdown, instance)
            RunTest(qa_instance.TestInstanceExportWithRemove, instance,
                    expnode)
            RunTest(qa_instance.TestBackupList, expnode)
          finally:
            expnode.Release()
        finally:
          instance.Release()
        del expnode
        del instance
      qa_cluster.AssertClusterVerify()
  finally:
    pnode.Release()

  if qa_rapi.Enabled():
    RunTestIf("filters", qa_rapi.TestFilters)

  RunTestIf("cluster-upgrade", qa_cluster.TestUpgrade)

  RunTestBlock(RunExclusiveStorageTests)
  RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"],
            TestIPolicyPlainInstance)

  RunTestBlock(RunCustomSshPortTests)

  RunTestIf("instance-add-restricted-by-disktemplates",
            qa_instance.TestInstanceCreationRestrictedByDiskTemplates)

  # Test removing instance with offline drbd secondary
  if qa_config.TestEnabled(["instance-remove-drbd-offline",
                            "instance-add-drbd-disk"]):
    # Make sure the master is not put offline
    snode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
    try:
      pnode = qa_config.AcquireNode(exclude=snode)
      try:
        instance = qa_instance.TestInstanceAddWithDrbdDisk([pnode, snode])
        set_offline = lambda node: qa_node.MakeNodeOffline(node, "yes")
        set_online = lambda node: qa_node.MakeNodeOffline(node, "no")
        RunTest(qa_instance.TestRemoveInstanceOfflineNode, instance, snode,
                set_offline, set_online)
      finally:
        pnode.Release()
    finally:
      snode.Release()
    qa_cluster.AssertClusterVerify()

  RunTestBlock(RunMonitoringTests)

  RunPerformanceTests()

  RunTestIf("cluster-destroy", qa_node.TestNodeRemoveAll)

  RunTestIf("cluster-destroy", qa_cluster.TestClusterDestroy)

def RunMonitoringTests():
  if qa_config.TestEnabled("mon-collector"):
    RunTest(qa_monitoring.TestInstStatusCollector)