def TestIcmpPing():
  """ICMP ping each node.

  Pings every node's primary address (and each configured secondary
  address) from every node in the cluster, to check full connectivity.

  """
  nodes = qa_config.get("nodes")

  pingprimary = pingsecondary = "fping"
  if qa_config.get("primary_ip_version") == 6:
    # fping only handles IPv4; IPv6 primaries need fping6
    pingprimary = "fping6"

  pricmd = [pingprimary, "-e"]
  seccmd = [pingsecondary, "-e"]
  for i in nodes:
    pricmd.append(i.primary)
    if i.secondary:
      seccmd.append(i.secondary)

  pristr = utils.ShellQuoteArgs(pricmd)
  # Only run the secondary-address ping if at least one node actually has a
  # secondary address.  The previous check ("if seccmd:") was always true,
  # since seccmd always contains the fping binary and its "-e" flag, which
  # made the else branch dead code.
  if len(seccmd) > 2:
    cmdall = "%s && %s" % (pristr, utils.ShellQuoteArgs(seccmd))
  else:
    cmdall = pristr

  for node in nodes:
    AssertCommand(cmdall, node=node)
def _GenInstanceAllocationDict(node, instance):
  """Creates an instance allocation dict to be used with the RAPI.

  Fills in disks, NIC 0 (with a Ganeti-generated MAC) and backend memory
  parameters from the QA configuration, then builds the allocation request
  through the RAPI client.

  @param node: node whose primary address is used as the instance's pnode
  @param instance: QA instance object to describe in the request
  @return: whatever C{_rapi_client.InstanceAllocation} returns

  """
  instance.SetDiskTemplate(constants.DT_PLAIN)

  # Disk sizes from the QA config are human-readable units, parse to MiB
  disks = [{
    "size": utils.ParseUnit(d.get("size")),
    "name": str(d.get("name"))
  } for d in qa_config.GetDiskOptions()]

  # Let Ganeti generate the MAC address of the first NIC
  nic0_mac = instance.GetNicMacAddr(0, constants.VALUE_GENERATE)
  nics = [{
    constants.INIC_MAC: nic0_mac,
  }]

  beparams = {
    constants.BE_MAXMEM: utils.ParseUnit(qa_config.get(constants.BE_MAXMEM)),
    constants.BE_MINMEM: utils.ParseUnit(qa_config.get(constants.BE_MINMEM)),
  }

  return _rapi_client.InstanceAllocation(constants.INSTANCE_CREATE,
                                         instance.name,
                                         constants.DT_PLAIN,
                                         disks, nics,
                                         os=qa_config.get("os"),
                                         pnode=node.primary,
                                         beparams=beparams)
def SetupRapi():
  """Sets up the RAPI certificate and usernames for the client.

  Initializes the module-level RAPI credentials: either inventing a new
  user/secret on a freshly created cluster, or reading the ones provided
  via "rapi-files-location", and finally reloads the certificates.

  """
  if not Enabled():
    # NOTE(review): returns a tuple here but falls through to an implicit
    # None below; callers apparently ignore the return value
    return (None, None)

  # pylint: disable=W0603
  # due to global usage
  global _rapi_username
  global _rapi_password

  _rapi_username = qa_config.get("rapi-user", "ganeti-qa")

  if qa_config.TestEnabled("create-cluster") and \
     qa_config.get("rapi-files-location") is None:
    # For a new cluster, we have to invent a secret and a user, unless it has
    # been provided separately
    _rapi_password = _CreateRapiUser(_rapi_username)
  else:
    _EnsureRapiFilesPresence()
    _rapi_password = _GetRapiSecret(_rapi_username)

  # Once a username and password have been set, we can fetch the certs and
  # get all we need for a working RAPI client.
  ReloadCertificates(ensure_presence=False)
def _GenInstanceAllocationDict(node, instance):
  """Builds an instance allocation request dict for use with the RAPI.

  The instance is switched to the plain disk template and its disks, NIC 0
  and backend memory settings are taken from the QA configuration.

  """
  instance.SetDiskTemplate(constants.DT_PLAIN)

  disks = []
  for disk_opts in qa_config.GetDiskOptions():
    disks.append({"size": utils.ParseUnit(disk_opts.get("size")),
                  "name": str(disk_opts.get("name"))})

  # NIC 0 with a MAC address generated by Ganeti
  nics = [{
    constants.INIC_MAC: instance.GetNicMacAddr(0, constants.VALUE_GENERATE),
  }]

  maxmem = utils.ParseUnit(qa_config.get(constants.BE_MAXMEM))
  minmem = utils.ParseUnit(qa_config.get(constants.BE_MINMEM))
  beparams = {
    constants.BE_MAXMEM: maxmem,
    constants.BE_MINMEM: minmem,
  }

  return _rapi_client.InstanceAllocation(constants.INSTANCE_CREATE,
                                         instance.name,
                                         constants.DT_PLAIN,
                                         disks, nics,
                                         os=qa_config.get("os"),
                                         pnode=node.primary,
                                         beparams=beparams)
def TestNodeAddAll():
  """Adding all nodes to cluster.

  Adds every configured non-master node, then checks on each node that the
  expected Ganeti daemons are running.

  """
  master = qa_config.GetMasterNode()
  for node in qa_config.get("nodes"):
    if node != master:
      NodeAdd(node, readd=False)

  for node in qa_config.get("nodes"):
    def GetNonStartDaemons():
      # List the command names of all running processes on the node
      cmd = utils.ShellQuoteArgs(["ps", "-Ao", "comm"])
      prcs = AssertCommand(cmd, node=node)[1]

      non_start_daemons = []

      def AddIfNotStarted(daemon):
        if daemon not in prcs:
          non_start_daemons.append(daemon)

      AddIfNotStarted('ganeti-noded')
      if constants.ENABLE_MOND:
        AddIfNotStarted('ganeti-mond')
      if node == master:
        # These daemons only run on the master node
        AddIfNotStarted('ganeti-wconfd')
        AddIfNotStarted('ganeti-rapi')
        AddIfNotStarted('ganeti-luxid')
        AddIfNotStarted('ganeti-maintd')
      return non_start_daemons

    nsd = GetNonStartDaemons()
    for daemon in nsd:
      # NOTE(review): raises on the first missing daemon only; any further
      # missing daemons on this or other nodes are not reported
      raise qa_error.Error(daemon + ' is not running at %s' % node.primary)
def GetGenericAddParameters(inst, disk_template, force_mac=None):
  """Returns the common "gnt-instance add" command-line parameters.

  Builds the backend memory settings, one "--disk" option per configured
  disk (unless diskless) and, when a MAC address is known or forced, the
  NIC 0 specification.

  """
  minmem = qa_config.get(constants.BE_MINMEM)
  maxmem = qa_config.get(constants.BE_MAXMEM)
  params = ["-B",
            "%s=%s,%s=%s" % (constants.BE_MINMEM, minmem,
                             constants.BE_MAXMEM, maxmem)]

  if disk_template != constants.DT_DISKLESS:
    for idx, disk in enumerate(qa_config.GetDiskOptions()):
      diskparams = "%s:size=%s" % (idx, disk.get("size"))
      disk_name = disk.get("name")
      if disk_name:
        diskparams += ",name=%s" % disk_name
      if qa_config.AreSpindlesSupported():
        spindles = disk.get("spindles")
        if spindles is None:
          raise qa_error.Error(
              "'spindles' is a required parameter for disks"
              " when you enable exclusive storage tests")
        diskparams += ",spindles=%s" % spindles
      params += ["--disk", diskparams]

  # A forced MAC takes precedence over the instance's configured one
  nic0_mac = force_mac or inst.GetNicMacAddr(0, None)
  if nic0_mac:
    params += ["--net", "0:mac=%s" % nic0_mac]

  return params
def TestClusterRename():
  """gnt-cluster rename

  Renames the cluster to the configured "rename" target, verifies it,
  renames it back to the original name and verifies again.  Skipped with a
  printed error when no "rename" entry is configured.

  """
  master = qa_config.GetMasterNode()

  cmd = ['gnt-cluster', 'rename', '-f']

  original_name = qa_config.get('name')
  rename_target = qa_config.get('rename', None)
  if rename_target is None:
    print qa_utils.FormatError('"rename" entry is missing')
    return

  cmd_1 = cmd + [rename_target]
  cmd_2 = cmd + [original_name]

  cmd_verify = ['gnt-cluster', 'verify']

  # Rename forth and back, verifying the cluster after each rename
  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd_1)).wait(), 0)

  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd_verify)).wait(), 0)

  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd_2)).wait(), 0)

  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd_verify)).wait(), 0)
def GetGenericAddParameters(inst, disk_template, force_mac=None):
  """Returns the generic "gnt-instance add" parameters for the QA config.

  @param inst: QA instance object, used for its NIC 0 MAC address
  @param disk_template: disk template name; C{DT_DISKLESS} skips all
    "--disk" options
  @param force_mac: when set, overrides the instance's configured MAC
  @rtype: list
  @return: command-line arguments to append to "gnt-instance add"

  """
  params = ["-B"]
  params.append("%s=%s,%s=%s" % (constants.BE_MINMEM,
                                 qa_config.get(constants.BE_MINMEM),
                                 constants.BE_MAXMEM,
                                 qa_config.get(constants.BE_MAXMEM)))

  if disk_template != constants.DT_DISKLESS:
    for idx, disk in enumerate(qa_config.GetDiskOptions()):
      size = disk.get("size")
      name = disk.get("name")
      diskparams = "%s:size=%s" % (idx, size)
      if name:
        diskparams += ",name=%s" % name
      if qa_config.AreSpindlesSupported():
        # Exclusive storage tests require spindles on every disk
        spindles = disk.get("spindles")
        if spindles is None:
          raise qa_error.Error("'spindles' is a required parameter for disks"
                               " when you enable exclusive storage tests")
        diskparams += ",spindles=%s" % spindles
      params.extend(["--disk", diskparams])

  # Set static MAC address if configured
  if force_mac:
    nic0_mac = force_mac
  else:
    nic0_mac = inst.GetNicMacAddr(0, None)

  if nic0_mac:
    params.extend(["--net", "0:mac=%s" % nic0_mac])

  return params
def TestClusterBurnin():
  """Burnin

  Runs the burnin tool over a configurable number of acquired instances,
  with options (disk template, parallelism, rename, reboot types) read
  from the QA "options" section.

  """
  master = qa_config.GetMasterNode()

  options = qa_config.get("options", {})
  disk_template = options.get("burnin-disk-template", constants.DT_DRBD8)
  parallel = options.get("burnin-in-parallel", False)
  check_inst = options.get("burnin-check-instances", False)
  do_rename = options.get("burnin-rename", "")
  do_reboot = options.get("burnin-reboot", True)
  reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)

  # Get as many instances as we need
  instances = []
  try:
    try:
      num = qa_config.get("options", {}).get("burnin-instances", 1)
      for _ in range(0, num):
        instances.append(qa_config.AcquireInstance())
    except qa_error.OutOfInstancesError:
      # Burnin can run with fewer instances than requested
      print "Not enough instances, continuing anyway."

    if len(instances) < 1:
      raise qa_error.Error("Burnin needs at least one instance")

    script = qa_utils.UploadFile(master.primary, "../tools/burnin")
    try:
      disks = qa_config.GetDiskOptions()

      # Run burnin
      cmd = [
        "env",
        "PYTHONPATH=%s" % _constants.VERSIONEDSHAREDIR,
        script,
        "--os=%s" % qa_config.get("os"),
        "--minmem-size=%s" % qa_config.get(constants.BE_MINMEM),
        "--maxmem-size=%s" % qa_config.get(constants.BE_MAXMEM),
        "--disk-size=%s" % ",".join([d.get("size") for d in disks]),
        "--disk-growth=%s" % ",".join([d.get("growth") for d in disks]),
        "--disk-template=%s" % disk_template
        ]
      if parallel:
        cmd.append("--parallel")
        cmd.append("--early-release")
      if check_inst:
        cmd.append("--http-check")
      if do_rename:
        cmd.append("--rename=%s" % do_rename)
      if not do_reboot:
        cmd.append("--no-reboot")
      else:
        cmd.append("--reboot-types=%s" % ",".join(reboot_types))
      cmd += [inst.name for inst in instances]
      AssertCommand(cmd)
    finally:
      # Always remove the uploaded script from the master node
      AssertCommand(["rm", "-f", script])
  finally:
    # Release acquired instances regardless of the outcome
    for inst in instances:
      inst.Release()
def TestClusterBurnin():
  """Burnin

  Acquires instances and runs the burnin tool on the master node; all
  burnin tuning knobs come from the QA "options" section.

  """
  master = qa_config.GetMasterNode()

  options = qa_config.get("options", {})
  disk_template = options.get("burnin-disk-template", constants.DT_DRBD8)
  parallel = options.get("burnin-in-parallel", False)
  check_inst = options.get("burnin-check-instances", False)
  do_rename = options.get("burnin-rename", "")
  do_reboot = options.get("burnin-reboot", True)
  reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)

  # Get as many instances as we need
  instances = []
  try:
    try:
      num = qa_config.get("options", {}).get("burnin-instances", 1)
      for _ in range(0, num):
        instances.append(qa_config.AcquireInstance())
    except qa_error.OutOfInstancesError:
      # Running out of spare instances is tolerated; burnin proceeds with
      # whatever was acquired
      print "Not enough instances, continuing anyway."

    if len(instances) < 1:
      raise qa_error.Error("Burnin needs at least one instance")

    script = qa_utils.UploadFile(master.primary, "../tools/burnin")
    try:
      disks = qa_config.GetDiskOptions()

      # Run burnin
      cmd = ["env",
             "PYTHONPATH=%s" % _constants.VERSIONEDSHAREDIR,
             script,
             "--os=%s" % qa_config.get("os"),
             "--minmem-size=%s" % qa_config.get(constants.BE_MINMEM),
             "--maxmem-size=%s" % qa_config.get(constants.BE_MAXMEM),
             "--disk-size=%s" % ",".join([d.get("size") for d in disks]),
             "--disk-growth=%s" % ",".join([d.get("growth") for d in disks]),
             "--disk-template=%s" % disk_template]
      if parallel:
        cmd.append("--parallel")
        cmd.append("--early-release")
      if check_inst:
        cmd.append("--http-check")
      if do_rename:
        cmd.append("--rename=%s" % do_rename)
      if not do_reboot:
        cmd.append("--no-reboot")
      else:
        cmd.append("--reboot-types=%s" % ",".join(reboot_types))
      cmd += [inst.name for inst in instances]
      AssertCommand(cmd)
    finally:
      # Clean up the uploaded burnin script
      AssertCommand(["rm", "-f", script])
  finally:
    # Release the acquired instances even on failure
    for inst in instances:
      inst.Release()
def TestRapiInstanceAdd(node, use_client):
  """Test adding a new instance via RAPI.

  @param node: node to use as the instance's primary node
  @param use_client: if true, create through the RAPI client library;
    otherwise issue a raw HTTP POST against /2/instances
  @return: the acquired QA instance object (released on failure)

  """
  if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
    return

  instance = qa_config.AcquireInstance()
  instance.SetDiskTemplate(constants.DT_PLAIN)
  try:
    disks = [{
      "size": utils.ParseUnit(d.get("size")),
      "name": str(d.get("name"))
    } for d in qa_config.GetDiskOptions()]
    # MAC of NIC 0 is generated by Ganeti
    nic0_mac = instance.GetNicMacAddr(0, constants.VALUE_GENERATE)
    nics = [{
      constants.INIC_MAC: nic0_mac,
    }]

    beparams = {
      constants.BE_MAXMEM: utils.ParseUnit(qa_config.get(constants.BE_MAXMEM)),
      constants.BE_MINMEM: utils.ParseUnit(qa_config.get(constants.BE_MINMEM)),
    }

    if use_client:
      job_id = _rapi_client.CreateInstance(constants.INSTANCE_CREATE,
                                           instance.name,
                                           constants.DT_PLAIN,
                                           disks, nics,
                                           os=qa_config.get("os"),
                                           pnode=node.primary,
                                           beparams=beparams)
    else:
      # Hand-built request body, exercising the raw HTTP interface
      body = {
        "__version__": 1,
        "mode": constants.INSTANCE_CREATE,
        "name": instance.name,
        "os_type": qa_config.get("os"),
        "disk_template": constants.DT_PLAIN,
        "pnode": node.primary,
        "beparams": beparams,
        "disks": disks,
        "nics": nics,
      }

      (job_id, ) = _DoTests([
        ("/2/instances", _VerifyReturnsJob, "POST", body),
        ])

    _WaitForRapiJob(job_id)

    return instance
  except:
    # Release the instance on any failure, then re-raise
    instance.Release()
    raise
def TestInstanceModify(instance):
  """gnt-instance modify

  Applies a series of backend and hypervisor parameter modifications to
  the instance (restoring originals where applicable), then checks the
  no-modification and offline/online cases.

  """
  default_hv = qa_config.GetDefaultHypervisor()

  # Assume /sbin/init exists on all systems
  test_kernel = "/sbin/init"
  test_initrd = test_kernel

  orig_maxmem = qa_config.get(constants.BE_MAXMEM)
  orig_minmem = qa_config.get(constants.BE_MINMEM)
  #orig_bridge = qa_config.get("bridge", "xen-br0")

  args = [
    ["-B", "%s=128" % constants.BE_MINMEM],
    ["-B", "%s=128" % constants.BE_MAXMEM],
    ["-B", "%s=%s,%s=%s" % (constants.BE_MINMEM, orig_minmem,
                            constants.BE_MAXMEM, orig_maxmem)],
    ["-B", "%s=2" % constants.BE_VCPUS],
    ["-B", "%s=1" % constants.BE_VCPUS],
    ["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_TRUE)],
    ["-B", "%s=%s" % (constants.BE_ALWAYS_FAILOVER, constants.VALUE_DEFAULT)],

    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)],
    ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)],

    # TODO: bridge tests
    #["--bridge", "xen-br1"],
    #["--bridge", orig_bridge],
    ]

  # Hypervisor-specific parameters
  if default_hv == constants.HT_XEN_PVM:
    args.extend([
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)],
      ["-H", "no_%s" % (constants.HV_INITRD_PATH, )],
      ["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)],
      ])
  elif default_hv == constants.HT_XEN_HVM:
    args.extend([
      ["-H", "%s=acn" % constants.HV_BOOT_ORDER],
      ["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)],
      ])

  for alist in args:
    AssertCommand(["gnt-instance", "modify"] + alist + [instance.name])

  # check no-modify
  AssertCommand(["gnt-instance", "modify", instance.name], fail=True)

  # Marking offline while instance is running must fail...
  AssertCommand(["gnt-instance", "modify", "--offline", instance.name],
                fail=True)

  # ...while making it online is ok, and should work
  AssertCommand(["gnt-instance", "modify", "--online", instance.name])
def TestClusterBurnin():
  """Burnin

  Older variant: uploads and runs the burnin tool over acquired
  instances, taking options from the QA "options" section and disk
  settings directly from the "disk"/"disk-growth" config entries.

  """
  master = qa_config.GetMasterNode()

  options = qa_config.get('options', {})
  disk_template = options.get('burnin-disk-template', 'drbd')
  parallel = options.get('burnin-in-parallel', False)
  check_inst = options.get('burnin-check-instances', False)
  do_rename = options.get('burnin-rename', '')
  do_reboot = options.get('burnin-reboot', True)
  reboot_types = options.get("reboot-types", constants.REBOOT_TYPES)

  # Get as many instances as we need
  instances = []
  try:
    try:
      num = qa_config.get('options', {}).get('burnin-instances', 1)
      for _ in range(0, num):
        instances.append(qa_config.AcquireInstance())
    except qa_error.OutOfInstancesError:
      # Proceed with however many instances could be acquired
      print "Not enough instances, continuing anyway."

    if len(instances) < 1:
      raise qa_error.Error("Burnin needs at least one instance")

    script = qa_utils.UploadFile(master['primary'], '../tools/burnin')
    try:
      # Run burnin
      cmd = [script,
             '--os=%s' % qa_config.get('os'),
             '--disk-size=%s' % ",".join(qa_config.get('disk')),
             '--disk-growth=%s' % ",".join(qa_config.get('disk-growth')),
             '--disk-template=%s' % disk_template]
      if parallel:
        cmd.append('--parallel')
        cmd.append('--early-release')
      if check_inst:
        cmd.append('--http-check')
      if do_rename:
        cmd.append('--rename=%s' % do_rename)
      if not do_reboot:
        cmd.append('--no-reboot')
      else:
        cmd.append('--reboot-types=%s' % ",".join(reboot_types))
      cmd += [inst['name'] for inst in instances]
      AssertEqual(StartSSH(master['primary'],
                           utils.ShellQuoteArgs(cmd)).wait(), 0)
    finally:
      # Always remove the uploaded script
      cmd = ['rm', '-f', script]
      AssertEqual(StartSSH(master['primary'],
                           utils.ShellQuoteArgs(cmd)).wait(), 0)
  finally:
    # Release the acquired instances even on failure
    for inst in instances:
      qa_config.ReleaseInstance(inst)
def TestRapiInstanceAdd(node, use_client):
  """Test adding a new instance via RAPI.

  @param node: primary node for the new instance
  @param use_client: create via the RAPI client library when true,
    otherwise via a raw POST to /2/instances
  @return: the acquired QA instance (released again on error)

  """
  if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
    return

  instance = qa_config.AcquireInstance()
  instance.SetDiskTemplate(constants.DT_PLAIN)
  try:
    disks = [{"size": utils.ParseUnit(d.get("size")),
              "name": str(d.get("name"))}
             for d in qa_config.GetDiskOptions()]
    # Have Ganeti generate the MAC for NIC 0
    nic0_mac = instance.GetNicMacAddr(0, constants.VALUE_GENERATE)
    nics = [{
      constants.INIC_MAC: nic0_mac,
      }]

    beparams = {
      constants.BE_MAXMEM: utils.ParseUnit(qa_config.get(constants.BE_MAXMEM)),
      constants.BE_MINMEM: utils.ParseUnit(qa_config.get(constants.BE_MINMEM)),
      }

    if use_client:
      job_id = _rapi_client.CreateInstance(constants.INSTANCE_CREATE,
                                           instance.name,
                                           constants.DT_PLAIN,
                                           disks, nics,
                                           os=qa_config.get("os"),
                                           pnode=node.primary,
                                           beparams=beparams)
    else:
      # Raw request body for the HTTP interface
      body = {
        "__version__": 1,
        "mode": constants.INSTANCE_CREATE,
        "name": instance.name,
        "os_type": qa_config.get("os"),
        "disk_template": constants.DT_PLAIN,
        "pnode": node.primary,
        "beparams": beparams,
        "disks": disks,
        "nics": nics,
        }

      (job_id, ) = _DoTests([
        ("/2/instances", _VerifyReturnsJob, "POST", body),
        ])

    _WaitForRapiJob(job_id)

    return instance
  except:
    # Make sure the instance slot is freed before propagating the error
    instance.Release()
    raise
def TestGanetiCommands():
  """Test availability of Ganeti commands.

  Runs every Ganeti CLI tool and daemon with "--version" on each node,
  chained with "&&" so a single failure fails the whole check.

  """
  programs = ["gnt-backup", "gnt-cluster", "gnt-debug", "gnt-instance",
              "gnt-job", "gnt-network", "gnt-node", "gnt-os", "gnt-storage",
              "gnt-filter", "ganeti-noded", "ganeti-rapi", "ganeti-watcher",
              "ganeti-confd", "ganeti-luxid", "ganeti-wconfd"]

  quoted = [utils.ShellQuoteArgs([prog, "--version"]) for prog in programs]
  combined_cmd = " && ".join(quoted)

  for node in qa_config.get("nodes"):
    AssertCommand(combined_cmd, node=node)
def TestNetworkConnect():
  """gnt-network connect/disconnect

  Creates a temporary group and network, connects the network to the
  group (using the configured default NIC parameters when present),
  lists networks, then disconnects and removes both again.

  """
  (group1, ) = qa_utils.GetNonexistentGroups(1)
  (network1, ) = GetNonexistentNetworks(1)

  # Fall back to bridged mode on xen-br0 unless overridden in the config
  conn_mode = "bridged"
  conn_link = "xen-br0"
  nicparams = qa_config.get("default-nicparams")
  if nicparams:
    conn_mode = nicparams.get("mode", conn_mode)
    conn_link = nicparams.get("link", conn_link)

  AssertCommand(["gnt-group", "add", group1])
  AssertCommand(
    ["gnt-network", "add", "--network", "192.0.2.0/24", network1])

  AssertCommand(["gnt-network", "connect", network1,
                 conn_mode, conn_link, group1])

  TestNetworkList()

  AssertCommand(["gnt-network", "disconnect", network1, group1])

  AssertCommand(["gnt-group", "remove", group1])
  AssertCommand(["gnt-network", "remove", network1])
def _EnsureRapiFilesPresence():
  """Ensures that the specified RAPI files are present on the cluster, if any.

  When "rapi-files-location" is configured, copies the certificate and
  users files from there onto the node, distributes them cluster-wide and
  restarts Ganeti so the new certificates take effect.

  """
  rapi_files_location = qa_config.get("rapi-files-location", None)
  if rapi_files_location is None:
    # No files to be had
    return

  print(qa_logging.FormatWarning("Replacing the certificate and users file on"
                                 " the node with the ones provided in %s"
                                 % rapi_files_location))

  # The RAPI files
  AssertCommand(["mkdir", "-p", pathutils.RAPI_DATA_DIR])

  for filename in _FILES_TO_COPY:
    # Copy the local file onto the node, then distribute it to all nodes
    basename = os.path.split(filename)[-1]
    AssertCommand(["cp", os.path.join(rapi_files_location, basename),
                   filename])
    AssertCommand(["gnt-cluster", "copyfile", filename])

  # The certificates have to be reloaded now
  AssertCommand(["service", "ganeti", "restart"])
def TestClusterReservedLvs(): """gnt-cluster reserved lvs""" # if no lvm-based templates are supported, skip the test if not qa_config.IsStorageTypeSupported(constants.ST_LVM_VG): return vgname = qa_config.get("vg-name", constants.DEFAULT_VG) lvname = _QA_LV_PREFIX + "test" lvfullname = "/".join([vgname, lvname]) for fail, cmd in [ (False, _CLUSTER_VERIFY), (False, ["gnt-cluster", "modify", "--reserved-lvs", ""]), (False, ["lvcreate", "-L1G", "-n", lvname, vgname]), (True, _CLUSTER_VERIFY), (False, ["gnt-cluster", "modify", "--reserved-lvs", "%s,.*/other-test" % lvfullname]), (False, _CLUSTER_VERIFY), (False, ["gnt-cluster", "modify", "--reserved-lvs", ".*/%s.*" % _QA_LV_PREFIX]), (False, _CLUSTER_VERIFY), (False, ["gnt-cluster", "modify", "--reserved-lvs", ""]), (True, _CLUSTER_VERIFY), (False, ["lvremove", "-f", lvfullname]), (False, _CLUSTER_VERIFY), ]: AssertCommand(cmd, fail=fail)
def TestClusterModifyBe():
  """gnt-cluster modify -B

  Exercises backend-parameter modifications (memory, vcpus, auto_balance),
  checking via "gnt-cluster info" that valid values stick and invalid
  values are rejected without changing the previous setting.

  """
  # Each entry: (whether the command is expected to fail, the command)
  for fail, cmd in [
    # max/min mem
    (False, ["gnt-cluster", "modify", "-B", "maxmem=256"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]),
    (False, ["gnt-cluster", "modify", "-B", "minmem=256"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]),
    (True, ["gnt-cluster", "modify", "-B", "maxmem=a"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 256$'"]),
    (True, ["gnt-cluster", "modify", "-B", "minmem=a"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 256$'"]),
    (False, ["gnt-cluster", "modify", "-B", "maxmem=128,minmem=128"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *maxmem: 128$'"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *minmem: 128$'"]),
    # vcpus
    (False, ["gnt-cluster", "modify", "-B", "vcpus=4"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 4$'"]),
    (True, ["gnt-cluster", "modify", "-B", "vcpus=a"]),
    (False, ["gnt-cluster", "modify", "-B", "vcpus=1"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *vcpus: 1$'"]),
    # auto_balance
    (False, ["gnt-cluster", "modify", "-B", "auto_balance=False"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *auto_balance: False$'"]),
    (True, ["gnt-cluster", "modify", "-B", "auto_balance=1"]),
    (False, ["gnt-cluster", "modify", "-B", "auto_balance=True"]),
    (False, ["sh", "-c", "gnt-cluster info|grep '^ *auto_balance: True$'"]),
    ]:
    AssertCommand(cmd, fail=fail)

  # redo the original-requested BE parameters, if any
  bep = qa_config.get("backend-parameters", "")
  if bep:
    AssertCommand(["gnt-cluster", "modify", "-B", bep])
def TestClusterReservedLvs(): """gnt-cluster reserved lvs""" # if no lvm-based templates are supported, skip the test if not qa_config.IsStorageTypeSupported(constants.ST_LVM_VG): return vgname = qa_config.get("vg-name", constants.DEFAULT_VG) lvname = _QA_LV_PREFIX + "test" lvfullname = "/".join([vgname, lvname]) # Clean cluster AssertClusterVerify() AssertCommand(["gnt-cluster", "modify", "--reserved-lvs", ""]) AssertCommand(["lvcreate", "-L1G", "-n", lvname, vgname]) AssertClusterVerify(fail=False, warnings=[constants.CV_ENODEORPHANLV]) AssertCommand(["gnt-cluster", "modify", "--reserved-lvs", "%s,.*/other-test" % lvfullname]) AssertClusterVerify(no_warnings=[constants.CV_ENODEORPHANLV]) AssertCommand(["gnt-cluster", "modify", "--reserved-lvs", ".*/%s.*" % _QA_LV_PREFIX]) AssertClusterVerify(no_warnings=[constants.CV_ENODEORPHANLV]) AssertCommand(["gnt-cluster", "modify", "--reserved-lvs", ""]) AssertClusterVerify(fail=False, warnings=[constants.CV_ENODEORPHANLV]) AssertCommand(["lvremove", "-f", lvfullname]) AssertClusterVerify()
def _TestOs(mode):
  """Generic function for OS definition testing

  Installs a temporary OS definition on each node (valid, invalid or
  alternating, depending on C{mode}), runs "gnt-os diagnose" on the
  master and checks its exit code, then removes the temporary OS again.

  @param mode: 0 makes every OS definition invalid, 1 makes every one
    valid, any other value alternates valid/invalid per node

  """
  master = qa_config.GetMasterNode()
  # Renamed from "dir" to avoid shadowing the builtin
  os_dir = _TEMP_OS_PATH

  nodes = []
  try:
    # enumerate() replaces the manually maintained counter
    for i, node in enumerate(qa_config.get('nodes')):
      nodes.append(node)
      if mode == 0:
        valid = False
      elif mode == 1:
        valid = True
      else:
        valid = bool(i % 2)
      _SetupTempOs(node, os_dir, valid)

    cmd = ['gnt-os', 'diagnose']
    result = StartSSH(master['primary'],
                      utils.ShellQuoteArgs(cmd)).wait()
    # Diagnose succeeds only when all OS definitions are valid
    if mode == 1:
      AssertEqual(result, 0)
    else:
      AssertEqual(result, 1)
  finally:
    # Remove the temporary OS from every node it was installed on
    for node in nodes:
      _RemoveTempOs(node, os_dir)
def Setup(username, password):
  """Configures the RAPI client.

  Stores the credentials in module-level state, fetches the RAPI CA
  certificate from the master node into a temporary file and builds the
  module-level RAPI client instance.

  @param username: RAPI username
  @param password: RAPI password

  """
  # pylint-style module state; the temp file must stay referenced so the
  # certificate is not deleted while the client uses it
  global _rapi_ca
  global _rapi_client
  global _rapi_username
  global _rapi_password

  _rapi_username = username
  _rapi_password = password

  master = qa_config.GetMasterNode()

  # Load RAPI certificate from master node
  cmd = ["cat", constants.RAPI_CERT_FILE]

  # Write to temporary file
  _rapi_ca = tempfile.NamedTemporaryFile()
  _rapi_ca.write(qa_utils.GetCommandOutput(master["primary"],
                                           utils.ShellQuoteArgs(cmd)))
  _rapi_ca.flush()

  port = qa_config.get("rapi-port", default=constants.DEFAULT_RAPI_PORT)
  cfg_curl = rapi.client.GenericCurlConfig(cafile=_rapi_ca.name,
                                           proxy="")

  _rapi_client = rapi.client.GanetiRapiClient(master["primary"], port=port,
                                              username=username,
                                              password=password,
                                              curl_config_fn=cfg_curl)

  print "RAPI protocol version: %s" % _rapi_client.GetVersion()
def TestInstanceModify(instance): """gnt-instance modify""" # Assume /sbin/init exists on all systems test_kernel = "/sbin/init" test_initrd = test_kernel orig_memory = qa_config.get('mem') #orig_bridge = qa_config.get("bridge", "xen-br0") args = [ ["-B", "%s=128" % constants.BE_MEMORY], ["-B", "%s=%s" % (constants.BE_MEMORY, orig_memory)], ["-B", "%s=2" % constants.BE_VCPUS], ["-B", "%s=1" % constants.BE_VCPUS], ["-B", "%s=%s" % (constants.BE_VCPUS, constants.VALUE_DEFAULT)], ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, test_kernel)], ["-H", "%s=%s" % (constants.HV_KERNEL_PATH, constants.VALUE_DEFAULT)], ["-H", "%s=%s" % (constants.HV_INITRD_PATH, test_initrd)], ["-H", "no_%s" % (constants.HV_INITRD_PATH, )], ["-H", "%s=%s" % (constants.HV_INITRD_PATH, constants.VALUE_DEFAULT)], # TODO: bridge tests #["--bridge", "xen-br1"], #["--bridge", orig_bridge], # TODO: Do these tests only with xen-hvm #["-H", "%s=acn" % constants.HV_BOOT_ORDER], #["-H", "%s=%s" % (constants.HV_BOOT_ORDER, constants.VALUE_DEFAULT)], ] for alist in args: AssertCommand(["gnt-instance", "modify"] + alist + [instance["name"]]) # check no-modify AssertCommand(["gnt-instance", "modify", instance["name"]], fail=True)
def TestGroupAddRemoveRename():
  """gnt-group add/remove/rename

  Exercises group creation, duplicate-name failures, renaming (including
  renaming over an existing name, which must fail) and removal, restoring
  the pre-existing group's name in a finally block.

  """
  groups = qa_config.get("groups", {})

  existing_group_with_nodes = GetDefaultGroup()

  group1, group2, group3 = groups.get("inexistent-groups",
                                      ["group1", "group2", "group3"])[:3]

  AssertCommand(["gnt-group", "add", group1])
  AssertCommand(["gnt-group", "add", group2])
  # Adding an existing group must fail
  AssertCommand(["gnt-group", "add", group2], fail=True)
  AssertCommand(["gnt-group", "add", existing_group_with_nodes], fail=True)

  # Renaming onto an existing name must fail; onto a fresh name must work
  AssertCommand(["gnt-group", "rename", group1, group2], fail=True)
  AssertCommand(["gnt-group", "rename", group1, group3])

  try:
    AssertCommand(["gnt-group", "rename", existing_group_with_nodes, group1])

    AssertCommand(["gnt-group", "remove", group2])
    AssertCommand(["gnt-group", "remove", group3])
    # group1 now names the pre-existing group with nodes; removal must fail
    AssertCommand(["gnt-group", "remove", group1], fail=True)
  finally:
    # Try to ensure idempotency re groups that already existed.
    AssertCommand(["gnt-group", "rename", group1, existing_group_with_nodes])
def TestNetworkConnect():
  """gnt-network connect/disconnect

  Variant using "--nic-parameters": creates a temporary group and
  network, connects them with mode/link taken from "default-nicparams"
  (falling back to bridged/xen-br0), lists networks, then disconnects and
  removes both.

  """
  (group1, ) = qa_utils.GetNonexistentGroups(1)
  (network1, ) = GetNonexistentNetworks(1)

  default_mode = "bridged"
  default_link = "xen-br0"
  nicparams = qa_config.get("default-nicparams")
  if nicparams:
    mode = nicparams.get("mode", default_mode)
    link = nicparams.get("link", default_link)
  else:
    mode = default_mode
    link = default_link

  nicparams = "mode=%s,link=%s" % (mode, link)

  AssertCommand(["gnt-group", "add", group1])
  AssertCommand(["gnt-network", "add", "--network", "192.0.2.0/24", network1])

  AssertCommand(["gnt-network", "connect",
                 "--nic-parameters", nicparams,
                 network1, group1])
  TestNetworkList()

  AssertCommand(["gnt-network", "disconnect", network1, group1])

  AssertCommand(["gnt-group", "remove", group1])
  AssertCommand(["gnt-network", "remove", network1])
def _SubmitInstanceCreationJob(instance, disk_template=None):
  """Submit an instance creation job.

  @type instance: L{qa_config._QaInstance}
  @param instance: instance to submit a create command for
  @type disk_template: string
  @param disk_template: disk template for the new instance or C{None} which
                        causes the default disk template to be used
  @rtype: int
  @return: job id of the submitted creation job

  """
  if disk_template is None:
    disk_template = qa_config.GetDefaultDiskTemplate()
  try:
    # Base command plus the generic add parameters and the instance name
    cmd = ["gnt-instance", "add", "--submit",
           "--os-type=%s" % qa_config.get("os"),
           "--disk-template=%s" % disk_template]
    cmd.extend(GetGenericAddParameters(instance, disk_template))
    cmd.append(instance.name)
    instance.SetDiskTemplate(disk_template)
    return _ExecuteJobSubmittingCmd(cmd)
  except:
    # Free the instance slot before propagating the error
    instance.Release()
    raise
def _SubmitInstanceCreationJob(instance, disk_template=None):
  """Submit an instance creation job.

  Variant with opportunistic locking enabled on the add command.

  @type instance: L{qa_config._QaInstance}
  @param instance: instance to submit a create command for
  @type disk_template: string
  @param disk_template: disk template for the new instance or C{None} which
                        causes the default disk template to be used
  @rtype: int
  @return: job id of the submitted creation job

  """
  if disk_template is None:
    disk_template = qa_config.GetDefaultDiskTemplate()
  try:
    cmd = (["gnt-instance", "add", "--submit", "--opportunistic-locking",
            "--os-type=%s" % qa_config.get("os"),
            "--disk-template=%s" % disk_template] +
           GetGenericAddParameters(instance, disk_template))
    cmd.append(instance.name)

    instance.SetDiskTemplate(disk_template)

    return _ExecuteJobSubmittingCmd(cmd)
  except:
    # Release the instance slot on any failure, then re-raise
    instance.Release()
    raise
def TestParallelModify(instances):
  """PERFORMANCE: Parallel instance modify.

  @type instances: list of L{qa_config._QaInstance}
  @param instances: list of instances to issue modify commands against

  """
  job_driver = _JobQueueDriver()
  # set min mem to same value as max mem
  new_min_mem = qa_config.get(constants.BE_MAXMEM)

  base_cmd = ["gnt-instance", "modify", "--submit"]
  minmem_opts = ["-B", "%s=%s" % (constants.BE_MINMEM, new_min_mem)]
  osparam_opts = ["-O", "fake_os_param=fake_value"]

  for instance in instances:
    # Three concurrent modifications per instance: backend-only,
    # OS-parameter-only, and both combined
    for extra_opts in (minmem_opts, osparam_opts, osparam_opts + minmem_opts):
      cmd = base_cmd + extra_opts + [instance.name]
      job_driver.AddJob(_ExecuteJobSubmittingCmd(cmd))

  job_driver.WaitForCompletion()
def GetNonexistentEntityNames(count, name_config, name_prefix):
  """Gets entity names which shouldn't exist on the cluster.

  The actual names can refer to arbitrary entities (for example groups,
  networks).

  @type count: integer
  @param count: Number of names to get
  @type name_config: string
  @param name_config: name of the leaf in the config containing this entity's
    configuration, including a 'inexistent-' element
  @type name_prefix: string
  @param name_prefix: prefix of the entity's names, used to compose the
    default values; for example for groups, the prefix is 'group' and the
    generated names are then group1, group2, ...
  @rtype: list of string
  @return: C{count} names not present on the cluster

  """
  entities = qa_config.get(name_config, {})
  default = [name_prefix + str(i) for i in range(count)]
  assert count <= len(default)
  # Prefer explicitly configured names, fall back to the generated defaults
  name_config_inexistent = "inexistent-" + name_config
  candidates = entities.get(name_config_inexistent, default)[:count]
  if len(candidates) < count:
    raise Exception("At least %s non-existent %s are needed" %
                    (count, name_config))
  return candidates
def TestParallelModify(instances):
  """PERFORMANCE: Parallel instance modify.

  Submits three concurrent modify jobs per instance (backend memory,
  a fake OS parameter, and both combined) and waits for completion.

  @type instances: list of L{qa_config._QaInstance}
  @param instances: list of instances to issue modify commands against

  """
  job_driver = _JobQueueDriver()
  # set min mem to same value as max mem
  new_min_mem = qa_config.get(constants.BE_MAXMEM)
  for instance in instances:
    cmd = ([
      "gnt-instance", "modify", "--submit", "-B",
      "%s=%s" % (constants.BE_MINMEM, new_min_mem)
    ])
    cmd.append(instance.name)
    job_driver.AddJob(_ExecuteJobSubmittingCmd(cmd))

    cmd = ([
      "gnt-instance", "modify", "--submit", "-O",
      "fake_os_param=fake_value"
    ])
    cmd.append(instance.name)
    job_driver.AddJob(_ExecuteJobSubmittingCmd(cmd))

    cmd = ([
      "gnt-instance", "modify", "--submit", "-O", "fake_os_param=fake_value",
      "-B",
      "%s=%s" % (constants.BE_MINMEM, new_min_mem)
    ])
    cmd.append(instance.name)
    job_driver.AddJob(_ExecuteJobSubmittingCmd(cmd))

  job_driver.WaitForCompletion()
def ModifyGroupSshPort(ipt_rules, group, nodes, ssh_port):
  """Modifies the node group settings and sets up iptable rules.

  For each pair of nodes add two rules that affect SSH connections from one
  to the other one. The first one redirects port 22 to some unused port so
  that connecting through 22 fails. The second redirects port `ssh_port` to
  port 22. Together this results in master seeing the SSH daemons on the
  nodes on `ssh_port` instead of 22.

  @param ipt_rules: object providing C{RedirectPort} to install iptables
    redirections (and presumably to undo them later — confirm with caller)
  @param group: name of the node group to modify
  @param nodes: nodes whose SSH ports are redirected
  @param ssh_port: port the SSH daemons should appear on

  """
  default_ssh_port = netutils.GetDaemonPort(constants.SSH)
  all_nodes = qa_config.get("nodes")
  AssertCommand(["gnt-group", "modify",
                 "--node-parameters=ssh_port=" + str(ssh_port),
                 group])
  for node in nodes:
    # Local redirections on the node itself
    ipt_rules.RedirectPort(node.primary, "localhost",
                           default_ssh_port, 65535)
    ipt_rules.RedirectPort(node.primary, "localhost",
                           ssh_port, default_ssh_port)
    # Redirections as seen from every other node in the cluster
    for node2 in all_nodes:
      ipt_rules.RedirectPort(node2.primary, node.primary,
                             default_ssh_port, 65535)
      ipt_rules.RedirectPort(node2.primary, node.primary,
                             ssh_port, default_ssh_port)
def _CheckFileOnAllNodes(filename, content):
  """Checks that a file has the expected content on every node.

  @param filename: path of the file to check
  @param content: exact expected file content

  """
  quoted_cat = utils.ShellQuoteArgs(["cat", filename])
  for cluster_node in qa_config.get("nodes"):
    actual = qa_utils.GetCommandOutput(cluster_node["primary"], quoted_cat)
    AssertEqual(actual, content)
def _CheckFileOnAllNodes(filename, content):
  """Asserts that the given file holds exactly C{content} on all nodes.

  @param filename: path of the file to verify
  @param content: exact expected content

  """
  cat_cmd = utils.ShellQuoteArgs(["cat", filename])
  for cluster_node in qa_config.get("nodes"):
    observed = qa_utils.GetCommandOutput(cluster_node.primary, cat_cmd)
    AssertEqual(observed, content)
def TestClusterReservedLvs(): """gnt-cluster reserved lvs""" # if no lvm-based templates are supported, skip the test if not qa_config.IsStorageTypeSupported(constants.ST_LVM_VG): return vgname = qa_config.get("vg-name", constants.DEFAULT_VG) lvname = _QA_LV_PREFIX + "test" lvfullname = "/".join([vgname, lvname]) # Clean cluster AssertClusterVerify() AssertCommand(["gnt-cluster", "modify", "--reserved-lvs", ""]) AssertCommand(["lvcreate", "-L1G", "-n", lvname, vgname]) AssertClusterVerify(fail=False, warnings=[constants.CV_ENODEORPHANLV]) AssertCommand([ "gnt-cluster", "modify", "--reserved-lvs", "%s,.*/other-test" % lvfullname ]) AssertClusterVerify(no_warnings=[constants.CV_ENODEORPHANLV]) AssertCommand( ["gnt-cluster", "modify", "--reserved-lvs", ".*/%s.*" % _QA_LV_PREFIX]) AssertClusterVerify(no_warnings=[constants.CV_ENODEORPHANLV]) AssertCommand(["gnt-cluster", "modify", "--reserved-lvs", ""]) AssertClusterVerify(fail=False, warnings=[constants.CV_ENODEORPHANLV]) AssertCommand(["lvremove", "-f", lvfullname]) AssertClusterVerify()
def TestRapiNodeGroups():
  """Test several node group operations using RAPI.

  """
  def _SubmitAndWait(path, method, body):
    # Issue a single RAPI request and wait for the resulting job
    (job_id, ) = _DoTests([
      (path, _VerifyReturnsJob, method, body),
      ])
    _WaitForRapiJob(job_id)

  groups = qa_config.get("groups", {})
  group1, group2, group3 = groups.get("inexistent-groups",
                                      ["group1", "group2", "group3"])[:3]

  # Create a group with no attributes
  _SubmitAndWait("/2/groups", "POST", {
    "name": group1,
    })

  # Create a group specifying alloc_policy
  _SubmitAndWait("/2/groups", "POST", {
    "name": group2,
    "alloc_policy": constants.ALLOC_POLICY_UNALLOCABLE,
    })

  # Modify alloc_policy
  _SubmitAndWait("/2/groups/%s/modify" % group1, "PUT", {
    "alloc_policy": constants.ALLOC_POLICY_UNALLOCABLE,
    })

  # Rename a group
  _SubmitAndWait("/2/groups/%s/rename" % group2, "PUT", {
    "new_name": group3,
    })

  # Delete groups
  for group in [group1, group3]:
    _SubmitAndWait("/2/groups/%s" % group, "DELETE", None)
def TestGanetiCommands():
  """Test availability of Ganeti commands.

  """
  # Every user-facing script and daemon must at least report its version
  programs = ("gnt-backup", "gnt-cluster", "gnt-debug", "gnt-instance",
              "gnt-job", "gnt-network", "gnt-node", "gnt-os", "gnt-storage",
              "gnt-filter", "ganeti-noded", "ganeti-rapi", "ganeti-watcher",
              "ganeti-confd", "ganeti-luxid", "ganeti-wconfd")
  # Chain all invocations so a single SSH round-trip per node suffices
  joined = " && ".join(utils.ShellQuoteArgs([prog, "--version"])
                       for prog in programs)
  for node in qa_config.get("nodes"):
    AssertCommand(joined, node=node)
def IsExclusiveStorageInstanceTestEnabled():
  """Returns whether instance tests with exclusive storage can be run.

  Besides the test being enabled in the QA configuration, every node must
  have at least two physical volumes in the configured volume group,
  otherwise exclusive-storage instances cannot be created.

  @rtype: bool
  @return: True if the tests are enabled and all nodes have enough PVs,
    False if the tests are disabled
  @raise qa_error.Error: if the number of PVs on a node cannot be
    determined or is lower than two

  """
  test_name = "exclusive-storage-instance-tests"
  if qa_config.TestEnabled(test_name):
    vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
    vgscmd = utils.ShellQuoteArgs([
      "vgs", "--noheadings", "-o", "pv_count", vgname,
      ])
    nodes = qa_config.GetConfig()["nodes"]
    for node in nodes:
      try:
        pvnum = int(qa_utils.GetCommandOutput(node.primary, vgscmd))
      # "as" form is Python 2.6+/3 compatible, unlike "except Exception, e"
      except Exception as e:
        msg = ("Cannot get the number of PVs on %s, needed by '%s': %s" %
               (node.primary, test_name, e))
        raise qa_error.Error(msg)
      if pvnum < 2:
        raise qa_error.Error("Node %s has not enough PVs (%s) to run '%s'" %
                             (node.primary, pvnum, test_name))
    res = True
  else:
    # Bug fix: the original fell off the end without an else branch and
    # without a return statement, so it always returned None (falsy) even
    # when the test was enabled and all checks passed
    res = False
  return res
def TestClusterRename():
  """gnt-cluster rename"""
  base_cmd = ["gnt-cluster", "rename", "-f"]

  original_name = qa_config.get("name")
  rename_target = qa_config.get("rename", None)
  if rename_target is None:
    print(qa_utils.FormatError('"rename" entry is missing'))
    return

  # Rename to the target and back again, verifying the cluster after
  # each step
  steps = [
    base_cmd + [rename_target],
    _CLUSTER_VERIFY,
    base_cmd + [original_name],
    _CLUSTER_VERIFY,
    ]
  for step in steps:
    AssertCommand(step)
def ConfigureGroups():
  """Configures groups and nodes for tests such as custom SSH ports.

  """
  group = GetDefaultGroup()
  cluster_nodes = qa_config.get("nodes")
  opts = qa_config.get("options", {})

  # Drop any leftover iptables configuration from earlier runs
  qa_iptables.CleanRules(cluster_nodes)

  # Apply a custom SSH port when one is configured and it actually differs
  # from the daemon's default
  custom_port = opts.get("ssh-port")
  std_port = netutils.GetDaemonPort(constants.SSH)
  if custom_port is not None and custom_port != std_port:
    ModifyGroupSshPort(qa_iptables.GLOBAL_RULES, group, cluster_nodes,
                       custom_port)
def _RemoveFileFromAllNodes(filename):
  """Removes a file from all nodes.

  @type filename: string
  @param filename: path of the file to remove on every node

  """
  # The command is loop-invariant, so build it once
  cmd = utils.ShellQuoteArgs(["rm", "-f", filename])
  for node in qa_config.get("nodes"):
    # QA node entries are objects exposing a "primary" attribute (as used
    # by _CheckFileOnAllNodes and TestGanetiCommands); the dict-style
    # node['primary'] access used here previously is obsolete
    AssertEqual(StartSSH(node.primary, cmd).wait(), 0)
def TestUpgrade():
  """Test gnt-cluster upgrade.

  This tests the 'gnt-cluster upgrade' command by flipping between the
  current and a different version of Ganeti. To also recover subtile points
  in the configuration up/down grades, instances are left over both
  upgrades.

  """
  this_version = qa_config.get("dir-version")
  other_version = qa_config.get("other-dir-version")
  if this_version is None or other_version is None:
    print(qa_utils.FormatInfo("Test not run, as versions not specified"))
    return

  # Collect the instance creation functions that are both enabled and
  # requested for the upgrade test
  upgrade_instances = qa_config.get("upgrade-instances", [])
  inst_creates = [
    (cf, n)
    for (test_name, templ, cf, n) in qa_instance.available_instance_tests
    if (qa_config.TestEnabled(test_name)
        and qa_config.IsTemplateSupported(templ)
        and templ in upgrade_instances)
    ]

  def _CreateAll():
    # Create one instance per selected test, acquiring the needed nodes
    return [cf(qa_config.AcquireManyNodes(n)) for (cf, n) in inst_creates]

  def _RemoveAll(instances):
    # Remove the instances again and release their nodes
    for inst in instances:
      qa_instance.TestInstanceRemove(inst)
      inst.Release()

  # Instances stay alive across each upgrade to exercise configuration
  # up/down-grade handling
  live_instances = _CreateAll()
  AssertCommand(["gnt-cluster", "upgrade", "--to", other_version])
  AssertCommand(["gnt-cluster", "verify"])
  _RemoveAll(live_instances)

  live_instances = _CreateAll()
  AssertCommand(["gnt-cluster", "upgrade", "--to", this_version])
  AssertCommand(["gnt-cluster", "verify"])
  _RemoveAll(live_instances)
def TestInstanceReinstall(instance):
  """gnt-instance reinstall"""
  if instance.disk_template == constants.DT_DISKLESS:
    print(qa_utils.FormatInfo("Test not supported for diskless instances"))
    return

  qa_storage = qa_config.get("qa-storage")
  if qa_storage is None:
    print(qa_utils.FormatInfo("Test not supported because the additional QA"
                              " storage is not available"))
  else:
    # Reinstall with OS image from QA storage
    url = "%s/busybox.img" % qa_storage
    AssertCommand(["gnt-instance", "reinstall",
                   "--os-parameters", "os-image=" + url,
                   "-f", instance.name])

    # Reinstall with OS image as local file on the node
    pnode = _GetInstanceField(instance.name, "pnode")
    fetch_cmd = ("wget -O busybox.img %s &> /dev/null &&"
                 " echo $(pwd)/busybox.img") % url
    image = qa_utils.GetCommandOutput(pnode, fetch_cmd).strip()
    AssertCommand(["gnt-instance", "reinstall",
                   "--os-parameters", "os-image=" + image,
                   "-f", instance.name])

  # Reinstall must fail for a non existing local file and for a non
  # existing URL alike
  for bad_image in ["NonExistantOsForQa", "http://NonExistantOsForQa"]:
    AssertCommand(["gnt-instance", "reinstall",
                   "--os-parameters", "os-image=" + bad_image,
                   "-f", instance.name], fail=True)

  # Reinstall using OS scripts
  AssertCommand(["gnt-instance", "reinstall", "-f", instance.name])

  # Test with non-existant OS definition
  AssertCommand(["gnt-instance", "reinstall", "-f",
                 "--os-type=NonExistantOsForQa", instance.name], fail=True)

  # Test with existing OSes but invalid variants
  for bad_variant in ["debootstrap+ola", "debian-image+ola"]:
    AssertCommand(["gnt-instance", "reinstall", "-f", "-o", bad_variant,
                   instance.name], fail=True)
def TestInstanceModifyPrimaryAndBack(instance, currentnode, othernode):
  """gnt-instance modify --new-primary

  This will leave the instance on its original primary node, not other node.

  """
  if instance.disk_template != constants.DT_FILE:
    print(qa_utils.FormatInfo("Test only supported for the file disk template"))
    return

  cluster_name = qa_config.get("name")
  name = instance.name
  current = currentnode.primary
  other = othernode.primary
  filestorage = qa_config.get("file-storage-dir",
                              pathutils.DEFAULT_FILE_STORAGE_DIR)
  disk = os.path.join(filestorage, name)

  # Changing the primary without the disk data in place must fail
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name],
                fail=True)
  AssertCommand(["gnt-instance", "shutdown", name])

  # Copy the file-based disk over to the other node via scp
  scp_cmd = ["scp",
             "-oGlobalKnownHostsFile=%s" % pathutils.SSH_KNOWN_HOSTS_FILE,
             "-oCheckHostIp=no", "-oStrictHostKeyChecking=yes",
             "-oHashKnownHosts=no", "-oHostKeyAlias=%s" % cluster_name,
             "-r", disk, "%s:%s" % (other, filestorage)]
  AssertCommand(scp_cmd, node=current)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % other, name])
  AssertCommand(["gnt-instance", "startup", name])

  # and back
  AssertCommand(["gnt-instance", "shutdown", name])
  AssertCommand(["rm", "-rf", disk], node=other)
  AssertCommand(["gnt-instance", "modify", "--new-primary=%s" % current, name])
  AssertCommand(["gnt-instance", "startup", name])
def MarkNodeAddedAll():
  """Mark all nodes as added.

  This is useful if we don't create the cluster ourselves (in qa).

  """
  master = qa_config.GetMasterNode()
  # The master is already part of the cluster; flag every other node
  for node in qa_config.get("nodes"):
    if node == master:
      continue
    node.MarkAdded()
def _FilterTags(seq):
  """Removes unwanted tags from a sequence.

  """
  pattern = qa_config.get("ignore-tags-re", None)
  if not pattern:
    return seq
  # Lazily drop every tag matching the configured regular expression
  return itertools.ifilterfalse(re.compile(pattern).match, seq)
def ReloadCertificates(ensure_presence=True):
  """Reloads the client RAPI certificate with the one present on the node.

  If the QA is set up to use a specific certificate using the
  "rapi-files-location" parameter, it will be put in place prior to
  retrieving it.

  """
  if ensure_presence:
    _EnsureRapiFilesPresence()

  if _rapi_username is None or _rapi_password is None:
    raise qa_error.Error("RAPI username and password have to be set before"
                         " attempting to reload a certificate.")

  # pylint: disable=W0603
  # due to global usage
  global _rapi_ca
  global _rapi_client

  master = qa_config.GetMasterNode()

  # Dump the RAPI certificate from the master node ...
  dump_cmd = ["openssl", "x509", "-in",
              qa_utils.MakeNodePath(master, pathutils.RAPI_CERT_FILE)]
  cert_pem = qa_utils.GetCommandOutput(master.primary,
                                       utils.ShellQuoteArgs(dump_cmd))

  # ... and keep it in a temporary file whose lifetime is tied to the
  # module-level reference
  _rapi_ca = tempfile.NamedTemporaryFile()
  _rapi_ca.write(cert_pem)
  _rapi_ca.flush()

  port = qa_config.get("rapi-port", default=constants.DEFAULT_RAPI_PORT)
  cfg_curl = rapi.client.GenericCurlConfig(cafile=_rapi_ca.name, proxy="")

  if qa_config.UseVirtualCluster():
    # TODO: Implement full support for RAPI on virtual clusters
    print(qa_logging.FormatWarning("RAPI tests are not yet supported on"
                                   " virtual clusters and will be disabled"))
    assert _rapi_client is None
  else:
    _rapi_client = rapi.client.GanetiRapiClient(master.primary, port=port,
                                                username=_rapi_username,
                                                password=_rapi_password,
                                                curl_config_fn=cfg_curl)
    print("RAPI protocol version: %s" % _rapi_client.GetVersion())
def SetupCluster(rapi_user):
  """Initializes the cluster.

  @param rapi_user: Login user for RAPI
  @return: Login secret for RAPI

  """
  rapi_secret = utils.GenerateSecret()
  RunTestIf("create-cluster", qa_cluster.TestClusterInit,
            rapi_user, rapi_secret)

  cluster_is_new = qa_config.TestEnabled("create-cluster")
  if not cluster_is_new:
    # If the cluster is already in place, we assume that exclusive-storage is
    # already set according to the configuration
    qa_config.SetExclusiveStorage(qa_config.get("exclusive-storage", False))
    if qa_rapi.Enabled():
      # To support RAPI on an existing cluster we have to find out the secret
      rapi_secret = qa_rapi.LookupRapiSecret(rapi_user)

  qa_group.ConfigureGroups()

  # Test on empty cluster
  RunTestIf("node-list", qa_node.TestNodeList)
  RunTestIf("instance-list", qa_instance.TestInstanceList)
  RunTestIf("job-list", qa_job.TestJobList)

  RunTestIf("create-cluster", qa_node.TestNodeAddAll)
  if not cluster_is_new:
    # consider the nodes are already there
    qa_node.MarkNodeAddedAll()

  RunTestIf("test-jobqueue", qa_cluster.TestJobqueue)
  RunTestIf("test-jobqueue", qa_job.TestJobCancellation)

  # enable the watcher (unconditionally)
  RunTest(qa_daemon.TestResumeWatcher)

  RunTestIf("node-list", qa_node.TestNodeList)

  # Test listing fields
  for test, fn in [("node-list", qa_node.TestNodeListFields),
                   ("instance-list", qa_instance.TestInstanceListFields),
                   ("job-list", qa_job.TestJobListFields),
                   ("instance-export", qa_instance.TestBackupListFields)]:
    RunTestIf(test, fn)

  RunTestIf("node-info", qa_node.TestNodeInfo)

  return rapi_secret
def TestInstanceReboot(instance):
  """gnt-instance reboot"""
  opts = qa_config.get("options", {})
  name = instance.name

  # Exercise every configured reboot type
  for reboot_type in opts.get("reboot-types", constants.REBOOT_TYPES):
    AssertCommand(["gnt-instance", "reboot", "--type=%s" % reboot_type, name])

  # Rebooting a stopped instance must bring it up again
  AssertCommand(["gnt-instance", "shutdown", name])
  qa_utils.RunInstanceCheck(instance, False)
  AssertCommand(["gnt-instance", "reboot", name])

  master = qa_config.GetMasterNode()
  list_cmd = ["gnt-instance", "list", "--no-headers", "-o", "status", name]
  status = qa_utils.GetCommandOutput(master.primary,
                                     utils.ShellQuoteArgs(list_cmd))
  AssertEqual(status.strip(), constants.INSTST_RUNNING)
def Setup(username, password):
  """Configures the RAPI client.

  """
  # pylint: disable=W0603
  # due to global usage
  global _rapi_ca
  global _rapi_client
  global _rapi_username
  global _rapi_password

  _rapi_username = username
  _rapi_password = password

  master = qa_config.GetMasterNode()

  # Fetch the RAPI certificate from the master node and store it in a
  # temporary file; the module-level reference keeps the file alive
  cat_cmd = ["cat", qa_utils.MakeNodePath(master, pathutils.RAPI_CERT_FILE)]
  _rapi_ca = tempfile.NamedTemporaryFile()
  _rapi_ca.write(qa_utils.GetCommandOutput(master.primary,
                                           utils.ShellQuoteArgs(cat_cmd)))
  _rapi_ca.flush()

  port = qa_config.get("rapi-port", default=constants.DEFAULT_RAPI_PORT)
  cfg_curl = rapi.client.GenericCurlConfig(cafile=_rapi_ca.name, proxy="")

  if qa_config.UseVirtualCluster():
    # TODO: Implement full support for RAPI on virtual clusters
    print(qa_logging.FormatWarning("RAPI tests are not yet supported on"
                                   " virtual clusters and will be disabled"))
    assert _rapi_client is None
  else:
    _rapi_client = rapi.client.GanetiRapiClient(master.primary, port=port,
                                                username=username,
                                                password=password,
                                                curl_config_fn=cfg_curl)
    print("RAPI protocol version: %s" % _rapi_client.GetVersion())

  return _rapi_client
def TestReplaceDisks(instance, curr_nodes, other_nodes):
  """gnt-instance replace-disks"""
  def _ReplaceCmd(extra_args):
    # Assemble a replace-disks invocation for this instance
    return ["gnt-instance", "replace-disks"] + extra_args + [instance.name]

  if not IsDiskReplacingSupported(instance):
    print(qa_utils.FormatInfo("Instance doesn't support disk replacing,"
                              " skipping test"))
    return

  # Currently all supported templates have one primary and one secondary node
  assert len(curr_nodes) == 2
  snode = curr_nodes[1]
  assert len(other_nodes) == 1
  othernode = other_nodes[0]

  options = qa_config.get("options", {})
  use_ialloc = options.get("use-iallocators", True)

  # Pick the replacement secondary either via the iallocator or explicitly
  if use_ialloc:
    pick_secondary = ["-I", constants.DEFAULT_IALLOCATOR_SHORTCUT]
  else:
    pick_secondary = ["--new-secondary=%s" % othernode.primary]

  for args in [["-p"],
               ["-s"],
               pick_secondary,
               # Restore the original secondary
               ["--new-secondary=%s" % snode.primary]]:
    AssertCommand(_ReplaceCmd(args))

  AssertCommand(_ReplaceCmd(["-a"]))
  AssertCommand(["gnt-instance", "stop", instance.name])
  # Automatic replacement requires active disks
  AssertCommand(_ReplaceCmd(["-a"]), fail=True)
  AssertCommand(["gnt-instance", "activate-disks", instance.name])
  AssertCommand(["gnt-instance", "activate-disks", "--wait-for-sync",
                 instance.name])
  AssertCommand(_ReplaceCmd(["-a"]))
  AssertCommand(["gnt-instance", "start", instance.name])
def _GetRapiSecret(rapi_user):
  """Returns the secret to be used for RAPI access.

  Where exactly this secret can be found depends on the QA configuration
  options, and this function invokes additional tools as needed. It can
  look up a local secret, a remote one, or create a user with a new secret.

  @param rapi_user: Login user
  @return: Login secret for the user

  """
  secret_file = qa_config.get("rapi-password-file", None)
  if secret_file is None:
    # On an existing cluster, just find out the user's secret
    return _LookupRapiSecret(rapi_user)
  # If the password file is specified, we use the password within.
  # The file must be present on the QA runner.
  return _ReadRapiSecret(secret_file)
def TestExclStorSharedPv(node):
  """cluster-verify reports LVs that share the same PV with exclusive_storage.

  """
  vgname = qa_config.get("vg-name", constants.DEFAULT_VG)
  host = node.primary
  volumes = [_QA_LV_PREFIX + "vol1", _QA_LV_PREFIX + "vol2"]
  # A single orphan LV only produces a warning ...
  AssertCommand(["lvcreate", "-L1G", "-n", volumes[0], vgname], node=host)
  AssertClusterVerify(fail=False, warnings=[constants.CV_ENODEORPHANLV])
  # ... but a second LV sharing the PV violates exclusive storage
  AssertCommand(["lvcreate", "-L1G", "-n", volumes[1], vgname], node=host)
  AssertClusterVerify(fail=True, errors=[constants.CV_ENODELVM],
                      warnings=[constants.CV_ENODEORPHANLV])
  # Clean up and re-verify
  for lv in volumes:
    AssertCommand(["lvremove", "-f", "/".join([vgname, lv])], node=host)
  AssertClusterVerify()