Example #1
def TestClusterModifyIPolicy():
    """gnt-cluster modify --ipolicy-*"""
    basecmd = ["gnt-cluster", "modify"]
    (old_policy, old_specs) = _GetClusterIPolicy()
    for par in ["vcpu-ratio", "spindle-ratio"]:
        curr_val = float(old_policy[par])
        test_values = [
            (True, 1.0),
            (True, 1.5),
            (True, 2),
            (False, "a"),
            # Restore the old value
            (True, curr_val),
        ]
        for (good, val) in test_values:
            cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
            AssertCommand(cmd, fail=not good)
            if good:
                curr_val = val
            # Check the affected parameter
            (eff_policy, eff_specs) = _GetClusterIPolicy()
            AssertEqual(float(eff_policy[par]), curr_val)
            # Check everything else
            AssertEqual(eff_specs, old_specs)
            for p in eff_policy.keys():
                if p == par:
                    continue
                AssertEqual(eff_policy[p], old_policy[p])

    # Allowing disk templates via ipolicy requires them to be
    # enabled on the cluster.
    if not (qa_config.IsTemplateSupported(constants.DT_PLAIN)
            and qa_config.IsTemplateSupported(constants.DT_DRBD8)):
        return
    # Disk templates are treated slightly differently
    par = "disk-templates"
    disp_str = "allowed disk templates"
    curr_val = old_policy[disp_str]
    test_values = [
        (True, constants.DT_PLAIN),
        (True, "%s,%s" % (constants.DT_PLAIN, constants.DT_DRBD8)),
        (False, "thisisnotadisktemplate"),
        (False, ""),
        # Restore the old value
        (True, curr_val.replace(" ", "")),
    ]
    for (good, val) in test_values:
        cmd = basecmd + ["--ipolicy-%s=%s" % (par, val)]
        AssertCommand(cmd, fail=not good)
        if good:
            curr_val = val
        # Check the affected parameter
        (eff_policy, eff_specs) = _GetClusterIPolicy()
        AssertEqual(eff_policy[disp_str].replace(" ", ""), curr_val)
        # Check everything else
        AssertEqual(eff_specs, old_specs)
        for p in eff_policy.keys():
            if p == disp_str:
                continue
            AssertEqual(eff_policy[p], old_policy[p])
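
The loop above is table-driven: each entry pairs an expected outcome with a value, the command is run, and the effective policy is re-read and compared. The following standalone sketch isolates that pattern; validate_ratio is a hypothetical stand-in for running "gnt-cluster modify --ipolicy-..." and is not part of the QA suite.

def validate_ratio(value):
    # Hypothetical stand-in for AssertCommand on "gnt-cluster modify":
    # accept anything that parses as a positive float.
    try:
        return float(value) > 0
    except (TypeError, ValueError):
        return False

def check_test_values(test_values):
    for (good, val) in test_values:
        accepted = validate_ratio(val)
        # Mirrors AssertCommand(cmd, fail=not good): good values must be
        # accepted, bad values must be rejected.
        assert accepted == good, "unexpected result for %r" % (val,)

check_test_values([(True, 1.0), (True, 1.5), (True, 2), (False, "a")])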
Example #2
def RunPerformanceTests():
    if not qa_config.TestEnabled("performance"):
        ReportTestSkip("performance related tests", "performance")
        return

    if qa_config.TestEnabled("jobqueue-performance"):
        RunTest(qa_performance.TestParallelMaxInstanceCreationPerformance)
        RunTest(
            qa_performance.TestParallelNodeCountInstanceCreationPerformance)

        instances = qa_performance.CreateAllInstances()

        RunTest(qa_performance.TestParallelModify, instances)
        RunTest(qa_performance.TestParallelInstanceOSOperations, instances)
        RunTest(qa_performance.TestParallelInstanceQueries, instances)

        qa_performance.RemoveAllInstances(instances)

        RunTest(qa_performance.TestJobQueueSubmissionPerformance)

    if qa_config.TestEnabled("parallel-performance"):
        if qa_config.IsTemplateSupported(constants.DT_DRBD8):
            RunTest(qa_performance.TestParallelDRBDInstanceCreationPerformance)
        if qa_config.IsTemplateSupported(constants.DT_PLAIN):
            RunTest(
                qa_performance.TestParallelPlainInstanceCreationPerformance)

        if qa_config.IsTemplateSupported(constants.DT_DRBD8):
            inodes = qa_config.AcquireManyNodes(2)
            try:
                instance = qa_instance.TestInstanceAddWithDrbdDisk(inodes)
                try:
                    RunTest(qa_performance.TestParallelInstanceFailover,
                            instance)
                    RunTest(qa_performance.TestParallelInstanceMigration,
                            instance)
                    RunTest(qa_performance.TestParallelInstanceReplaceDisks,
                            instance)
                    RunTest(qa_performance.TestParallelInstanceReboot,
                            instance)
                    RunTest(qa_performance.TestParallelInstanceReinstall,
                            instance)
                    RunTest(qa_performance.TestParallelInstanceRename,
                            instance)
                finally:
                    qa_instance.TestInstanceRemove(instance)
                    instance.Release()
            finally:
                qa_config.ReleaseManyNodes(inodes)
Example #3
def _TestInstanceOperationInParallelToInstanceCreation(*cmds):
    """Run the given test command in parallel to an instance creation.

  @type cmds: list of list of strings
  @param cmds: commands to execute in parallel to an instance creation. Each
               command in the list is executed once the previous job starts
               to run.

  """
    def _SubmitNextCommand(cmd_idx, job_driver, _):
        if cmd_idx >= len(cmds):
            return
        job_id = _ExecuteJobSubmittingCmd(cmds[cmd_idx])
        job_driver.AddJob(job_id,
                          running_fn=functools.partial(_SubmitNextCommand,
                                                       cmd_idx + 1))

    if not qa_config.IsTemplateSupported(constants.DT_DRBD8):
        print(
            qa_logging.FormatInfo(
                "DRBD disk template not supported, skipping"))
        return

    assert len(cmds) > 0

    job_driver = _JobQueueDriver()
    _SubmitNextCommand(0, job_driver, None)

    _TestParallelInstanceCreationAndRemoval(max_instances=1,
                                            disk_template=constants.DT_DRBD8,
                                            custom_job_driver=job_driver)

    job_driver.WaitForCompletion()
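
The key trick above is that _SubmitNextCommand re-registers itself through functools.partial, so each command is submitted only once the previous job starts running. Below is a minimal, dependency-free sketch of that chaining; ToyJobDriver is a made-up stand-in for _JobQueueDriver that fires the callback immediately.

import functools

class ToyJobDriver(object):
    """Made-up stand-in for _JobQueueDriver: fires running_fn immediately."""
    def __init__(self):
        self.submitted = []

    def AddJob(self, job_id, running_fn=None):
        self.submitted.append(job_id)
        if running_fn is not None:
            # In the real driver this happens once the job starts running.
            running_fn(self, job_id)

def _SubmitNext(cmds, cmd_idx, job_driver, _):
    if cmd_idx >= len(cmds):
        return
    job_id = "job-%d" % cmd_idx  # a real version would submit cmds[cmd_idx]
    job_driver.AddJob(job_id,
                      running_fn=functools.partial(_SubmitNext, cmds,
                                                   cmd_idx + 1))

driver = ToyJobDriver()
_SubmitNext(["cmd-a", "cmd-b", "cmd-c"], 0, driver, None)
assert driver.submitted == ["job-0", "job-1", "job-2"]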
Example #4
def RunPerformanceTests():
    if not qa_config.TestEnabled("performance"):
        ReportTestSkip("performance related tests", "performance")
        return

    # For reproducible performance, run performance tests with the watcher
    # paused.
    qa_utils.AssertCommand(["gnt-cluster", "watcher", "pause", "4h"])

    if qa_config.TestEnabled("jobqueue-performance"):
        RunTest(qa_performance.TestParallelMaxInstanceCreationPerformance)
        RunTest(
            qa_performance.TestParallelNodeCountInstanceCreationPerformance)

        instances = qa_performance.CreateAllInstances()

        RunTest(qa_performance.TestParallelModify, instances)
        RunTest(qa_performance.TestParallelInstanceOSOperations, instances)
        RunTest(qa_performance.TestParallelInstanceQueries, instances)

        qa_performance.RemoveAllInstances(instances)

        RunTest(qa_performance.TestJobQueueSubmissionPerformance)

    if qa_config.TestEnabled("parallel-performance"):
        if qa_config.IsTemplateSupported(constants.DT_DRBD8):
            RunTest(qa_performance.TestParallelDRBDInstanceCreationPerformance)
        if qa_config.IsTemplateSupported(constants.DT_PLAIN):
            RunTest(
                qa_performance.TestParallelPlainInstanceCreationPerformance)

    # Preparations need to be made only if some of these tests are enabled
    if qa_config.IsTemplateSupported(constants.DT_DRBD8) and \
       qa_config.TestEnabled(qa_config.Either(PARALLEL_TEST_DICT.keys())):
        inodes = qa_config.AcquireManyNodes(2)
        try:
            instance = qa_instance.TestInstanceAddWithDrbdDisk(inodes)
            try:
                for (test_name, test_fn) in PARALLEL_TEST_DICT.items():
                    RunTestIf(test_name, test_fn, instance)
            finally:
                instance.Release()
            qa_instance.TestInstanceRemove(instance)
        finally:
            qa_config.ReleaseManyNodes(inodes)

    qa_utils.AssertCommand(["gnt-cluster", "watcher", "continue"])
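
Note that the watcher is only resumed if all tests complete; one way to make the pause/continue bracket exception-safe is a context manager. The following is only a sketch of that alternative, using subprocess directly instead of the QA helpers; it is not what the suite does.

import contextlib
import subprocess

@contextlib.contextmanager
def paused_watcher(duration="4h"):
    # Pause the Ganeti watcher for the duration of the block and resume it
    # even if the block raises.
    subprocess.check_call(["gnt-cluster", "watcher", "pause", duration])
    try:
        yield
    finally:
        subprocess.check_call(["gnt-cluster", "watcher", "continue"])

# Usage sketch (run_all_performance_tests is hypothetical):
# with paused_watcher():
#     run_all_performance_tests()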
Example #5
def TestParallelPlainInstanceCreationPerformance():
    """PERFORMANCE: Parallel plain backed instance creation.

  """
    assert qa_config.IsTemplateSupported(constants.DT_PLAIN)

    nodes = list(_AcquireAllNodes())
    _TestParallelInstanceCreationAndRemoval(max_instances=len(nodes) * 2,
                                            disk_template=constants.DT_PLAIN)
    qa_config.ReleaseManyNodes(nodes)
Example #6
def TestRapiInstanceAdd(node, use_client):
    """Test adding a new instance via RAPI"""
    if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
        return
    instance = qa_config.AcquireInstance()
    instance.SetDiskTemplate(constants.DT_PLAIN)
    try:
        disks = [{
            "size": utils.ParseUnit(d.get("size")),
            "name": str(d.get("name"))
        } for d in qa_config.GetDiskOptions()]
        nic0_mac = instance.GetNicMacAddr(0, constants.VALUE_GENERATE)
        nics = [{
            constants.INIC_MAC: nic0_mac,
        }]

        beparams = {
            constants.BE_MAXMEM:
            utils.ParseUnit(qa_config.get(constants.BE_MAXMEM)),
            constants.BE_MINMEM:
            utils.ParseUnit(qa_config.get(constants.BE_MINMEM)),
        }

        if use_client:
            job_id = _rapi_client.CreateInstance(constants.INSTANCE_CREATE,
                                                 instance.name,
                                                 constants.DT_PLAIN,
                                                 disks,
                                                 nics,
                                                 os=qa_config.get("os"),
                                                 pnode=node.primary,
                                                 beparams=beparams)
        else:
            body = {
                "__version__": 1,
                "mode": constants.INSTANCE_CREATE,
                "name": instance.name,
                "os_type": qa_config.get("os"),
                "disk_template": constants.DT_PLAIN,
                "pnode": node.primary,
                "beparams": beparams,
                "disks": disks,
                "nics": nics,
            }

            (job_id, ) = _DoTests([
                ("/2/instances", _VerifyReturnsJob, "POST", body),
            ])

        _WaitForRapiJob(job_id)

        return instance
    except:
        instance.Release()
        raise
Example #7
def TestParallelPlainInstanceCreationPerformance():
    """PERFORMANCE: Parallel plain backed instance creation.

  """
    if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
        print(
            qa_logging.FormatInfo(
                "Plain disk template not supported, skipping"))
        return

    nodes = list(_AcquireAllNodes())
    _TestParallelInstanceCreationAndRemoval(max_instances=len(nodes) * 2,
                                            disk_template=constants.DT_PLAIN)
    qa_config.ReleaseManyNodes(nodes)
Example #8
def TestInstanceImport(newinst, node, expnode, name):
    """gnt-backup import"""
    templ = constants.DT_PLAIN
    if not qa_config.IsTemplateSupported(templ):
        return
    cmd = ([
        "gnt-backup", "import",
        "--disk-template=%s" % templ, "--no-ip-check",
        "--src-node=%s" % expnode.primary,
        "--src-dir=%s/%s" % (pathutils.EXPORT_DIR, name),
        "--node=%s" % node.primary
    ] + GetGenericAddParameters(
        newinst, templ, force_mac=constants.VALUE_GENERATE))
    cmd.append(newinst.name)
    AssertCommand(cmd)
    newinst.SetDiskTemplate(templ)
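
For readability, here is roughly what the assembled invocation looks like with made-up host and instance names; GetGenericAddParameters would append OS, memory and NIC flags and is replaced by an empty list, and the export directory is assumed to be the common default.

# All values below are made up for illustration.
export_dir = "/srv/ganeti/export"      # assumed default of pathutils.EXPORT_DIR
src_node, dst_node = "node2.example.com", "node1.example.com"
export_name, new_name = "origin.example.com", "newinst.example.com"

cmd = (["gnt-backup", "import",
        "--disk-template=plain", "--no-ip-check",
        "--src-node=%s" % src_node,
        "--src-dir=%s/%s" % (export_dir, export_name),
        "--node=%s" % dst_node]
       + [])                           # GetGenericAddParameters(...) goes here
cmd.append(new_name)
print(" ".join(cmd))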
Example #9
def TestUpgrade():
    """Test gnt-cluster upgrade.

  This tests the 'gnt-cluster upgrade' command by flipping
  between the current and a different version of Ganeti.
  To also exercise subtle points in the configuration up- and downgrades,
  instances are kept across both upgrades.

  """
    this_version = qa_config.get("dir-version")
    other_version = qa_config.get("other-dir-version")
    if this_version is None or other_version is None:
        print(qa_utils.FormatInfo("Test not run, as versions not specified"))
        return

    inst_creates = []
    upgrade_instances = qa_config.get("upgrade-instances", [])
    live_instances = []
    for (test_name, templ, cf, n) in qa_instance.available_instance_tests:
        if (qa_config.TestEnabled(test_name)
                and qa_config.IsTemplateSupported(templ)
                and templ in upgrade_instances):
            inst_creates.append((cf, n))

    for (cf, n) in inst_creates:
        nodes = qa_config.AcquireManyNodes(n)
        live_instances.append(cf(nodes))

    AssertCommand(["gnt-cluster", "upgrade", "--to", other_version])
    AssertCommand(["gnt-cluster", "verify"])

    for instance in live_instances:
        qa_instance.TestInstanceRemove(instance)
        instance.Release()
    live_instances = []
    for (cf, n) in inst_creates:
        nodes = qa_config.AcquireManyNodes(n)
        live_instances.append(cf(nodes))

    AssertCommand(["gnt-cluster", "upgrade", "--to", this_version])
    AssertCommand(["gnt-cluster", "verify"])

    for instance in live_instances:
        qa_instance.TestInstanceRemove(instance)
        instance.Release()
Example #10
def TestRapiInstanceMultiAlloc(node):
    """Test adding two new instances via the RAPI instance-multi-alloc method"""
    if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
        return

    JOBS_KEY = "jobs"

    instance_one = qa_config.AcquireInstance()
    instance_two = qa_config.AcquireInstance()
    instance_list = [instance_one, instance_two]
    try:
        rapi_dicts = [
            _GenInstanceAllocationDict(node, i) for i in instance_list
        ]

        job_id = _rapi_client.InstancesMultiAlloc(rapi_dicts)

        results, = _WaitForRapiJob(job_id)

        if JOBS_KEY not in results:
            raise qa_error.Error("RAPI instance-multi-alloc did not deliver "
                                 "information about created jobs")

        if len(results[JOBS_KEY]) != len(instance_list):
            raise qa_error.Error(
                "RAPI instance-multi-alloc failed to return the "
                "desired number of jobs!")

        for success, job in results[JOBS_KEY]:
            if success:
                _WaitForRapiJob(job)
            else:
                raise qa_error.Error("Failed to create instance in "
                                     "instance-multi-alloc call")
    except:
        # Note that even though the instances are released here, some of the
        # instance creations may in fact have succeeded. Handling this in a
        # better way may be possible, but it is not necessary as the QA has
        # already failed at this point.
        for instance in instance_list:
            instance.Release()
        raise

    return (instance_one, instance_two)
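
The result handling above assumes a dict with a "jobs" key holding (success, job_id) pairs. Below is a standalone sketch of that post-processing, decoupled from RAPI; the input structure in the final assertion is fabricated purely for illustration.

def collect_multi_alloc_jobs(results, expected_count, jobs_key="jobs"):
    # Validate a multi-allocation style result and return the job ids to
    # wait on, raising on any submission failure.
    if jobs_key not in results:
        raise ValueError("result does not describe the created jobs")
    entries = results[jobs_key]
    if len(entries) != expected_count:
        raise ValueError("expected %d jobs, got %d" % (expected_count,
                                                       len(entries)))
    job_ids = []
    for (success, job) in entries:
        if not success:
            raise ValueError("job submission failed: %r" % (job,))
        job_ids.append(job)
    return job_ids

assert collect_multi_alloc_jobs({"jobs": [(True, 17), (True, 18)]}, 2) == [17, 18]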
Example #11
def RunQa():
    """Main QA body.

  """
    RunTestBlock(RunEnvTests)
    SetupCluster()

    RunTestBlock(RunClusterTests)
    RunTestBlock(RunOsTests)

    RunTestIf("tags", qa_tags.TestClusterTags)

    RunTestBlock(RunCommonNodeTests)
    RunTestBlock(RunGroupListTests)
    RunTestBlock(RunGroupRwTests)
    RunTestBlock(RunNetworkTests)
    RunTestBlock(RunFilterTests)

    # The master shouldn't be readded or put offline; "delay" needs a non-master
    # node to test
    pnode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
    try:
        RunTestIf("node-readd", qa_node.TestNodeReadd, pnode)
        RunTestIf("node-modify", qa_node.TestNodeModify, pnode)
        RunTestIf("delay", qa_cluster.TestDelay, pnode)
    finally:
        pnode.Release()

    # Make sure the cluster is clean before running instance tests
    qa_cluster.AssertClusterVerify()

    pnode = qa_config.AcquireNode()
    try:
        RunTestIf("tags", qa_tags.TestNodeTags, pnode)

        if qa_rapi.Enabled():
            RunTest(qa_rapi.TestNode, pnode)

            if (qa_config.TestEnabled("instance-add-plain-disk")
                    and qa_config.IsTemplateSupported(constants.DT_PLAIN)):
                # Normal instance allocation via RAPI
                for use_client in [True, False]:
                    rapi_instance = RunTest(qa_rapi.TestRapiInstanceAdd, pnode,
                                            use_client)
                    try:
                        if qa_config.TestEnabled(
                                "instance-plain-rapi-common-tests"):
                            RunCommonInstanceTests(rapi_instance, [pnode])
                        RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance,
                                use_client)
                    finally:
                        rapi_instance.Release()
                    del rapi_instance

                # Multi-instance allocation
                rapi_instance_one, rapi_instance_two = \
                  RunTest(qa_rapi.TestRapiInstanceMultiAlloc, pnode)

                try:
                    RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_one,
                            True)
                    RunTest(qa_rapi.TestRapiInstanceRemove, rapi_instance_two,
                            True)
                finally:
                    rapi_instance_one.Release()
                    rapi_instance_two.Release()
    finally:
        pnode.Release()

    config_list = [
        ("default-instance-tests", lambda: None, lambda _: None),
        (IsExclusiveStorageInstanceTestEnabled,
         lambda: qa_cluster.TestSetExclStorCluster(True),
         qa_cluster.TestSetExclStorCluster),
    ]
    for (conf_name, setup_conf_f, restore_conf_f) in config_list:
        if qa_config.TestEnabled(conf_name):
            oldconf = setup_conf_f()
            RunTestBlock(RunInstanceTests)
            restore_conf_f(oldconf)

    pnode = qa_config.AcquireNode()
    try:
        if qa_config.TestEnabled(
            ["instance-add-plain-disk", "instance-export"]):
            for shutdown in [False, True]:
                instance = RunTest(qa_instance.TestInstanceAddWithPlainDisk,
                                   [pnode])
                try:
                    expnode = qa_config.AcquireNode(exclude=pnode)
                    try:
                        if shutdown:
                            # Stop instance before exporting and removing it
                            RunTest(qa_instance.TestInstanceShutdown, instance)
                        RunTest(qa_instance.TestInstanceExportWithRemove,
                                instance, expnode)
                        RunTest(qa_instance.TestBackupList, expnode)
                    finally:
                        expnode.Release()
                finally:
                    instance.Release()
                del expnode
                del instance
            qa_cluster.AssertClusterVerify()

    finally:
        pnode.Release()

    if qa_rapi.Enabled():
        RunTestIf("filters", qa_rapi.TestFilters)

    RunTestIf("cluster-upgrade", qa_cluster.TestUpgrade)

    RunTestBlock(RunExclusiveStorageTests)
    RunTestIf(["cluster-instance-policy", "instance-add-plain-disk"],
              TestIPolicyPlainInstance)

    RunTestBlock(RunCustomSshPortTests)

    RunTestIf("instance-add-restricted-by-disktemplates",
              qa_instance.TestInstanceCreationRestrictedByDiskTemplates)

    # Test removing instance with offline drbd secondary
    if qa_config.TestEnabled(
        ["instance-remove-drbd-offline", "instance-add-drbd-disk"]):
        # Make sure the master is not put offline
        snode = qa_config.AcquireNode(exclude=qa_config.GetMasterNode())
        try:
            pnode = qa_config.AcquireNode(exclude=snode)
            try:
                instance = qa_instance.TestInstanceAddWithDrbdDisk(
                    [pnode, snode])
                set_offline = lambda node: qa_node.MakeNodeOffline(node, "yes")
                set_online = lambda node: qa_node.MakeNodeOffline(node, "no")
                RunTest(qa_instance.TestRemoveInstanceOfflineNode, instance,
                        snode, set_offline, set_online)
            finally:
                pnode.Release()
        finally:
            snode.Release()
        qa_cluster.AssertClusterVerify()

    RunTestBlock(RunMonitoringTests)

    RunPerformanceTests()

    RunTestIf("cluster-destroy", qa_node.TestNodeRemoveAll)

    RunTestIf("cluster-destroy", qa_cluster.TestClusterDestroy)
Example #12
def RunInstanceTests():
    """Create and exercise instances."""

    requested_conversions = qa_config.get("convert-disk-templates", [])
    supported_conversions = \
        set(requested_conversions).difference(constants.DTS_NOT_CONVERTIBLE_TO)
    for (test_name, templ, create_fun, num_nodes) in \
        qa_instance.available_instance_tests:
        if (qa_config.TestEnabled(test_name)
                and qa_config.IsTemplateSupported(templ)):
            inodes = qa_config.AcquireManyNodes(num_nodes)
            try:
                instance = RunTest(create_fun, inodes)
                try:
                    RunTestIf("instance-user-down",
                              qa_instance.TestInstanceUserDown, instance)
                    RunTestIf("instance-communication",
                              qa_instance.TestInstanceCommunication, instance,
                              qa_config.GetMasterNode())
                    RunTestIf("cluster-epo", qa_cluster.TestClusterEpo)
                    RunDaemonTests(instance)
                    for node in inodes:
                        RunTestIf("haskell-confd", qa_node.TestNodeListDrbd,
                                  node, templ == constants.DT_DRBD8)
                    if len(inodes) > 1:
                        RunTestIf("group-rwops",
                                  qa_group.TestAssignNodesIncludingSplit,
                                  constants.INITIAL_NODE_GROUP_NAME,
                                  inodes[0].primary, inodes[1].primary)
                    # This test will run once but it will cover all the supported
                    # user-provided disk template conversions
                    if qa_config.TestEnabled("instance-convert-disk"):
                        if (len(supported_conversions) > 1
                                and instance.disk_template
                                in supported_conversions):
                            RunTest(qa_instance.TestInstanceShutdown, instance)
                            RunTest(
                                qa_instance.TestInstanceConvertDiskTemplate,
                                instance, supported_conversions)
                            RunTest(qa_instance.TestInstanceStartup, instance)
                            # At this point we clear the set because the
                            # requested conversions have been tested
                            supported_conversions.clear()
                        else:
                            test_desc = "Converting instance of template %s" % templ
                            ReportTestSkip(test_desc, "conversion feature")
                    RunTestIf("instance-modify-disks",
                              qa_instance.TestInstanceModifyDisks, instance)
                    RunCommonInstanceTests(instance, inodes)
                    if qa_config.TestEnabled("instance-modify-primary"):
                        othernode = qa_config.AcquireNode()
                        RunTest(qa_instance.TestInstanceModifyPrimaryAndBack,
                                instance, inodes[0], othernode)
                        othernode.Release()
                    RunGroupListTests()
                    RunExportImportTests(instance, inodes)
                    RunHardwareFailureTests(instance, inodes)
                    RunRepairDiskSizes()
                    RunTestIf(["rapi", "instance-data-censorship"],
                              qa_rapi.TestInstanceDataCensorship, instance,
                              inodes)
                    RunTest(qa_instance.TestInstanceRemove, instance)
                finally:
                    instance.Release()
                del instance
            finally:
                qa_config.ReleaseManyNodes(inodes)
            qa_cluster.AssertClusterVerify()
        else:
            test_desc = "Creating instances of template %s" % templ
            if not qa_config.TestEnabled(test_name):
                ReportTestSkip(test_desc, test_name)
            else:
                ReportTestSkip(test_desc, "disk template %s" % templ)
Example #13
def TestIPolicyPlainInstance():
    """Test instance policy interaction with instances"""
    params = [
        "memory-size", "cpu-count", "disk-count", "disk-size", "nic-count"
    ]
    if not qa_config.IsTemplateSupported(constants.DT_PLAIN):
        print "Template %s not supported" % constants.DT_PLAIN
        return

    # This test assumes that the group policy is empty
    (_, old_specs) = qa_cluster.TestClusterSetISpecs()
    # We also assume there is only one min/max bound
    assert len(old_specs[constants.ISPECS_MINMAX]) == 1
    node = qa_config.AcquireNode()
    try:
        # Log of policy changes, list of tuples:
        # (full_change, incremental_change, policy_violated)
        history = []
        instance = qa_instance.TestInstanceAddWithPlainDisk([node])
        try:
            policyerror = [constants.CV_EINSTANCEPOLICY]
            for par in params:
                (iminval,
                 imaxval) = qa_instance.GetInstanceSpec(instance.name, par)
                # Some specs must be a multiple of 4
                new_spec = _BuildSpecDict(par, imaxval + 4, imaxval + 4,
                                          imaxval + 4)
                history.append((None, new_spec, True))
                if iminval > 0:
                    # Some specs must be a multiple of 4
                    if iminval >= 4:
                        upper = iminval - 4
                    else:
                        upper = iminval - 1
                    new_spec = _BuildSpecDict(par, 0, upper, upper)
                    history.append((None, new_spec, True))
                history.append((old_specs, None, False))

            # Test with two instance specs
            double_specs = copy.deepcopy(old_specs)
            double_specs[constants.ISPECS_MINMAX] = \
                double_specs[constants.ISPECS_MINMAX] * 2
            (par1, par2) = params[0:2]
            (_, imaxval1) = qa_instance.GetInstanceSpec(instance.name, par1)
            (_, imaxval2) = qa_instance.GetInstanceSpec(instance.name, par2)
            old_minmax = old_specs[constants.ISPECS_MINMAX][0]
            history.extend([
                (double_specs, None, False),
                # The first min/max limit is being violated
                (None,
                 _BuildDoubleSpecDict(0, par1, imaxval1 + 4, imaxval1 + 4,
                                      imaxval1 + 4), False),
                # Both min/max limits are being violated
                (None,
                 _BuildDoubleSpecDict(1, par2, imaxval2 + 4, None,
                                      imaxval2 + 4), True),
                # The second min/max limit is being violated
                (None,
                 _BuildDoubleSpecDict(0, par1,
                                      old_minmax[constants.ISPECS_MIN][par1],
                                      old_specs[constants.ISPECS_STD][par1],
                                      old_minmax[constants.ISPECS_MAX][par1]),
                 False),
                (old_specs, None, False),
            ])

            # Apply the changes, and check policy violations after each change
            qa_cluster.AssertClusterVerify()
            for (new_specs, diff_specs, failed) in history:
                qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                                diff_specs=diff_specs)
                if failed:
                    qa_cluster.AssertClusterVerify(warnings=policyerror)
                else:
                    qa_cluster.AssertClusterVerify()

            qa_instance.TestInstanceRemove(instance)
        finally:
            instance.Release()

        # Now we replay the same policy changes, and we expect that the instance
        # cannot be created for the cases where we had a policy violation above
        for (new_specs, diff_specs, failed) in history:
            qa_cluster.TestClusterSetISpecs(new_specs=new_specs,
                                            diff_specs=diff_specs)
            if failed:
                qa_instance.TestInstanceAddWithPlainDisk([node], fail=True)
            # Instance creation with no policy violation has been tested already
    finally:
        node.Release()
Example #14
def TestClusterModifyFileBasedStorageDir(file_disk_template, dir_config_key,
                                         default_dir, option_name):
    """Tests gnt-cluster modify wrt to file-based directory options.

  @type file_disk_template: string
  @param file_disk_template: file-based disk template
  @type dir_config_key: string
  @param dir_config_key: key for the QA config to retrieve the default
     directory value
  @type default_dir: string
  @param default_dir: default directory, if the QA config does not specify
     it
  @type option_name: string
  @param option_name: name of the option of 'gnt-cluster modify' to
     change the directory

  """
    enabled_disk_templates = qa_config.GetEnabledDiskTemplates()
    assert file_disk_template in constants.DTS_FILEBASED
    if not qa_config.IsTemplateSupported(file_disk_template):
        return

    # Get some non-file-based disk template to disable file storage
    other_disk_template = _GetOtherEnabledDiskTemplate(
        utils.storage.GetDiskTemplatesOfStorageTypes(constants.ST_FILE,
                                                     constants.ST_SHARED_FILE),
        enabled_disk_templates)

    file_storage_dir = qa_config.get(dir_config_key, default_dir)
    invalid_file_storage_dir = "/boot/"

    for fail, cmd in [
        (False, [
            "gnt-cluster", "modify",
            "--enabled-disk-templates=%s" % file_disk_template,
            "--ipolicy-disk-templates=%s" % file_disk_template
        ]),
        (False, [
            "gnt-cluster", "modify",
            "--%s=%s" % (option_name, file_storage_dir)
        ]),
        (False, [
            "gnt-cluster", "modify",
            "--%s=%s" % (option_name, invalid_file_storage_dir)
        ]),
            # file storage dir is set to an unacceptable path, thus verify
            # should fail
        (True, ["gnt-cluster", "verify"]),
            # unsetting the storage dir while file storage is enabled
            # should fail
        (True, ["gnt-cluster", "modify",
                "--%s=" % option_name]),
        (False, [
            "gnt-cluster", "modify",
            "--%s=%s" % (option_name, file_storage_dir)
        ]),
        (False, [
            "gnt-cluster", "modify",
            "--enabled-disk-templates=%s" % other_disk_template,
            "--ipolicy-disk-templates=%s" % other_disk_template
        ]),
        (False, [
            "gnt-cluster", "modify",
            "--%s=%s" % (option_name, invalid_file_storage_dir)
        ]),
            # file storage is set to an unacceptable path, but file storage
            # is disabled, thus verify should not fail
        (False, ["gnt-cluster", "verify"]),
            # unsetting the file storage dir while file storage is not enabled
            # should be fine
        (False, ["gnt-cluster", "modify",
                 "--%s=" % option_name]),
            # resetting everything to sane values
        (False, [
            "gnt-cluster", "modify",
            "--%s=%s" % (option_name, file_storage_dir),
            "--enabled-disk-templates=%s" % ",".join(enabled_disk_templates),
            "--ipolicy-disk-templates=%s" % ",".join(enabled_disk_templates)
        ])
    ]:
        AssertCommand(cmd, fail=fail)