Code example #1
    def setUp(self):
        self.cluster = mock.Mock(spec=Cluster)
        self.cluster.name.return_value = "Mock Cluster"
        self.vms = [mock.Mock(spec=Vm) for _ in xrange(4)]
        self.cluster.find_vms_regex.return_value = self.vms
        _nodes = [mock.Mock(spec=Node) for _ in xrange(4)]

        self.cluster_metadata = CurieSettings.Cluster()
        self.cluster_metadata.cluster_name = "mock_cluster"
        self.cluster_metadata.cluster_hypervisor_info.CopyFrom(
            CurieSettings.Cluster.ClusterHypervisorInfo())
        self.cluster_metadata.cluster_management_server_info.CopyFrom(
            CurieSettings.Cluster.ClusterManagementServerInfo())
        self.cluster_metadata.cluster_software_info.CopyFrom(
            CurieSettings.Cluster.ClusterSoftwareInfo())
        for id, node in enumerate(_nodes):
            node.node_id.return_value = id
            curr_node = self.cluster_metadata.cluster_nodes.add()
            curr_node.id = str(id)
        self.cluster.metadata.return_value = self.cluster_metadata

        self.cluster.nodes.return_value = _nodes
        self.cluster.node_count.return_value = len(_nodes)
        self.scenario = Scenario(
            cluster=self.cluster,
            output_directory=environment.test_output_dir(self),
            goldimages_directory="/fake/goldimages/directory/")
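        # The group name deliberately mixes regex metacharacters and other
        # punctuation, presumably to exercise VM group name handling.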
        self.vm_group = VMGroup(self.scenario, "group_0.*[(-!&")
        for mock_vm, vm_name in zip(self.vms, self.vm_group.get_vms_names()):
            mock_vm.vm_name.return_value = vm_name
        self.scenario.vm_groups = {self.vm_group._name: self.vm_group}
Code example #2
File: test_vm_group.py Project: nutanix/curie
 def test_Grow_count_per_cluster(self):
   vm_group = VMGroup(self.scenario, self.group_name,
                      template="ubuntu1604",
                      template_type="DISK",
                      count_per_cluster=1)
   self.scenario.vm_groups = {self.group_name: vm_group}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
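   # Growing the 1-VM group to count_per_cluster=2 should create exactly one
   # additional VM, bringing the group to two.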
   new_vms = steps.vm_group.Grow(self.scenario, self.group_name,
                                 count_per_cluster=2)()
   self.assertEqual(len(new_vms), 1)
   self.assertEqual(len(vm_group.get_vms()), 2)
Code example #3
File: test_vm_group.py Project: nutanix/curie
 def test_Grow_mixed_starting_with_count_per_node(self):
   vm_group = VMGroup(self.scenario, self.group_name,
                      template="ubuntu1604",
                      template_type="DISK",
                      count_per_node=1)
   self.scenario.vm_groups = {self.group_name: vm_group}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
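   # The group starts with one VM per node; growing to eight VMs per cluster
   # should create only the difference.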
   new_vms = steps.vm_group.Grow(self.scenario, self.group_name,
                                 count_per_cluster=8)()
   self.assertEqual(len(new_vms), 8 - len(self.cluster.nodes()))
   self.assertEqual(len(vm_group.get_vms()), 8)
Code example #4
File: test_workload.py Project: nutanix/curie
 def test_workload_prefill_concurrent_2_node_limited_by_4_vms(self):
   self.cluster.nodes.return_value = self.nodes[:2]
   self.cluster.node_count.return_value = len(self.cluster.nodes())
   self.vm_group = VMGroup(self.scenario, "group_0", count_per_cluster=4)
   self.scenario.vm_groups = {self.vm_group._name: self.vm_group}
   workload = Workload.parse(self.scenario, "test_workload1",
                             {"vm_group": "group_0",
                              "config_file": "oltp.fio"})
   with mock.patch.object(workload, "_config_class") as mock_fio_config:
     mock_config_instance = mock.Mock(spec=FioConfiguration)
     mock_fio_config.load.return_value = mock_config_instance
     workload.prefill_iogen(max_concurrent=8)
     self.assertEqual(mock_fio_config.load.call_count, 1)
     # There are 2 nodes and 4 VMs, iodepth is 2.
     mock_config_instance.convert_prefill.assert_called_once_with(iodepth=2)
Code example #5
    def test_big_boot_vdi_vmm(self):
        if not isinstance(self.cluster, HyperVCluster):
            raise unittest.SkipTest(
                "Test requires a VMM cluster (found '%r')" %
                type(self.cluster))
        self.scenario.vm_groups = {
            self.group_name:
            VMGroup(self.scenario,
                    self.group_name,
                    template="ubuntu1604",
                    template_type="DISK",
                    count_per_node=100)
        }
        vms = steps.vm_group.CloneFromTemplate(self.scenario,
                                               self.group_name)()
        steps.vm_group.PowerOn(self.scenario, self.group_name)()

        # As this is a large VM batch, it's inefficient to call 'is_powered_on'
        # on each VM individually, so generate a power state map with a bulk query.
        vm_id_power_state_map = self.scenario.cluster.get_power_state_for_vms(
            vms)
        power_states = set(
            map(lambda x: str(x).lower(), vm_id_power_state_map.values()))
        self.assertTrue(power_states == set(["running"]),
                        msg="Not all VMs powered on. States: %s" %
                        power_states)

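        # Each cloned VM should also be individually accessible after the bulk
        # power-state check.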
        for vm in self.cluster.find_vms([vm.vm_name() for vm in vms]):
            self.assertTrue(vm.is_accessible())
Code example #6
File: test_meta.py Project: nutanix/curie
 def setUp(self):
     self.cluster = mock_cluster()
     self.scenario = Scenario(
         cluster=self.cluster,
         output_directory=environment.test_output_dir(self))
     self.vm_group = VMGroup(self.scenario, "group_0")
     self.scenario.vm_groups = {self.vm_group._name: self.vm_group}
Code example #7
  def __create_stubs(scenario, data):
    """
    Stub out VMGroups, Workloads, and Results in a scenario.

    Allows testing _parse_steps without calling _parse_section so they can be
    tested more independently.

    Args:
      scenario (Scenario): Scenario to modify.
      data (dict): Configuration object.

    Returns:
      None
    """
    scenario.vm_groups = {}
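    # Each entry under "vms" (and likewise "workloads" and "results") is a
    # single-key dict whose key is the item's name.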
    for vm_group in data.get("vms", []):
      vm_group_name = vm_group.keys()[0]
      scenario.vm_groups[vm_group_name] = VMGroup(scenario,
                                                  vm_group_name)
    scenario.workloads = {}
    for workload in data.get("workloads", []):
      workload_name = workload.keys()[0]
      vm_group_name = workload.values()[0]["vm_group"]
      scenario.workloads[workload_name] = Workload(scenario,
                                                   workload_name,
                                                   vm_group_name)
    scenario.results_map = {}
    for result in data.get("results", []):
      result_name = result.keys()[0]
      scenario.results_map[result_name] = BaseResult(scenario,
                                                     result_name)
Code example #8
 def test_ping_uvmgroups(self):
     playbook_name = "ping.yml"
     playbook_path = os.path.join(self.scenario.source_directory,
                                  playbook_name)
     for vm_group_name in self.vm_group_names:
         self.scenario.vm_groups[vm_group_name] = VMGroup(
             self.scenario,
             vm_group_name,
             template="ubuntu1604",
             template_type="DISK",
             count_per_cluster=2)
         vm_group.CloneFromTemplate(self.scenario, vm_group_name)()
         vm_group.PowerOn(self.scenario, vm_group_name)()
     for vm in self.cluster.find_vms([
             vm.vm_name()
             for name, group in self.scenario.vm_groups.iteritems()
             for vm in group.get_vms()
     ]):
         self.assertTrue(vm.is_powered_on())
         self.assertTrue(vm.is_accessible())
     step = playbook.Run(self.scenario, playbook_name,
                         [name for name in self.scenario.vm_groups.keys()],
                         "nutanix", "nutanix/4u")
     uvm_ips = [
         str(IPv4Address(unicode(vm.vm_ip())))
         for name, group in self.scenario.vm_groups.iteritems()
         for vm in group.get_vms()
     ]
     self.assertGreaterEqual(len(uvm_ips), 1)
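     # Patch the step's annotation hook so its message can be asserted without
     # creating a real annotation.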
     with patch.object(step, "create_annotation") as m_annotate:
         step()
     m_annotate.assert_called_once_with("Playbook %s changed hosts %s." %
                                        (playbook_path, sorted(uvm_ips)))
Code example #9
File: test_workload.py Project: nutanix/curie
 def setUp(self):
   self.cluster = mock.Mock(spec=Cluster)
   self.vms = [mock.Mock(spec=Vm) for _ in xrange(4)]
   self.nodes = []
   for index in range(4):
     node = mock.Mock(spec=Node)
     node.node_id.return_value = "node_%d" % index
     self.nodes.append(node)
   self.cluster.nodes.return_value = self.nodes
   self.cluster.node_count.return_value = len(self.cluster.nodes())
   self.scenario = Scenario(
     cluster=self.cluster,
     source_directory=os.path.join(environment.resource_dir(), "fio"),
     output_directory=environment.test_output_dir(self))
   self.vm_group = VMGroup(self.scenario, "group_0")
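   # Name each mock VM after one of the names the group expects so lookups by
   # VM name resolve.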
   for mock_vm, vm_name in zip(self.vms, self.vm_group.get_vms_names()):
     mock_vm.vm_name.return_value = vm_name
   self.scenario.vm_groups = {self.vm_group._name: self.vm_group}
Code example #10
File: test_vm_group.py Project: nutanix/curie
 def test_run_command_vms_not_exist(self):
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=1)}
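   # The group's VMs have not been cloned, so running a command against them
   # should fail.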
   with self.assertRaises(CurieTestException):
     steps.vm_group.RunCommand(self.scenario, self.group_name,
                               "ls /home/nutanix/ > "
                               "/home/nutanix/test_run_command.txt")()
Code example #11
File: test_vm_group.py Project: nutanix/curie
 def test_migrate_vms_bad_host(self):
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=1)}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
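   # Destination node index 1000 does not exist on the cluster, so the
   # migration should raise.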
   with self.assertRaises(CurieTestException):
     steps.vm_group.MigrateGroup(self.scenario, self.group_name, 1, 1000)()
Code example #12
File: test_vm_group.py Project: nutanix/curie
 def test_Grow_verify_mixed_count_per_node_less_than_existing(self):
   vm_group = VMGroup(self.scenario, self.group_name,
                      template="ubuntu1604",
                      template_type="DISK",
                      count_per_node=1)
   self.scenario.vm_groups = {self.group_name: vm_group}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   step = steps.vm_group.Grow(self.scenario, self.group_name,
                              count_per_cluster=len(self.cluster.nodes()) - 1)
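   # Growing to a cluster-wide count smaller than the number of existing VMs
   # is invalid, so verify() should raise.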
   with self.assertRaises(CurieTestException):
     step.verify()
Code example #13
File: test_workload.py Project: nutanix/curie
 def setUp(self):
     self.cluster = mock.Mock(spec=Cluster)
     _nodes = [mock.Mock(spec=Node) for _ in xrange(4)]
     for id, node in enumerate(_nodes):
         node.node_id.return_value = id
     self.cluster.nodes.return_value = _nodes
     self.cluster.node_count.return_value = len(_nodes)
     self.scenario = Scenario(
         cluster=self.cluster,
         output_directory=environment.test_output_dir(self))
     self.vms = [mock.Mock(spec=Vm) for _ in xrange(4)]
     self.cluster.find_vms_regex.return_value = self.vms
     for vm in self.vms:
         vm.is_accessible.return_value = True
     self.vm_group = VMGroup(self.scenario, "group_0")
     for mock_vm, vm_name in zip(self.vms, self.vm_group.get_vms_names()):
         mock_vm.vm_name.return_value = vm_name
     self.workload = mock.Mock(spec=Workload)
     self.workload.name.return_value = "workload_0"
     self.workload.vm_group.return_value = self.vm_group
     self.scenario.workloads = {self.workload.name(): self.workload}
Code example #14
File: test_workload.py Project: nutanix/curie
 def test_PrefillRun_default(self):
   vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
                     template_type="DISK", count_per_cluster=1,
                     data_disks=[16, 16, 16, 16, 16, 16])
   workload = Workload(test=self.scenario, name=self.workload_name,
                       vm_group=vmgroup, generator="fio",
                       config_file=self.valid_fio_path)
   self.scenario.vm_groups = {self.group_name: vmgroup}
   self.scenario.workloads = {self.workload_name: workload}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
   steps.workload.PrefillRun(self.scenario, self.workload_name)()
Code example #15
File: test_vm_group.py Project: nutanix/curie
 def test_template_clone_default(self):
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=1,
                              data_disks=[1])}
   vms = steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
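   # Clone names follow the '__curie_test_<scenario id>_<sanitized group
   # name>_<zero-padded index>' pattern.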
   expected = ["__curie_test_%d_%s_0000" %
               (self.scenario.id,
                NameUtil.sanitize_filename(self.group_name))]
   self.assertEqual(set([vm.vm_name() for vm in vms]), set(expected))
Code example #16
File: test_vm_group.py Project: nutanix/curie
 def test_run_command_invalid(self):
   count = 2
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=count)}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
   with self.assertRaises(CurieException):
     steps.vm_group.RunCommand(self.scenario, self.group_name,
                               "not_a_valid_command")()
Code example #17
File: test_vm_group.py Project: nutanix/curie
 def test_power_on(self):
   count = 2
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=count)}
   vms = steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
   for vm in self.cluster.find_vms([vm.vm_name() for vm in vms]):
     self.assertTrue(vm.is_powered_on())
     self.assertTrue(vm.is_accessible())
Code example #18
File: test_vm_group.py Project: nutanix/curie
 def test_migrate_vms_same_host(self):
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_node=1)}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
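   # Migrating from node 1 onto the same node should leave placement
   # unchanged: one VM per node.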
   steps.vm_group.MigrateGroup(self.scenario, self.group_name, 1, 1)()
   node_vm_map = self.__get_node_vm_map()
   for node in self.scenario.cluster.nodes():
     # Ensure there is 1 VM from the group on each node.
     self.assertEqual(node_vm_map.get(node.node_id()), 1)
Code example #19
File: test_workload.py Project: nutanix/curie
 def test_prefill_invalid_fio_path(self):
   vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
                     template_type="DISK", count_per_cluster=1,
                     data_disks=[16, 16, 16, 16, 16, 16])
   workload = Workload(test=self.scenario, name=self.workload_name,
                       vm_group=vmgroup, generator="fio",
                       config_file=self.invalid_fio_path)
   self.scenario.vm_groups = {self.group_name: vmgroup}
   self.scenario.workloads = {self.workload_name: workload}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
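   # The workload references an invalid FIO config path, so starting the
   # prefill should raise.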
   with self.assertRaises(CurieTestException):
     steps.workload.PrefillStart(self.scenario, self.workload_name)()
Code example #20
File: test_workload.py Project: nutanix/curie
 def test_wait_after_finish(self):
   vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
                     template_type="DISK", count_per_cluster=1,
                     data_disks=[16, 16, 16, 16, 16, 16])
   workload = Workload(test=self.scenario, name=self.workload_name,
                       vm_group=vmgroup, generator="fio",
                       config_file=self.valid_fio_path)
   self.scenario.vm_groups = {self.group_name: vmgroup}
   self.scenario.workloads = {self.workload_name: workload}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
   duration_secs = 30
   steps.workload.Start(self.scenario, self.workload_name, duration_secs)()
   steps.workload.Wait(self.scenario, self.workload_name)()
Code example #21
File: test_vm_group.py Project: nutanix/curie
 def test_run_command(self):
   count = 2
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=count)}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
   steps.vm_group.RunCommand(self.scenario, self.group_name,
                             "ls /home/nutanix/ > "
                             "/home/nutanix/test_run_command.txt")()
   steps.vm_group.RunCommand(self.scenario, self.group_name,
                             "cat /home/nutanix/test_run_command.txt")()
Code example #22
File: test_vm_group.py Project: nutanix/curie
 def test_migrate_vms(self):
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=2)}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
   steps.vm_group.MigrateGroup(self.scenario, self.group_name, 0, 1)()
   first_node_id = steps._util.get_node(self.scenario, 0).node_id()
   second_node_id = steps._util.get_node(self.scenario, 1).node_id()
   node_vm_map = self.__get_node_vm_map()
   # Ensure there are no VMs from the group on the first node.
   self.assertEqual(node_vm_map.get(first_node_id, 0), 0)
   # Ensure there are 2 VMs from the group on the second node.
   self.assertEqual(node_vm_map.get(second_node_id, 0), 2)
Code example #23
File: test_vm_group.py Project: nutanix/curie
 def test_relocate_vms_datastore_same_datastore(self):
   if isinstance(self.scenario.cluster, (AcropolisCluster, HyperVCluster)):
     log.info("Skipping datastore-related test on AHV or Hyper-V cluster")
     return
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=2)}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
   datastore_name = self.scenario.cluster._vcenter_info.vcenter_datastore_name
   step = steps.vm_group.RelocateGroupDatastore(self.scenario,
                                                self.group_name,
                                                datastore_name)
   step()
Code example #24
File: test_vm_group.py Project: nutanix/curie
 def test_template_clone_linked_count(self):
   count = 2
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=count,
                              data_disks=[1])}
   vms = steps.vm_group.CloneFromTemplate(self.scenario, self.group_name,
                                          linked_clone=True)()
   expected = ["__curie_test_%d_%s_%04d" %
               (self.scenario.id,
                NameUtil.sanitize_filename(self.group_name),
                index)
               for index in xrange(count)]
   self.assertEqual(set([vm.vm_name() for vm in vms]), set(expected))
Code example #25
File: test_prometheus.py Project: nutanix/curie
 def test_scenario_target_config(self):
   scenario = Scenario(name="Fake Scenario")
   scenario.cluster = mock_cluster()
   scenario.cluster.name.return_value = "fake_cluster"
   vm_group = VMGroup(scenario, "nasty\nvg?nm /\t#$\\")
   mock_vms = [mock.Mock(spec=Vm) for _ in xrange(3)]
   mock_vms[0].vm_name.return_value = "mock_vm_0"
   mock_vms[1].vm_name.return_value = "mock_vm_1"
   mock_vms[2].vm_name.return_value = "mock_vm_2"
   mock_vms[0].vm_ip.return_value = "fake_addr_0"
   mock_vms[1].vm_ip.return_value = "fake_addr_1"
   mock_vms[2].vm_ip.return_value = None
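   # The third VM has no IP address and should be omitted from the generated
   # target configuration.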
   scenario.vm_groups = {"fake_vm_group": vm_group}
   with mock.patch.object(vm_group, "get_vms") as mock_get_vms:
     mock_get_vms.return_value = mock_vms
     ret = prometheus.scenario_target_config(scenario)
   expected = [
     {
       "labels": {
         "cluster_name": "fake_cluster",
         "instance": "mock_vm_0",
         "job": "xray",
         "scenario_display_name": "Fake Scenario",
         "scenario_id": str(scenario.id),
         "scenario_name": "Fake Scenario",
         "vm_group": "nasty\nvg?nm /\t#$\\",
       },
       "targets": [
         "fake_addr_0:9100",
       ],
     },
     {
       "labels": {
         "cluster_name": "fake_cluster",
         "instance": "mock_vm_1",
         "job": "xray",
         "scenario_display_name": "Fake Scenario",
         "scenario_id": str(scenario.id),
         "scenario_name": "Fake Scenario",
         "vm_group": "nasty\nvg?nm /\t#$\\",
       },
       "targets": [
         "fake_addr_1:9100",
       ],
     }
   ]
   self.assertEqual(ret, expected)
Code example #26
File: test_vm_group.py Project: nutanix/curie
  def test_relocate_vms_datastore_not_exist(self):
    if isinstance(self.scenario.cluster, (AcropolisCluster, HyperVCluster)):
      log.info("Skipping datastore-related test on AHV or Hyper-V cluster")
      return

    self.scenario.vm_groups = {
      self.group_name: VMGroup(self.scenario, self.group_name,
                               template="ubuntu1604",
                               template_type="DISK",
                               count_per_cluster=2)}
    steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
    steps.vm_group.PowerOn(self.scenario, self.group_name)()
    step = steps.vm_group.RelocateGroupDatastore(self.scenario,
                                                 self.group_name,
                                                 "BAD_DATASTORE")
    with self.assertRaises(CurieTestException):
      step()
Code example #27
    def test_monolithic(self):
        # Rolling upgrade one node
        # First make sure the cluster has all nodes powered on.
        for node in self.cluster.nodes():
            self.assertTrue(node.is_powered_on())
        # Deploy VMs, one per node.
        self.scenario.vm_groups = {
            self.group_name:
            VMGroup(self.scenario,
                    self.group_name,
                    template="ubuntu1604",
                    template_type="DISK",
                    count_per_node=1)
        }
        steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
        steps.vm_group.PowerOn(self.scenario, self.group_name)()
        # Check that the VMs are placed appropriately
        placement_map = \
          self.scenario.vm_groups[self.group_name].get_clone_placement_map()
        for vm in self.scenario.vm_groups[self.group_name].get_vms():
            self.assertEqual(
                placement_map[vm.vm_name()].node_id(), vm.node_id(),
                "VM %s not on expected node %s" % (vm.vm_name(), vm.node_id()))
            self.assertTrue(vm.is_powered_on(),
                            "VM %s not powered on." % vm.vm_name())
            # TODO: Doesn't make sense to assert is_accessible yet. Haven't given
            # VMs a chance to become accessible.
            # self.assertTrue(vm.is_accessible(),
            #                 "VM %s not accessible." % vm.vm_name())

        meta_step = steps.meta.RollingUpgrade(self.scenario,
                                              node_count=1,
                                              annotate=True)
        for step in meta_step.itersteps():
            step()
        # Check that the Node is back up
        for node in self.cluster.nodes():
            self.assertTrue(node.is_powered_on())
        # Check that the VMs are placed back where they were
        for vm in self.scenario.vm_groups[self.group_name].get_vms():
            self.assertEqual(
                placement_map[vm.vm_name()].node_id(), vm.node_id(),
                "VM %s not on expected node %s" % (vm.vm_name(), vm.node_id()))
            self.assertTrue(vm.is_powered_on(),
                            "VM %s not powered on." % vm.vm_name())
Code example #28
File: test_vm_group.py Project: nutanix/curie
 def test_template_clone_missing(self):
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="bad_template",
                              template_type="DISK",
                              count_per_cluster=1,
                              data_disks=[1])}
   step = steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)
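   # The template does not exist, so cloning should fail; the exact exception
   # type depends on the backend hypervisor.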
   with self.assertRaises(Exception) as ar:
     step()
   if isinstance(ar.exception, CurieException):
     self.assertIn("Goldimage bad_template-x86_64.vmdk does not exist.",
                   str(ar.exception))
   elif isinstance(ar.exception, CurieTestException):
     self.assertIn("Error copying disk image", str(ar.exception))
   else:
     self.fail("Unexpected exception type %r: %s" %
               (type(ar.exception), ar.exception))
Code example #29
File: test_vm_group.py Project: nutanix/curie
 def test_template_clone_duplicate_vm_names(self):
   self.scenario.vm_groups = {
     self.group_name: VMGroup(self.scenario, self.group_name,
                              template="ubuntu1604",
                              template_type="DISK",
                              count_per_cluster=1,
                              data_disks=[1])}
   step = steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)
   step()
   vm_group = self.scenario.vm_groups[self.group_name]
   self.assertEqual(len(set(vm_group.get_vms())), 1)
   if isinstance(self.scenario.cluster, (AcropolisCluster, HyperVCluster)):
     # AHV and Hyper-V can create duplicate-named VMs. We should probably
     # prevent this.
     pass
   else:
     with self.assertRaises(CurieTestException):
       step()
     self.assertEqual(len(set(vm_group.get_vms())), 1)
Code example #30
File: test_workload.py Project: nutanix/curie
 def test_Stop(self):
   vmgroup = VMGroup(self.scenario, self.group_name, template="ubuntu1604",
                     template_type="DISK", count_per_cluster=1,
                     data_disks=[16, 16, 16, 16, 16, 16])
   workload = Workload(test=self.scenario, name=self.workload_name,
                       vm_group=vmgroup, generator="fio",
                       config_file=self.valid_fio_path)
   self.scenario.vm_groups = {self.group_name: vmgroup}
   self.scenario.workloads = {self.workload_name: workload}
   steps.vm_group.CloneFromTemplate(self.scenario, self.group_name)()
   steps.vm_group.PowerOn(self.scenario, self.group_name)()
   duration_secs = 600
   start_secs = time.time()
   steps.workload.Start(self.scenario, self.workload_name, duration_secs,
                        async=True)()
   time.sleep(30)
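   # Stop the workload well before its 600-second duration and confirm the
   # step returns early.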
   steps.workload.Stop(self.scenario, self.workload_name, duration_secs)()
   total_secs = time.time() - start_secs
   self.assertTrue(total_secs < duration_secs)