Example #1
0
    def test_05_deploy_vm_on_cluster_override_pod(self):
        """Deploy a VM with explicit pod and cluster placement parameters
        and verify it lands on the requested cluster."""
        # Pick the first pod in the zone and enumerate its clusters.
        target_pod = Pod.list(self.apiclient, zoneid=self.zone.id)[0]
        cluster_list = Cluster.list(self.apiclient,
                                    zoneid=self.zone.id,
                                    podid=target_pod.id)

        self.assertEqual(isinstance(cluster_list, list), True,
                         "Check list response returns a valid list")

        target_cluster = cluster_list[0]

        deploy_cmd = deployVirtualMachine.deployVirtualMachineCmd()

        # Mandatory deployment parameters.
        deploy_cmd.zoneid = self.zone.id
        deploy_cmd.serviceofferingid = self.service_offering.id
        vm_template = get_template(self.apiclient,
                                   zone_id=self.zone.id,
                                   hypervisor=target_cluster.hypervisortype)
        deploy_cmd.templateid = vm_template.id

        # Optional placement overrides: pin the VM to a pod and cluster.
        deploy_cmd.podid = target_pod.id
        deploy_cmd.clusterid = target_cluster.id

        deployed_vm = self.apiclient.deployVirtualMachine(deploy_cmd)

        # The host the VM ended up on must belong to the requested cluster.
        host_info = Host.list(self.apiclient, id=deployed_vm.hostid)

        self.assertEqual(host_info[0].clusterid, target_cluster.id,
                         "VM was not deployed on the target cluster")

        self.destroy_vm(deployed_vm.id)
def update_pod(self, state, pod_id):
    """Set a pod's allocation state (Enable/Disable) and return the
    allocation state reported by the update call."""
    updated = Pod.update(self.apiclient, id=pod_id, allocationstate=state)
    return updated.allocationstate
    def tearDownClass(cls):
        """Restore the pod to Enabled if a test left it Disabled, then
        release every resource tracked in cls._cleanup."""
        try:
            pods = Pod.list(cls.apiclient, id=cls.pod.id)
            # Re-enable the pod so later runs start from a clean state.
            if pods[0].allocationstate.lower() == DISABLED.lower():
                update_cmd = updatePod.updatePodCmd()
                update_cmd.id = pods[0].id
                update_cmd.allocationstate = ENABLED
                cls.apiclient.updatePod(update_cmd)

            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
Example #4
0
    def test_02_pods(self):
        """Check the status of pods"""

        # Validate the following
        # 1. List pods
        # 2. Check allocation state is "enabled" or not

        pod_list = Pod.list(self.apiclient,
                            zoneid=self.zone.id,
                            listall=True)
        self.assertEqual(isinstance(pod_list, list), True,
                         "Check if listPods returns a valid response")
        for current_pod in pod_list:
            self.assertEqual(current_pod.allocationstate, 'Enabled',
                             "Pods allocation state should be enabled")
        return
Example #5
0
    def test_baremetal(self):
        """Create and enable a baremetal network offering, enable the
        baremetal DHCP/PXE/userdata providers on the physical network,
        then create a network and attach a shared VLAN IP range to it."""
        self.debug("Test create baremetal network offering")
        offering = NetworkOffering.create(
            self.apiclient, self.services["network_offering"])
        offering.update(self.apiclient, state="Enabled")
        self.cleanup.append(offering)

        phys_net = PhysicalNetwork.list(self.apiclient,
                                        zoneid=self.zoneid)[0]
        # Enable each baremetal service provider on the physical network.
        for provider_name in ("BaremetalDhcpProvider",
                              "BaremetalPxeProvider",
                              "BaremetalUserdataProvider"):
            provider = NetworkServiceProvider.list(
                self.apiclient,
                name=provider_name,
                physical_network_id=phys_net.id)[0]
            NetworkServiceProvider.update(self.apiclient,
                                          id=provider.id,
                                          state='Enabled')

        guest_net = Network.create(self.apiclient,
                                   self.services["network"],
                                   zoneid=self.zoneid,
                                   networkofferingid=offering.id)
        # Insert at position 0 so the network is deleted before the offering.
        self.cleanup.insert(0, guest_net)

        first_pod = Pod.list(self.apiclient)[0]
        iprange_cmd = createVlanIpRange.createVlanIpRangeCmd()
        iprange_cmd.podid = first_pod.id
        iprange_cmd.networkid = guest_net.id
        iprange_cmd.gateway = "10.1.1.1"
        iprange_cmd.netmask = "255.255.255.0"
        iprange_cmd.startip = "10.1.1.20"
        iprange_cmd.endip = "10.1.1.40"
        iprange_cmd.forVirtualNetwork = "false"
        self.apiclient.createVlanIpRange(iprange_cmd)
Example #6
0
 def test_baremetal(self):
     """Create and enable a baremetal network offering, switch on the
     baremetal DHCP, PXE and userdata providers, then build a network
     with a shared VLAN IP range on top of it."""
     self.debug("Test create baremetal network offering")
     net_offering = NetworkOffering.create(
         self.apiclient, self.services["network_offering"])
     net_offering.update(self.apiclient, state="Enabled")
     self.cleanup.append(net_offering)

     phys = PhysicalNetwork.list(self.apiclient, zoneid=self.zoneid)[0]

     def _enable_provider(name):
         # Look up the provider on the physical network and enable it.
         prov = NetworkServiceProvider.list(
             self.apiclient,
             name=name,
             physical_network_id=phys.id)[0]
         NetworkServiceProvider.update(
             self.apiclient, id=prov.id, state='Enabled')

     _enable_provider("BaremetalDhcpProvider")
     _enable_provider("BaremetalPxeProvider")
     _enable_provider("BaremetalUserdataProvider")

     new_network = Network.create(
         self.apiclient,
         self.services["network"],
         zoneid=self.zoneid,
         networkofferingid=net_offering.id)
     # Network must be removed before the offering during cleanup.
     self.cleanup.insert(0, new_network)

     target_pod = Pod.list(self.apiclient)[0]
     iprange_cmd = createVlanIpRange.createVlanIpRangeCmd()
     iprange_cmd.podid = target_pod.id
     iprange_cmd.networkid = new_network.id
     iprange_cmd.gateway = "10.1.1.1"
     iprange_cmd.netmask = "255.255.255.0"
     iprange_cmd.startip = "10.1.1.20"
     iprange_cmd.endip = "10.1.1.40"
     iprange_cmd.forVirtualNetwork = "false"
     self.apiclient.createVlanIpRange(iprange_cmd)
Example #7
0
    def test_03_deploy_vm_on_specific_pod(self):
        """Deploy a VM pinned to a specific pod and verify its placement."""
        chosen_pod = Pod.list(self.apiclient, )[0]

        # Any host in the pod tells us which hypervisor template to use.
        pod_hosts = Host.list(self.apiclient, podid=chosen_pod.id)

        # Build the deployment request targeting the chosen pod.
        deploy_cmd = deployVirtualMachine.deployVirtualMachineCmd()
        deploy_cmd.zoneid = chosen_pod.zoneid
        deploy_cmd.serviceofferingid = self.service_offering.id

        vm_template = get_template(self.apiclient,
                                   hypervisor=pod_hosts[0].hypervisortype)

        deploy_cmd.templateid = vm_template.id
        deploy_cmd.podid = chosen_pod.id
        new_vm = self.apiclient.deployVirtualMachine(deploy_cmd)

        # The host of the new VM must belong to the requested pod.
        host_of_vm = Host.list(self.apiclient, id=new_vm.hostid)

        self.assertEqual(chosen_pod.id, host_of_vm[0].podid,
                         "VM was not deployed on the target pod")
        self.destroy_vm(new_vm.id)
    def test_02_pods(self):
        """Check the status of pods"""

        # Validate the following
        # 1. List pods
        # 2. Check allocation state is "enabled" or not

        zone_pods = Pod.list(self.apiclient, zoneid=self.zone.id, listall=True)
        self.assertEqual(isinstance(zone_pods, list),
                         True,
                         "Check if listPods returns a valid response")
        # Every pod in the zone must report an Enabled allocation state.
        for each_pod in zone_pods:
            self.assertEqual(each_pod.allocationstate,
                             'Enabled',
                             "Pods allocation state should be enabled")
        return
Example #9
0
    def test_list_pod_with_overcommit(self):
        """Verify listPods reports pod CPU/memory capacity consistently
        with the listCapacity API when cluster-level overprovisioning
        factors differ between clusters.

        For each pod with more than one cluster, the test forces the first
        cluster's CPU/memory overcommit factors to differ from the second
        cluster's, compares listPods(showcapacities=True) totals against
        listCapacity, then restores the original factors.
        """

        podlist = Pod.list(self.apiclient)

        for pod in podlist:
            clusterlist = Cluster.list(self.apiclient, podid=pod.id)
            # The comparison only makes sense with at least two clusters.
            if len(clusterlist) > 1:

                updateCpuOvercommitCmd = updateConfiguration.updateConfigurationCmd()
                updateCpuOvercommitCmd.clusterid = clusterlist[0].id
                updateCpuOvercommitCmd.name="cpu.overprovisioning.factor"

                # Make the two clusters' CPU factors differ, remembering the
                # first cluster's original value so it can be restored later.
                if clusterlist[0].cpuovercommitratio == clusterlist[1].cpuovercommitratio and clusterlist[0].cpuovercommitratio == "1.0":
                    # Both at 1.0: bump the first cluster to 2.0.
                    cpuovercommit = "1.0"
                    updateCpuOvercommitCmd.value="2.0"
                    self.apiclient.updateConfiguration(updateCpuOvercommitCmd)

                elif clusterlist[0].cpuovercommitratio != clusterlist[1].cpuovercommitratio:
                    # Already different: nothing to change.
                    cpuovercommit = clusterlist[0].cpuovercommitratio

                else:
                    # Equal but not 1.0: drop the first cluster to 1.0.
                    cpuovercommit = clusterlist[0].cpuovercommitratio
                    updateCpuOvercommitCmd.value="1.0"
                    self.apiclient.updateConfiguration(updateCpuOvercommitCmd)

                updateMemoryOvercommitCmd = updateConfiguration.updateConfigurationCmd()
                updateMemoryOvercommitCmd.clusterid = clusterlist[0].id
                updateMemoryOvercommitCmd.name="mem.overprovisioning.factor"

                # Same forcing logic for the memory overcommit factor.
                if clusterlist[0].memoryovercommitratio == clusterlist[1].memoryovercommitratio and clusterlist[0].memoryovercommitratio == "1.0":
                    memoryovercommit = "1.0"
                    updateMemoryOvercommitCmd.value="2.0"
                    self.apiclient.updateConfiguration(updateMemoryOvercommitCmd)

                elif clusterlist[0].memoryovercommitratio != clusterlist[1].memoryovercommitratio:
                    memoryovercommit = clusterlist[0].memoryovercommitratio

                else:
                    memoryovercommit = clusterlist[0].memoryovercommitratio
                    updateMemoryOvercommitCmd.value="1.0"
                    self.apiclient.updateConfiguration(updateMemoryOvercommitCmd)

                # Fetch pod capacities via both APIs (type 1 = CPU, 0 = memory).
                podWithCap = Pod.list(self.apiclient, id=pod.id, showcapacities=True)
                cpucapacity = Capacities.list(self.apiclient, podid=pod.id, type=1)
                memorycapacity = Capacities.list(self.apiclient, podid=pod.id, type=0)

                # Restore the original overcommit factors before asserting.
                updateCpuOvercommitCmd.value = cpuovercommit
                updateMemoryOvercommitCmd.value = memoryovercommit

                self.apiclient.updateConfiguration(updateCpuOvercommitCmd)
                self.apiclient.updateConfiguration(updateMemoryOvercommitCmd)

                self.assertEqual(
                    [cap for cap in podWithCap[0].capacity if cap.type == 1][0].capacitytotal,
                    cpucapacity[0].capacitytotal,
                    "listPods api returns wrong CPU capacity "
                )

                self.assertEqual(
                    [cap for cap in podWithCap[0].capacity if cap.type == 0][0].capacitytotal,
                    memorycapacity[0].capacitytotal,
                    "listPods api returns wrong memory capacity"
                )
def update_pod(apiclient, state, pod_id):
    """Set a pod's allocation state (Enable/Disable) and return the
    allocation state reported by the update call."""
    result = Pod.update(
        apiclient,
        id=pod_id,
        allocationstate=state)
    return result.allocationstate
Example #11
0
    # NOTE(review): this is a fragment of a larger teardown routine
    # (Python 2 print syntax); the enclosing definition and the
    # `clusters`, `tmp_dict` and `apiClient` bindings are outside this
    # view — confirm against the full script.
    ipranges = PublicIpRange.list(apiClient)
    if ipranges:
      for iprange in ipranges:
        print "ip range name={}, id={}".format(iprange.name, iprange.id)

    # Physical networks are only removed when clusters were found earlier.
    if clusters:
      nets = PhysicalNetwork.list(apiClient)
      if nets:
        for net in nets:
          print "net name={}, id={}".format(net.name, net.id)
          print "Delete PhysicalNetwork"
          # Wrap the id in a fresh object so delete() can be called on it.
          n = PhysicalNetwork(tmp_dict)
          n.id = net.id
          n.delete(apiClient)

    pods = Pod.list(apiClient)
    if pods:
      for pod in pods:
        print "pod name={}, id={}".format(pod.name, pod.id)
        print "Delete Pod"
        p = Pod(tmp_dict)
        p.id = pod.id
        p.delete(apiClient)

    img_storages = ImageStore.list(apiClient)
    if img_storages:
      for img_storage in img_storages:
        print "image store name={}, id={}".format(img_storage.name, img_storage.id)
        print "Delete ImageStore"
        i = ImageStore(tmp_dict)
        i.id = img_storage.id
        # NOTE(review): the fragment ends without calling i.delete(apiClient);
        # the image store object is built but never deleted here — the
        # snippet looks truncated.
    def test_list_pod_with_overcommit(self):
        """Verify listPods reports pod CPU/memory capacity consistently
        with the listCapacity API when cluster-level overprovisioning
        factors differ between clusters.

        For each pod with more than one cluster, the test forces the first
        cluster's CPU/memory overcommit factors to differ from the second
        cluster's, compares listPods(showcapacities=True) totals against
        listCapacity, then restores the original factors.
        """

        podlist = Pod.list(self.apiclient)

        for pod in podlist:
            clusterlist = Cluster.list(self.apiclient, podid=pod.id)
            # The comparison only makes sense with at least two clusters.
            if len(clusterlist) > 1:

                updateCpuOvercommitCmd = updateConfiguration.updateConfigurationCmd(
                )
                updateCpuOvercommitCmd.clusterid = clusterlist[0].id
                updateCpuOvercommitCmd.name = "cpu.overprovisioning.factor"

                # Make the two clusters' CPU factors differ, remembering the
                # first cluster's original value so it can be restored later.
                if clusterlist[0].cpuovercommitratio == clusterlist[
                        1].cpuovercommitratio and clusterlist[
                            0].cpuovercommitratio == "1.0":
                    # Both at 1.0: bump the first cluster to 2.0.
                    cpuovercommit = "1.0"
                    updateCpuOvercommitCmd.value = "2.0"
                    self.apiclient.updateConfiguration(updateCpuOvercommitCmd)

                elif clusterlist[0].cpuovercommitratio != clusterlist[
                        1].cpuovercommitratio:
                    # Already different: nothing to change.
                    cpuovercommit = clusterlist[0].cpuovercommitratio

                else:
                    # Equal but not 1.0: drop the first cluster to 1.0.
                    cpuovercommit = clusterlist[0].cpuovercommitratio
                    updateCpuOvercommitCmd.value = "1.0"
                    self.apiclient.updateConfiguration(updateCpuOvercommitCmd)

                updateMemoryOvercommitCmd = updateConfiguration.updateConfigurationCmd(
                )
                updateMemoryOvercommitCmd.clusterid = clusterlist[0].id
                updateMemoryOvercommitCmd.name = "mem.overprovisioning.factor"

                # Same forcing logic for the memory overcommit factor.
                if clusterlist[0].memoryovercommitratio == clusterlist[
                        1].memoryovercommitratio and clusterlist[
                            0].memoryovercommitratio == "1.0":
                    memoryovercommit = "1.0"
                    updateMemoryOvercommitCmd.value = "2.0"
                    self.apiclient.updateConfiguration(
                        updateMemoryOvercommitCmd)

                elif clusterlist[0].memoryovercommitratio != clusterlist[
                        1].memoryovercommitratio:
                    memoryovercommit = clusterlist[0].memoryovercommitratio

                else:
                    memoryovercommit = clusterlist[0].memoryovercommitratio
                    updateMemoryOvercommitCmd.value = "1.0"
                    self.apiclient.updateConfiguration(
                        updateMemoryOvercommitCmd)

                # Fetch pod capacities via both APIs (type 1 = CPU, 0 = memory).
                podWithCap = Pod.list(self.apiclient,
                                      id=pod.id,
                                      showcapacities=True)
                cpucapacity = Capacities.list(self.apiclient,
                                              podid=pod.id,
                                              type=1)
                memorycapacity = Capacities.list(self.apiclient,
                                                 podid=pod.id,
                                                 type=0)

                # Restore the original overcommit factors before asserting.
                updateCpuOvercommitCmd.value = cpuovercommit
                updateMemoryOvercommitCmd.value = memoryovercommit

                self.apiclient.updateConfiguration(updateCpuOvercommitCmd)
                self.apiclient.updateConfiguration(updateMemoryOvercommitCmd)

                self.assertEqual([
                    cap for cap in podWithCap[0].capacity if cap.type == 1
                ][0].capacitytotal, cpucapacity[0].capacitytotal,
                                 "listPods api returns wrong CPU capacity ")

                self.assertEqual([
                    cap for cap in podWithCap[0].capacity if cap.type == 0
                ][0].capacitytotal, memorycapacity[0].capacitytotal,
                                 "listPods api returns wrong memory capacity")
    def test_01_disable_enable_pod(self):
        """disable enable Pod
            1. Disable pod and verify following things:
                For admin user:
                    -- Should be able to create new vm, snapshot,
                            volume,template,iso in the same pod
                For Non-admin user:
                    -- Should not be able to create new vm, snapshot,
                            volume,template,iso in the same pod
            2. Enable the above disabled pod and verify that:
                -All users should be able to create new vm, snapshot,
                volume,template,iso in the same pod
            3. Try to delete the pod and it should fail with error message:
                - "The pod is not deletable because there are servers
                running in this pod"

        """
        # Step 1
        # Create one VM as a regular user and one as admin while the pod
        # is still enabled; both must survive the pod being disabled.
        vm_user = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        vm_root = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.admin_account.name,
            domainid=self.admin_account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        # Disable the pod and confirm the new allocation state.
        cmd = updatePod.updatePodCmd()
        cmd.id = self.pod.id
        cmd.allocationstate = DISABLED
        self.apiclient.updatePod(cmd)
        podList = Pod.list(self.apiclient, id=self.pod.id)

        self.assertEqual(podList[0].allocationstate, DISABLED, "Check if the pod is in disabled state")
        self.assertEqual(vm_user.state.lower(), "running", "Verify that the user vm is running")

        self.assertEqual(vm_root.state.lower(), "running", "Verify that the admin vm is running")

        # Admin can still deploy a VM in the disabled pod.
        VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.admin_account.name,
            domainid=self.admin_account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )

        root_volume = list_volumes(self.apiclient, virtualmachineid=vm_root.id, type="ROOT", listall=True)
        self.assertEqual(validateList(root_volume)[0], PASS, "list snapshot  is empty for volume id %s" % vm_root.id)

        # Admin snapshot/template operations must work in the disabled pod.
        if self.snapshotSupported:
            Snapshot.create(self.apiclient, root_volume[0].id)

            snapshots = list_snapshots(self.apiclient, volumeid=root_volume[0].id, listall=True)
            self.assertEqual(
                validateList(snapshots)[0], PASS, "list snapshot  is empty for volume id %s" % root_volume[0].id
            )

            Template.create_from_snapshot(self.apiclient, snapshots[0], self.testdata["privatetemplate"])

        # Reuse a built-in template's URL/hypervisor/format for registration.
        builtin_info = get_builtin_template_info(self.apiclient, self.zone.id)
        self.testdata["privatetemplate"]["url"] = builtin_info[0]
        self.testdata["privatetemplate"]["hypervisor"] = builtin_info[1]
        self.testdata["privatetemplate"]["format"] = builtin_info[2]

        Template.register(self.apiclient, self.testdata["privatetemplate"], zoneid=self.zone.id)

        # Admin can still create a volume and an ISO.
        Volume.create(
            self.apiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.admin_account.name,
            domainid=self.admin_account.domainid,
            diskofferingid=self.disk_offering.id,
        )

        Iso.create(
            self.apiclient,
            self.testdata["iso2"],
            zoneid=self.zone.id,
            account=self.admin_account.name,
            domainid=self.admin_account.domainid,
        )

        # Non-admin VM deployment must fail while the pod is disabled.
        with self.assertRaises(Exception):
            VirtualMachine.create(
                self.userapiclient,
                self.testdata["small"],
                templateid=self.template.id,
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                zoneid=self.zone.id,
            )

        root_volume = list_volumes(self.userapiclient, virtualmachineid=vm_user.id, type="ROOT", listall=True)

        self.assertEqual(validateList(root_volume)[0], PASS, "list volume  is empty for volume id %s" % vm_user.id)
        # Non-VM resources (snapshot, template, volume, ISO) are still
        # allowed for the regular user even with the pod disabled.
        if self.snapshotSupported:
            Snapshot.create(self.userapiclient, root_volume[0].id)

        Template.register(self.userapiclient, self.testdata["privatetemplate"], zoneid=self.zone.id)

        Volume.create(
            self.userapiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id,
        )

        Iso.create(
            self.userapiclient,
            self.testdata["iso2"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
        )

        # Step 2
        # Re-enable the pod and confirm the allocation state.
        cmd.allocationstate = ENABLED
        self.apiclient.updatePod(cmd)
        podList = Pod.list(self.apiclient, id=self.pod.id)

        self.assertEqual(podList[0].allocationstate, ENABLED, "Check if the pod is in enabled state")

        # Admin operations after re-enabling: VM, snapshot, template,
        # volume and ISO creation must all succeed.
        root_vm_new = VirtualMachine.create(
            self.apiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.admin_account.name,
            domainid=self.admin_account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )
        self.assertEqual(
            root_vm_new.state.lower(),
            "running",
            "Verify that admin should be able \
                                    to create new VM",
        )

        if self.snapshotSupported:
            Snapshot.create(self.apiclient, root_volume[0].id)

            snapshots = list_snapshots(self.apiclient, volumeid=root_volume[0].id, listall=True)

            self.assertEqual(
                validateList(snapshots)[0], PASS, "list snapshot  is empty for volume id %s" % root_volume[0].id
            )

            Template.create_from_snapshot(self.apiclient, snapshots[0], self.testdata["privatetemplate"])

        Template.register(self.apiclient, self.testdata["privatetemplate"], zoneid=self.zone.id)

        Volume.create(
            self.apiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.admin_account.name,
            domainid=self.admin_account.domainid,
            diskofferingid=self.disk_offering.id,
        )

        Iso.create(
            self.apiclient,
            self.testdata["iso2"],
            zoneid=self.zone.id,
            account=self.admin_account.name,
            domainid=self.admin_account.domainid,
        )

        # Non root user
        # The same operations must now also succeed for the regular user.
        user_vm_new = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            zoneid=self.zone.id,
        )
        self.assertEqual(user_vm_new.state.lower(), "running", "Verify that admin should create new VM")

        if self.snapshotSupported:
            Snapshot.create(self.userapiclient, root_volume[0].id)

            snapshots = list_snapshots(self.userapiclient, volumeid=root_volume[0].id, listall=True)
            self.assertEqual(
                validateList(snapshots)[0], PASS, "list snapshot  is empty for volume id %s" % root_volume[0].id
            )

        Template.register(self.userapiclient, self.testdata["privatetemplate"], zoneid=self.zone.id)

        Volume.create(
            self.userapiclient,
            self.testdata["volume"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
            diskofferingid=self.disk_offering.id,
        )

        Iso.create(
            self.userapiclient,
            self.testdata["iso2"],
            zoneid=self.zone.id,
            account=self.account.name,
            domainid=self.account.domainid,
        )

        user_vm_new.delete(self.apiclient)
        # Step 3
        # Deletion of the pod should fail while servers are running in it.
        with self.assertRaises(Exception):
            self.pod.delete(self.apiclient)

        return