Code example #1
    def test06_primary_storage_cancel_maintenance_mode(self):
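        # Put the primary storage pool into maintenance mode, then cancel it
        # and verify the pool is back online in CloudStack, Datera, and
        # XenServer.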
        StoragePool.enableMaintenance(self.apiClient,
                                      id=self.primary_storage_id)
        StoragePool.cancelMaintenance(self.apiClient,
                                      id=self.primary_storage_id)

        # Verify in CloudStack
        storage_pools_response = list_storage_pools(
            self.apiClient, clusterid=self.cluster.id)
        for storage in storage_pools_response:
            if storage.id == self.primary_storage_id:
                storage_pool = storage
        self.assertEqual(
            storage_pool.state, "Up",
            "Primary storage not in Up state")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        self.assertEqual(
            datera_instance["admin_state"], "online",
            "app-instance not in online mode")

        # Verify in xenserver
        for key, value in self.xen_session.xenapi.SR.get_all_records().items():
            if value['name_description'] == self.primary_storage_id:
                xen_sr = value
        self.assertFalse(
            set(["forget", "destroy"]).issubset(xen_sr["allowed_operations"]),
            "XenServer SR in offline mode")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
Code example #2
    def test_03_try_delete_primary_with_snapshots(self):
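        # Create a VM, verify its ROOT volume exists on the StorPool backend,
        # snapshot the volume, and then try to delete the primary storage pool
        # while the snapshot still resides on it.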
        virtual_machine = VirtualMachine.create(
            self.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            serviceofferingid=self.serviceOfferings.id,
            hypervisor=self.hypervisor,
            rootdisksize=10)

        volume = list_volumes(self.apiclient,
                              virtualmachineid=virtual_machine.id,
                              type="ROOT")

        volume = volume[0]

        name = volume.path.split("/")[3]
        try:
            spvolume = self.spapi.volumeList(volumeName="~" + name)
            if spvolume[0].templateName != self.template_name:
                raise Exception(
                    "StorPool volume's template %s does not match the expected template %s"
                    % (spvolume[0].templateName, self.template_name))
        except spapi.ApiError as err:
            raise Exception(err)

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id=volume.id,
        )
        id = self.helper.get_snapshot_template_id(self.apiclient, snapshot,
                                                  self.storage_pool_id)
        if id is None:
            raise Exception("There isn't primary storgae id")
        virtual_machine.delete(self.apiclient, expunge=True)
        pool = list_storage_pools(self.apiclient, id=id)
        if pool[0].name == self.template_name:
            try:
                StoragePool.delete(self.sp_primary_storage, self.apiclient)
            except Exception as err:
                StoragePool.cancelMaintenance(self.apiclient,
                                              id=self.sp_primary_storage.id)
                self.debug("Storage pool could not be delete due to %s" % err)
        else:
            self.cleanup.append(snapshot)
            raise Exception("Snapshot is not on the same pool")
        Snapshot.delete(snapshot, self.apiclient)
Code example #3
    def tearDown(self):
        # Clean up, terminate the created resources
        StoragePool.cancelMaintenance(self.api_client,
                                      id=self.storageid[0][0])
        cleanup_resources(self.apiClient, self.cleanup)
        return
Code example #4
    def setUpCloudStack(cls):
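        # Locate the zone by the management server's IP, register the StorPool,
        # NFS, and Ceph primary storage pools with matching service and disk
        # offerings, create a test account, and start one VM on NFS and one on
        # Ceph storage for the migration tests to use.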
        cls.spapi = spapi.Api.fromConfig(multiCluster=True)
        testClient = super(TestMigrateVMWithVolumes, cls).getClsTestClient()
        cls.apiclient = testClient.getApiClient()

        cls._cleanup = []

        cls.unsupportedHypervisor = False
        cls.hypervisor = testClient.getHypervisorInfo()
        if cls.hypervisor.lower() in ("hyperv", "lxc"):
            cls.unsupportedHypervisor = True
            return

        cls.services = testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = None

        zones = list_zones(cls.apiclient)

        for z in zones:
            if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
                cls.zone = z

        assert cls.zone is not None, \
            "Could not find a zone matching the management server IP"

        td = TestData()
        cls.testdata = td.testdata
        cls.helper = StorPoolHelper()
        storpool_primary_storage = cls.testdata[TestData.primaryStorage]
        cls.template_name = storpool_primary_storage.get("name")
        storpool_service_offerings = cls.testdata[TestData.serviceOffering]

        nfs_service_offerings = cls.testdata[TestData.serviceOfferingsPrimary]
        ceph_service_offerings = cls.testdata[TestData.serviceOfferingsCeph]

        nfs_disk_offerings = cls.testdata[TestData.nfsDiskOffering]
        ceph_disk_offerings = cls.testdata[TestData.cephDiskOffering]

        storage_pool = list_storage_pools(cls.apiclient,
                                          name=cls.template_name)

        nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')

        ceph_primary_storage = cls.testdata[TestData.primaryStorage4]

        cls.ceph_storage_pool = list_storage_pools(
            cls.apiclient, name=ceph_primary_storage.get("name"))[0]

        service_offerings = list_service_offering(cls.apiclient,
                                                  name=cls.template_name)
        nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')

        ceph_service_offering = list_service_offering(
            cls.apiclient, name=ceph_primary_storage.get("name"))

        if storage_pool is None:
            storage_pool = StoragePool.create(cls.apiclient,
                                              storpool_primary_storage)
        else:
            storage_pool = storage_pool[0]
        cls.storage_pool = storage_pool
        cls.debug(pprint.pformat(storage_pool))
        if service_offerings is None:
            service_offerings = ServiceOffering.create(
                cls.apiclient, storpool_service_offerings)
        else:
            service_offerings = service_offerings[0]
        if nfs_service_offering is None:
            nfs_service_offering = ServiceOffering.create(
                cls.apiclient, nfs_service_offerings)
        else:
            nfs_service_offering = nfs_service_offering[0]

        if ceph_service_offering is None:
            ceph_service_offering = ServiceOffering.create(
                cls.apiclient, ceph_service_offerings)
        else:
            ceph_service_offering = ceph_service_offering[0]

        nfs_disk_offering = list_disk_offering(cls.apiclient, name="nfs")
        if nfs_disk_offering is None:
            cls.nfs_disk_offering = DiskOffering.create(cls.apiclient,
                                                        nfs_disk_offerings)
        else:
            cls.nfs_disk_offering = nfs_disk_offering[0]

        ceph_disk_offering = list_disk_offering(cls.apiclient, name="ceph")
        if ceph_disk_offering is None:
            cls.ceph_disk_offering = DiskOffering.create(
                cls.apiclient, ceph_disk_offerings)
        else:
            cls.ceph_disk_offering = ceph_disk_offering[0]

        template = get_template(cls.apiclient, cls.zone.id, account="system")

        if template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.nfs_storage_pool = nfs_storage_pool[0]
        if cls.nfs_storage_pool.state == "Maintenance":
            cls.nfs_storage_pool = StoragePool.cancelMaintenance(
                cls.apiclient, cls.nfs_storage_pool.id)

        if cls.ceph_storage_pool.state == "Maintenance":
            cls.ceph_storage_pool = StoragePool.cancelMaintenance(
                cls.apiclient, cls.ceph_storage_pool.id)

        cls.account = cls.helper.create_account(cls.apiclient,
                                                cls.services["account"],
                                                accounttype=1,
                                                domainid=cls.domain.id,
                                                roleid=1)
        cls._cleanup.append(cls.account)

        securitygroup = SecurityGroup.list(cls.apiclient,
                                           account=cls.account.name,
                                           domainid=cls.account.domainid)[0]
        cls.helper.set_securityGroups(cls.apiclient,
                                      account=cls.account.name,
                                      domainid=cls.account.domainid,
                                      id=securitygroup.id)

        cls.vm = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=nfs_service_offering.id,
            diskofferingid=cls.nfs_disk_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)

        cls.vm2 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=template.id,
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=ceph_service_offering.id,
            diskofferingid=cls.ceph_disk_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)

        cls.debug(pprint.pformat(template))
        cls.debug(pprint.pformat(cls.hypervisor))

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = template.ostypeid
        cls.services["zoneid"] = cls.zone.id

        cls.service_offering = service_offerings
        cls.nfs_service_offering = nfs_service_offering
        cls.debug(pprint.pformat(cls.service_offering))

        cls.template = template
        cls.random_data_0 = random_gen(size=100)
        cls.test_dir = "/tmp"
        cls.random_data = "random.data"
        return
Code example #5
    def setUpCloudStack(cls):
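        # Build and start CloudStack from the pre-globalId commit, wait for
        # Jetty to come up, reset the sp.migration.to.global.ids.completed
        # flag in the database, create the storage pools, offerings, and VMs,
        # and then switch to the commit with globalId support.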
        super(TestMigrationFromUuidToGlobalId, cls).setUpClass()

        cls._cleanup = []
        cls.helper = HelperUtil(cls)
        cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
        cfg.logger.info("Starting CloudStack")
        cls.mvn_proc = subprocess.Popen(
            ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
            cwd=cls.ARGS.forked,
            preexec_fn=os.setsid,
            stdout=cfg.misc,
            stderr=subprocess.STDOUT,
        )
        cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
        cfg.logger.info("Started CloudStack in process group %d",
                        cls.mvn_proc_grp)
        cfg.logger.info("Waiting for a while to give it a chance to start")
        proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                                shell=False,
                                bufsize=0,
                                stdout=subprocess.PIPE)
        while True:
            line = proc.stdout.readline()
            if not line:
                cfg.logger.info("tail ended, was this expected?")
                cfg.logger.info("Stopping CloudStack")
                os.killpg(cls.mvn_proc_grp, signal.SIGINT)
                break
            if "[INFO] Started Jetty Server" in line:
                cfg.logger.info("got it!")
                break
        proc.terminate()
        proc.wait()
        time.sleep(15)
        cfg.logger.info("Processing with the setup")

        cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
        cls.testClient = cls.obj_marvininit.getTestClient()
        cls.apiclient = cls.testClient.getApiClient()
        dbclient = cls.testClient.getDbConnection()
        v = dbclient.execute(
            "select * from configuration where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("Configuration setting for update of db is %s", v)
        if len(v) > 0:
            update = dbclient.execute(
                "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
            )
            cfg.logger.info("DB configuration table was updated %s", update)

        cls.spapi = spapi.Api.fromConfig(multiCluster=True)

        td = TestData()
        cls.testdata = td.testdata

        cls.services = cls.testClient.getParsedTestDataConfig()
        # Get Zone, Domain and templates
        cls.domain = get_domain(cls.apiclient)
        cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
        cls.cluster = list_clusters(cls.apiclient)[0]
        cls.hypervisor = get_hypervisor_type(cls.apiclient)

        # The version of CentOS has to be supported
        cls.template = get_template(cls.apiclient,
                                    cls.zone.id,
                                    account="system")

        if cls.template == FAILED:
            assert False, "get_template() failed to return template\
                    with description %s" % cls.services["ostype"]

        cls.services["domainid"] = cls.domain.id
        cls.services["small"]["zoneid"] = cls.zone.id
        cls.services["templates"]["ostypeid"] = cls.template.ostypeid
        cls.services["zoneid"] = cls.zone.id
        primarystorage = cls.testdata[TestData.primaryStorage]
        primarystorage2 = cls.testdata[TestData.primaryStorage2]

        serviceOffering = cls.testdata[TestData.serviceOffering]
        serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]
        storage_pool = list_storage_pools(cls.apiclient,
                                          name=primarystorage.get("name"))
        storage_pool2 = list_storage_pools(cls.apiclient,
                                           name=primarystorage2.get("name"))
        cls.primary_storage = storage_pool[0]
        cls.primary_storage2 = storage_pool2[0]

        disk_offering = list_disk_offering(cls.apiclient, name="Small")

        assert disk_offering is not None

        service_offering = list_service_offering(cls.apiclient, name="ssd")
        if service_offering is not None:
            cls.service_offering = service_offering[0]
        else:
            cls.service_offering = ServiceOffering.create(
                cls.apiclient, serviceOffering)
        assert cls.service_offering is not None

        service_offering2 = list_service_offering(cls.apiclient, name="ssd2")
        if service_offering2 is not None:
            cls.service_offering2 = service_offering2[0]
        else:
            cls.service_offering2 = ServiceOffering.create(
                cls.apiclient, serviceOffering2)
        assert cls.service_offering2 is not None

        nfs_service_offerings = {
            "name": "nfs",
            "displaytext": "NFS service offerings",
            "cpunumber": 1,
            "cpuspeed": 500,
            "memory": 512,
            "storagetype": "shared",
            "customizediops": False,
            "hypervisorsnapshotreserve": 200,
            "tags": "nfs"
        }

        nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')

        nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')

        if nfs_service_offering is None:
            nfs_service_offering = ServiceOffering.create(
                cls.apiclient, nfs_service_offerings)
        else:
            nfs_service_offering = nfs_service_offering[0]

        cls.nfs_service_offering = nfs_service_offering

        cls.nfs_storage_pool = nfs_storage_pool[0]

        cls.nfs_storage_pool = StoragePool.cancelMaintenance(
            cls.apiclient, cls.nfs_storage_pool.id)

        cls.disk_offering = disk_offering[0]

        account = list_accounts(cls.apiclient, name="admin")
        cls.account = account[0]

        cls.virtual_machine = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.nfs_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine)

        cls.virtual_machine2 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.nfs_service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine2)

        cls.virtual_machine3 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine3)

        cls.virtual_machine4 = VirtualMachine.create(
            cls.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=cls.zone.id,
            templateid=cls.template.id,
            serviceofferingid=cls.service_offering2.id,
            hypervisor=cls.hypervisor,
            rootdisksize=10)
        cls._cleanup.append(cls.virtual_machine4)

        cls.volume = Volume.create(cls.apiclient,
                                   cls.testdata[TestData.volume_1],
                                   account=cls.account.name,
                                   domainid=cls.domain.id,
                                   zoneid=cls.zone.id,
                                   diskofferingid=cls.disk_offering.id)

        cls._cleanup.append(cls.volume)

        cls.primary_storage = StoragePool.update(cls.apiclient,
                                                 id=cls.primary_storage.id,
                                                 tags=["ssd, nfs, ssd2"])
        cls.primary_storage2 = StoragePool.update(cls.apiclient,
                                                  id=cls.primary_storage2.id,
                                                  tags=["ssd, ssd2"])
        # Switch to the latest commit with the globalId implementation
        cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
        cfg.logger.info("The setup is done, proceeding with the tests")
        cls.primary_storage = list_storage_pools(
            cls.apiclient, name=primarystorage.get("name"))[0]
        cls.primary_storage2 = list_storage_pools(
            cls.apiclient, name=primarystorage2.get("name"))[0]
Code example #6
    def test_01_create_system_vms_on_managed_storage(self):
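        # Verify that system VMs can run on managed storage: bring them up on
        # a first pool, check them (plus a virtual router), put that pool into
        # maintenance so they are recreated on a second pool, and clean both
        # pools up afterwards.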
        self._disable_zone_and_delete_system_vms(None, False)

        primary_storage = self.testdata[TestData.primaryStorage]

        primary_storage_1 = StoragePool.create(
            self.apiClient,
            primary_storage
        )

        self._prepare_to_use_managed_storage_for_system_vms()

        enabled = "Enabled"

        self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled)

        system_vms = self._wait_for_and_get_running_system_vms(2)

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        # This virtual machine was created only so that the virtual router
        # would be created and started; the VM itself can be deleted right away.
        virtual_machine.delete(self.apiClient, True)

        virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0]

        system_vms.append(virtual_router)

        self._check_system_vms(system_vms, primary_storage_1.id)

        primary_storage[TestData.name] = TestData.get_name_for_solidfire_storage()

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primary_storage
        )

        StoragePool.enableMaintenance(self.apiClient, primary_storage_1.id)

        self._wait_for_storage_cleanup_thread(system_vms)

        sf_util.purge_solidfire_volumes(self.sfe)

        system_vms = self._wait_for_and_get_running_system_vms(2)

        virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0]

        system_vms.append(virtual_router)

        self._check_system_vms(system_vms, primary_storage_2.id)

        StoragePool.cancelMaintenance(self.apiClient, primary_storage_1.id)

        primary_storage_1.delete(self.apiClient)

        self._disable_zone_and_delete_system_vms(virtual_router)

        self._wait_for_storage_cleanup_thread(system_vms)

        sf_util.purge_solidfire_volumes(self.sfe)

        primary_storage_2.delete(self.apiClient)

        self._verify_no_active_solidfire_volumes()

        self._prepare_to_stop_using_managed_storage_for_system_vms()

        self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled)

        self._wait_for_and_get_running_system_vms(2)
Code example #7
    def test_04_try_delete_primary_with_template(self):
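        # Snapshot a ROOT volume with secondary-storage bypass enabled, create
        # a template from that snapshot, and then try to delete the primary
        # storage pool, cancelling maintenance if the delete fails.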
        virtual_machine = VirtualMachine.create(
            self.apiclient, {"name": "StorPool-%s" % uuid.uuid4()},
            zoneid=self.zone.id,
            templateid=self.template.id,
            serviceofferingid=self.serviceOfferings.id,
            hypervisor=self.hypervisor,
            rootdisksize=10)

        volume = list_volumes(self.apiclient,
                              virtualmachineid=virtual_machine.id,
                              type="ROOT",
                              listall=True)

        volume = volume[0]

        name = volume.path.split("/")[3]
        try:
            spvolume = self.spapi.volumeList(volumeName="~" + name)
            if spvolume[0].templateName != self.template_name:
                raise Exception(
                    "StorPool volume's template %s does not match the expected template %s"
                    % (spvolume[0].templateName, self.template_name))
        except spapi.ApiError as err:
            raise Exception(err)

        backup_config = list_configurations(self.apiclient,
                                            name="sp.bypass.secondary.storage")
        if (backup_config[0].value == "false"):
            backup_config = Configurations.update(
                self.apiclient,
                name="sp.bypass.secondary.storage",
                value="true")

        snapshot = Snapshot.create(
            self.apiclient,
            volume_id=volume.id,
        )
        self.debug("###################### %s" % snapshot)
        id = self.helper.get_snapshot_template_id(self.apiclient, snapshot,
                                                  self.storage_pool_id)
        if id is None:
            raise Exception("There isn't primary storgae id")
        virtual_machine.delete(self.apiclient, expunge=True)
        pool = list_storage_pools(self.apiclient, id=id)

        services = {
            "displaytext": "Template-1",
            "name": "Template-1-name",
            "ostypeid": self.template.ostypeid,
            "ispublic": "true"
        }
        template = Template.create_from_snapshot(self.apiclient,
                                                 snapshot=snapshot,
                                                 services=services)
        Snapshot.delete(snapshot, self.apiclient)

        try:
            StoragePool.delete(self.sp_primary_storage, self.apiclient)
        except Exception as err:
            StoragePool.cancelMaintenance(self.apiclient,
                                          id=self.sp_primary_storage.id)
            self.debug("Storge pool could not be delete due to %s" % err)

        Template.delete(template, self.apiclient)