def test06_primary_storage_cancel_maintenance_mode(self):
        StoragePool.enableMaintenance(self.apiClient,
                                      id=self.primary_storage_id)
        StoragePool.cancelMaintenance(self.apiClient,
                                      id=self.primary_storage_id)

        # Verify in cloudsatck
        storage_pools_response = list_storage_pools(
            self.apiClient, clusterid=self.cluster.id)
        for storage in storage_pools_response:
            if storage.id == self.primary_storage_id:
                storage_pool = storage
        self.assertEqual(
            storage_pool.state, "Up",
            "Primary storage not in up mode")

        # Verify in datera
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for instance in self.datera_api.app_instances.list():
            if instance['name'] == datera_primary_storage_name:
                datera_instance = instance
        self.assertEqual(
            datera_instance["admin_state"], "online",
            "app-instance not in online mode")

        # Verify in xenserver
        for key, value in self.xen_session.xenapi.SR.get_all_records().items():
            if value['name_description'] == self.primary_storage_id:
                xen_sr = value
        self.assertEqual(
            set(["forget", "destroy"]).issubset(xen_sr["allowed_operations"]),
            False, "Xenserver SR in offline mode")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
    def test06_primary_storage_cancel_maintenance_mode(self):
        """Cycle the primary storage through maintenance and back, then
        confirm it reads as online in CloudStack, Datera and XenServer,
        and finally delete the pool."""
        StoragePool.enableMaintenance(self.apiClient,
                                      id=self.primary_storage_id)
        StoragePool.cancelMaintenance(self.apiClient,
                                      id=self.primary_storage_id)

        # CloudStack side: pool state must read "Up".
        pools = list_storage_pools(self.apiClient,
                                   clusterid=self.cluster.id)
        for candidate in pools:
            if candidate.id == self.primary_storage_id:
                matched_pool = candidate
        self.assertEqual(matched_pool.state, "Up",
                         "Primary storage not in up mode")

        # Datera side: the backing app-instance must be online.
        datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
        for app_inst in self.datera_api.app_instances.list():
            if app_inst['name'] == datera_primary_storage_name:
                matched_inst = app_inst
        self.assertEqual(matched_inst["admin_state"], "online",
                         "app-instance not in online mode")

        # XenServer side: "forget" and "destroy" being allowed together
        # would mean the SR is still detached.
        all_sr_records = self.xen_session.xenapi.SR.get_all_records()
        for sr_ref, sr_rec in all_sr_records.items():
            if sr_rec['name_description'] == self.primary_storage_id:
                matched_sr = sr_rec
        self.assertEqual(
            {"forget", "destroy"}.issubset(matched_sr["allowed_operations"]),
            False, "Xenserver SR in offline mode")

        StoragePool.delete(self.primary_storage, self.apiClient)
        self.cleanup = []
    def cleanUpCloudStack(cls):
        """Tear down the whole test run: release CloudStack resources,
        put both primary storages into maintenance, delete them and their
        StorPool volume templates, then stop the CloudStack maven process.

        Raises:
            Exception: wrapping any error hit during cleanup (CloudStack
            is killed first so it does not keep running).
        """
        cfg.logger.info("Cleaning up after the whole test run")
        try:
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)

            primary_storage = list_storage_pools(
                cls.apiclient, name=cls.primary_storage.name)[0]
            primary_storage2 = list_storage_pools(
                cls.apiclient, name=cls.primary_storage2.name)[0]
            # Pools must be in maintenance before they can be deleted.
            # (Return values were previously bound to unused locals.)
            StoragePool.enableMaintenance(cls.apiclient, primary_storage.id)
            StoragePool.enableMaintenance(cls.apiclient, primary_storage2.id)

            cls.delete_storage_pool(id=primary_storage.id)
            cls.delete_storage_pool(id=primary_storage2.id)

            cls.spapi.volumeTemplateDelete(templateName=cls.sp_template_1)
            cls.spapi.volumeTemplateDelete(templateName=cls.sp_template_2)
        except Exception as e:
            cfg.logger.info("cleanup_resources failed: %s", e)
            # Kill the whole maven process group so CloudStack does not
            # keep running after a failed cleanup.
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

            time.sleep(30)

            raise Exception("Warning: Exception during cleanup : %s" % e)

        cfg.logger.info("Stopping CloudStack")
        os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

        # Give the process group time to shut down before returning.
        time.sleep(30)

        return
    def cleanUpCloudStack(cls):
        """Put the NFS and Ceph storage pools into maintenance (if they
        are not already) and release all CloudStack resources created by
        the test run.

        Raises:
            Exception: wrapping any error hit during cleanup.
        """
        try:
            # BUG FIX: the original used `state is not "Maintenance"` —
            # an identity comparison against a string literal, whose
            # result is implementation-defined (and a SyntaxWarning on
            # CPython 3.8+). Value inequality is what was intended.
            if cls.nfs_storage_pool.state != "Maintenance":
                cls.nfs_storage_pool = StoragePool.enableMaintenance(
                    cls.apiclient, cls.nfs_storage_pool.id)

            if cls.ceph_storage_pool.state != "Maintenance":
                cls.ceph_storage_pool = StoragePool.enableMaintenance(
                    cls.apiclient, cls.ceph_storage_pool.id)

            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            raise Exception("Warning: Exception during cleanup : %s" % e)
        return
    def cleanUpCloudStack(cls):
        """Restore storage state after the test run (maintenance on the
        NFS pool, original tags on both primary pools), release resources
        and stop the CloudStack maven process."""
        cfg.logger.info("Cleaning up after the whole test run")
        try:
            cls.nfs_storage_pool = StoragePool.enableMaintenance(
                cls.apiclient, cls.nfs_storage_pool.id)
            # Restore the storage tags on both primary pools.
            cls.storage_pool = StoragePool.update(
                cls.apiclient, id=cls.primary_storage.id, tags=["ssd"])
            cls.storage_pool2 = StoragePool.update(
                cls.apiclient, id=cls.primary_storage2.id, tags=["ssd2"])
            # Cleanup resources used
            cleanup_resources(cls.apiclient, cls._cleanup)
        except Exception as e:
            cfg.logger.info("cleanup_resources failed: %s", e)
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

            raise Exception("Warning: Exception during cleanup : %s" % e)

        cfg.logger.info("Stopping CloudStack")
        os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

        time.sleep(30)

        return
    def test_ha_with_storage_maintenance(self):
        """put storage in maintenance mode and start ha vm and check usage"""
        # Steps
        # 1. Create a Compute service offering with the 'Offer HA' option
        #    selected.
        # 2. Create a Guest VM with the compute service offering created above.
        # 3. Put the primary storage into maintenance mode.
        # 4. The VM should go into the Stopped state.
        # 5. Start the VM; it should come up on another storage.
        # 6. Check that usage events are generated for the root disk.

        host = list_hosts(
            self.api_client,
            clusterid=self.clusterWithSufficientPool.id)
        self.assertEqual(validateList(host)[0],
                         PASS,
                         "check list host response for cluster id %s"
                         % self.clusterWithSufficientPool.id)

        self.virtual_machine_with_ha = VirtualMachine.create(
            self.api_client,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.services_off.id,
            hostid=host[0].id
        )

        vms = VirtualMachine.list(
            self.api_client,
            id=self.virtual_machine_with_ha.id,
            listall=True,
        )

        self.assertEqual(
            validateList(vms)[0],
            PASS,
            "List VMs should return valid response for deployed VM"
        )

        vm = vms[0]

        self.debug("Deployed VM on host: %s" % vm.hostid)

        # Put storage in maintenance mode

        self.list_root_volume = Volume.list(self.api_client,
                                            virtualmachineid=vm.id,
                                            type='ROOT',
                                            account=self.account.name,
                                            domainid=self.account.domainid)

        # BUG FIX: message typo "voume_response" -> "volume_response".
        self.assertEqual(validateList(self.list_root_volume)[0],
                         PASS,
                         "check list volume_response for vm id %s" % vm.id)

        # Look up the backing storage pool of the root volume directly in
        # the database (values interpolated are API-returned uuids/ids).
        self.pool_id = self.dbclient.execute(
            "select pool_id from volumes where uuid = '%s';"
            % self.list_root_volume[0].id)
        self.storageid = self.dbclient.execute(
            "select uuid from storage_pool where id = '%s';"
            % self.pool_id[0][0])

        StoragePool.enableMaintenance(self.api_client,
                                      id=self.storageid[0][0])

        self.virtual_machine_with_ha.start(self.api_client)
        self.events = self.dbclient.execute(
            "select type from usage_event where resource_name='%s';"
            % self.list_root_volume[0].name
        )
        # Expect exactly create -> delete -> create for the root disk.
        self.assertEqual(len(self.events),
                         3,
                         "check the usage event table for root disk %s"
                         % self.list_root_volume[0].name
                         )

        self.assertEqual(str(self.events[0][0]),
                         "VOLUME.CREATE",
                         "check volume create events for volume %s"
                         % self.list_root_volume[0].name)
        # BUG FIX: message typos "fvolume" and missing space in "volume%s".
        self.assertEqual(str(self.events[1][0]),
                         "VOLUME.DELETE",
                         "check volume delete events for volume %s"
                         % self.list_root_volume[0].name)
        self.assertEqual(str(self.events[2][0]),
                         "VOLUME.CREATE",
                         "check volume create events for volume %s"
                         % self.list_root_volume[0].name)
    # ---- Example #7 (0 votes; scraped-source separator) ----
    def test_01_create_system_vms_on_managed_storage(self):
        """Verify system VMs (SSVM, CPVM, virtual router) can run on
        managed (SolidFire) primary storage and are recreated on a second
        managed pool when the first is put into maintenance.

        NOTE(review): depends on class helpers defined elsewhere
        (_disable_zone_and_delete_system_vms, _wait_for_and_get_running_system_vms,
        _check_system_vms, etc.); statement order is significant throughout.
        """
        # Tear down existing system VMs while the zone is disabled.
        self._disable_zone_and_delete_system_vms(None, False)

        primary_storage = self.testdata[TestData.primaryStorage]

        primary_storage_1 = StoragePool.create(
            self.apiClient,
            primary_storage
        )

        self._prepare_to_use_managed_storage_for_system_vms()

        enabled = "Enabled"

        # Re-enable the zone so system VMs get (re)created on managed storage.
        self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled)

        system_vms = self._wait_for_and_get_running_system_vms(2)

        virtual_machine = VirtualMachine.create(
            self.apiClient,
            self.testdata[TestData.virtualMachine],
            accountid=self.account.name,
            zoneid=self.zone.id,
            serviceofferingid=self.compute_offering.id,
            templateid=self.template.id,
            domainid=self.domain.id,
            startvm=True
        )

        # This virtual machine was only created and started so that the virtual router would be created and started.
        # Just delete this virtual machine once it has been created and started.
        virtual_machine.delete(self.apiClient, True)

        virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0]

        system_vms.append(virtual_router)

        # All system VMs (including the router) should sit on the first pool.
        self._check_system_vms(system_vms, primary_storage_1.id)

        # Create a second managed pool under a fresh name.
        primary_storage[TestData.name] = TestData.get_name_for_solidfire_storage()

        primary_storage_2 = StoragePool.create(
            self.apiClient,
            primary_storage
        )

        # Maintenance on pool 1 should push the system VMs off of it.
        StoragePool.enableMaintenance(self.apiClient, primary_storage_1.id)

        self._wait_for_storage_cleanup_thread(system_vms)

        sf_util.purge_solidfire_volumes(self.sfe)

        system_vms = self._wait_for_and_get_running_system_vms(2)

        virtual_router = list_routers(self.apiClient, listall=True, state="Running")[0]

        system_vms.append(virtual_router)

        # The re-created system VMs must now live on the second pool.
        self._check_system_vms(system_vms, primary_storage_2.id)

        StoragePool.cancelMaintenance(self.apiClient, primary_storage_1.id)

        primary_storage_1.delete(self.apiClient)

        self._disable_zone_and_delete_system_vms(virtual_router)

        self._wait_for_storage_cleanup_thread(system_vms)

        sf_util.purge_solidfire_volumes(self.sfe)

        primary_storage_2.delete(self.apiClient)

        # No SolidFire volumes should remain once everything is deleted.
        self._verify_no_active_solidfire_volumes()

        self._prepare_to_stop_using_managed_storage_for_system_vms()

        # Leave the zone enabled with system VMs back on non-managed storage.
        self.zone.update(self.apiClient, id=self.zone.id, allocationstate=enabled)

        self._wait_for_and_get_running_system_vms(2)
    # ---- Example #8 (0 votes; scraped-source separator) ----
    def test_ha_with_storage_maintenance(self):
        """Put the HA VM's primary storage into maintenance, restart the
        VM, and verify the root-disk usage events.

        Steps:
          1. Create a compute service offering with 'Offer HA' selected.
          2. Deploy a guest VM with that offering.
          3. Put the primary storage into maintenance mode.
          4. The VM should go into the Stopped state.
          5. Start the VM; it should come up on another storage.
          6. Check that usage events were generated for the root disk.
        """
        host = list_hosts(self.api_client,
                          clusterid=self.clusterWithSufficientPool.id)
        self.assertEqual(
            validateList(host)[0], PASS,
            "check list host response for cluster id %s" %
            self.clusterWithSufficientPool.id)

        self.virtual_machine_with_ha = VirtualMachine.create(
            self.api_client, self.services["virtual_machine"],
            accountid=self.account.name, domainid=self.account.domainid,
            serviceofferingid=self.services_off.id, hostid=host[0].id)

        vms = VirtualMachine.list(self.api_client,
                                  id=self.virtual_machine_with_ha.id,
                                  listall=True)

        self.assertEqual(
            validateList(vms)[0], PASS,
            "List VMs should return valid response for deployed VM")

        vm = vms[0]

        self.debug("Deployed VM on host: %s" % vm.hostid)

        # Resolve the VM's root volume, then its backing storage pool.
        self.list_root_volume = Volume.list(
            self.api_client, virtualmachineid=vm.id, type='ROOT',
            account=self.account.name, domainid=self.account.domainid)

        self.assertEqual(
            validateList(self.list_root_volume)[0], PASS,
            "check list voume_response for vm id %s" % vm.id)

        pool_query = ("select pool_id from volumes where uuid = '%s';" %
                      self.list_root_volume[0].id)
        self.pool_id = self.dbclient.execute(pool_query)
        storage_query = ("select uuid from storage_pool where id = '%s';" %
                         self.pool_id[0][0])
        self.storageid = self.dbclient.execute(storage_query)

        # Put the storage pool into maintenance, then restart the HA VM.
        StoragePool.enableMaintenance(self.api_client, id=self.storageid[0][0])

        self.virtual_machine_with_ha.start(self.api_client)
        self.events = self.dbclient.execute(
            "select type from usage_event where resource_name='%s';" %
            self.list_root_volume[0].name)

        # Expect create -> delete -> create for the root disk.
        self.assertEqual(
            len(self.events), 3,
            "check the usage event table for root disk %s" %
            self.list_root_volume[0].name)

        self.assertEqual(
            str(self.events[0][0]), "VOLUME.CREATE",
            "check volume create events for volume %s" %
            self.list_root_volume[0].name)
        self.assertEqual(
            str(self.events[1][0]), "VOLUME.DELETE",
            "check fvolume delete events for volume%s" %
            self.list_root_volume[0].name)
        self.assertEqual(
            str(self.events[2][0]), "VOLUME.CREATE",
            "check volume create events for volume %s" %
            self.list_root_volume[0].name)