def test06_primary_storage_cancel_maintenance_mode(self):
    """Cancel maintenance mode on the primary storage and verify the
    resulting state in CloudStack, Datera and XenServer."""
    StoragePool.enableMaintenance(self.apiClient,
                                  id=self.primary_storage_id)
    StoragePool.cancelMaintenance(self.apiClient,
                                  id=self.primary_storage_id)

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)
    for storage in storage_pools_response:
        if storage.id == self.primary_storage_id:
            storage_pool = storage
    self.assertEqual(
        storage_pool.state, "Up",
        "Primary storage not in Up state")

    # Verify in Datera
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primary_storage_name:
            datera_instance = instance
    self.assertEqual(
        datera_instance["admin_state"], "online",
        "app-instance not in online mode")

    # Verify in XenServer
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == self.primary_storage_id:
            xen_sr = value
    self.assertEqual(
        set(["forget", "destroy"]).issubset(xen_sr["allowed_operations"]),
        False,
        "XenServer SR in offline mode")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test13_update_primary_storage_capacityIops_to_zero(self):
    """Update the primary storage capacity IOPS to zero and verify the
    change in CloudStack and on the Datera app-instance."""
    updatedIops = 0
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacityiops=updatedIops,
                       tags=self.primary_tag)

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)
    for data in storage_pools_response:
        if data.id == self.primary_storage_id:
            storage_pool = data
    self.assertEqual(
        storage_pool.capacityiops, updatedIops,
        "Primary storage capacityiops not updated")

    # Verify in Datera
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primary_storage_name:
            datera_instance = instance
    app_instance_response_iops = (
        datera_instance['storage_instances']
        ['storage-1']['volumes']['volume-1']['performance_policy']
        ['total_iops_max'])
    self.assertEqual(
        app_instance_response_iops, updatedIops,
        "app-instance capacityiops not updated")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test10_add_vm_with_datera_storage_and_volume(self):
    """Create a Datera-backed primary storage pool, deploy a VM on it,
    attach a data volume and cross-check usage against XenServer."""
    primarystorage = self.testdata[TestData.primaryStorage]

    primary_storage = StoragePool.create(
        self.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=self.zone.id,
        clusterid=self.cluster.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor]
    )
    primary_storage_url = primarystorage[TestData.url]
    self._verify_attributes(
        primary_storage.id, primary_storage_url)
    self.cleanup.append(primary_storage)

    self.virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True
    )
    self._validate_storage(primary_storage, self.virtual_machine)

    volume = Volume.create(
        self.apiClient,
        self.testdata[TestData.volume_1],
        account=self.account.name,
        domainid=self.domain.id,
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id
    )
    self.virtual_machine.attach_volume(
        self.apiClient,
        volume
    )

    storage_pools_response = list_storage_pools(
        self.apiClient, id=primary_storage.id)

    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == primary_storage.id:
            xen_server_response = value

    self.assertNotEqual(
        int(storage_pools_response[0].disksizeused),
        int(xen_server_response['physical_utilisation']))
def _validate_storage(self, storage, vm):
    """Verify that the VM's ROOT volume size is reflected consistently
    in CloudStack, Datera and XenServer."""
    list_volumes_response = list_volumes(
        self.apiClient, virtualmachineid=vm.id, listall=True)
    self.assertNotEqual(
        list_volumes_response, None,
        "'list_volumes_response' should not be equal to 'None'.")

    for volume in list_volumes_response:
        if volume.type.upper() == "ROOT":
            volumeData = volume
            self.assertEqual(volume.storage, storage.name,
                             "Volume not created for VM " + str(vm.id))

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, id=storage.id)
    self.assertEqual(
        int(volumeData.size),
        int(storage_pools_response[0].disksizeused),
        "Allocated disk sizes are not the same in volumes and primary storage")

    # Verify in Datera
    datera_primarystorage_name = "cloudstack-" + storage.id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primarystorage_name:
            app_instance_response = instance
    app_instance_response_disk = (
        app_instance_response['storage_instances']
        ['storage-1']['volumes']['volume-1']
        ['capacity_in_use'] * 1073741824)
    self.assertEqual(
        int(volumeData.size),
        int(app_instance_response_disk),
        "App instance usage size is incorrect")

    # Verify in XenServer
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == storage.id:
            xen_server_response = value
    self.assertEqual(
        int(xen_server_response['physical_utilisation']),
        int(volumeData.size),
        "Allocated disk size is incorrect in XenServer")
def test07_update_primary_storage_capacityBytes(self):
    """Update the primary storage capacity in bytes and verify the
    change in CloudStack and on the Datera app-instance."""
    updatedDiskSize = self.testdata[TestData.newCapacityBytes]
    StoragePool.update(self.apiClient,
                       id=self.primary_storage_id,
                       capacitybytes=updatedDiskSize,
                       tags=self.primary_tag)

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)
    for data in storage_pools_response:
        if data.id == self.primary_storage_id:
            storage_pool = data
    self.assertEqual(
        storage_pool.disksizetotal, updatedDiskSize,
        "Primary storage not updated")

    # Verify in Datera
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primary_storage_name:
            datera_instance = instance
    app_instance_response_disk_size = (
        datera_instance['storage_instances']
        ['storage-1']['volumes']['volume-1']['size'] * 1073741824)
    self.assertEqual(
        app_instance_response_disk_size, updatedDiskSize,
        "app-instance not updated")

    # Verify in XenServer
    # for key, value in self.xen_session.xenapi.SR.get_all_records().items():
    #     if value['name_description'] == self.primary_storage_id:
    #         xen_sr = value
    # Uncomment after xen fix
    # print(xen_sr)
    # print(xen_sr['physical_size'], updatedDiskSize)
    # self.assertEqual(
    #     int(xen_sr['physical_size']) + 12582912, updatedDiskSize,
    #     "Xen server physical storage not updated")

    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []
def test04_delete_primary_storage(self):
    """Delete the primary storage and verify it is gone from CloudStack,
    Datera, XenServer and the database."""
    # cleanup_resources(self.apiClient, self._primary_storage)
    StoragePool.delete(self.primary_storage, self.apiClient)
    self.cleanup = []

    # Verify in CloudStack
    storage_pools_response = list_storage_pools(
        self.apiClient, clusterid=self.cluster.id)
    if len(storage_pools_response) > 0:
        for storage in storage_pools_response:
            self.assertNotEqual(
                storage.id, self.primary_storage_id,
                "Primary storage not deleted")

    # Verify in Datera
    flag = 0
    datera_primary_storage_name = "cloudstack-" + self.primary_storage_id
    for item in self.datera_api.app_instances.list():
        if item['name'] == datera_primary_storage_name:
            flag = 1
    self.assertEqual(flag, 0, "app instance not deleted.")

    # Verify in XenServer
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        self.assertNotEqual(
            value['name_description'], self.primary_storage_id,
            "SR not deleted in XenServer")

    # Verify in the SQL database
    command = "select uuid from storage_pool"
    sql_result = self.dbConnection.execute(command)
    key = 0
    for uuid in sql_result:
        if uuid[0] == self.primary_storage_id:
            key = 1
    self.assertEqual(
        key, 0,
        "Primary storage not deleted in database")
def test_04_try_delete_primary_with_template(self):
    """Try to delete a primary storage pool that still holds a template
    created from a snapshot."""
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=self.template.id,
        serviceofferingid=self.serviceOfferings.id,
        hypervisor=self.hypervisor,
        rootdisksize=10)

    volume = list_volumes(self.apiclient,
                          virtualmachineid=virtual_machine.id,
                          type="ROOT",
                          listall=True)
    volume = volume[0]

    name = volume.path.split("/")[3]
    try:
        spvolume = self.spapi.volumeList(volumeName="~" + name)
        if spvolume[0].templateName != self.template_name:
            raise Exception(
                "StorPool volume's template %s is not the same as template %s"
                % (spvolume[0].templateName, self.template_name))
    except spapi.ApiError as err:
        raise Exception(err)

    backup_config = list_configurations(self.apiclient,
                                        name="sp.bypass.secondary.storage")
    if (backup_config[0].value == "false"):
        backup_config = Configurations.update(
            self.apiclient,
            name="sp.bypass.secondary.storage",
            value="true")

    snapshot = Snapshot.create(
        self.apiclient,
        volume_id=volume.id,
    )
    self.debug("###################### %s" % snapshot)
    id = self.helper.get_snapshot_template_id(self.apiclient, snapshot,
                                              self.storage_pool_id)
    if id is None:
        raise Exception("There isn't a primary storage id")
    virtual_machine.delete(self.apiclient, expunge=True)
    pool = list_storage_pools(self.apiclient, id=id)

    services = {
        "displaytext": "Template-1",
        "name": "Template-1-name",
        "ostypeid": self.template.ostypeid,
        "ispublic": "true"
    }
    template = Template.create_from_snapshot(self.apiclient,
                                             snapshot=snapshot,
                                             services=services)
    Snapshot.delete(snapshot, self.apiclient)

    try:
        StoragePool.delete(self.sp_primary_storage, self.apiclient)
    except Exception as err:
        StoragePool.cancelMaintenance(self.apiclient,
                                      id=self.sp_primary_storage.id)
        self.debug("Storage pool could not be deleted due to %s" % err)

    Template.delete(template, self.apiclient)
def setUpClass(cls):
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False

    # Setup test data
    td = TestData()
    cls.testdata = td.testdata
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")

    import pprint
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))

    if template == FAILED:
        assert False, "get_template() failed to return template " \
                      "with description %s" % cls.services["ostype"]

    cls.template = template
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]

    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOfferingOnly = cls.testdata[TestData.serviceOfferingOnly]

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage2.get("name"))
    cls.primary_storage2 = storage_pool[0]

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None

    #===========================================================================
    # service_offering = list_service_offering(
    #     cls.apiclient,
    #     name="tags"
    # )
    # if service_offering is not None:
    #     cls.service_offering = service_offering[0]
    # else:
    #     cls.service_offering = ServiceOffering.create(
    #         cls.apiclient,
    #         serviceOffering)
    #===========================================================================

    service_offering_only = list_service_offering(cls.apiclient,
                                                  name="cloud-test-dev-2")
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiclient,
            serviceOfferingOnly)
    assert cls.service_offering_only is not None

    cls.disk_offering = disk_offering[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    # Create one data volume
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)

    # Resources that are to be destroyed
    cls._cleanup = [cls.virtual_machine, cls.volume]

    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return

    cls._cleanup = []
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()

    cls.account = cls.helper.create_account(cls.apiclient,
                                            cls.services["account"],
                                            accounttype=1,
                                            domainid=cls.domain.id,
                                            roleid=1)
    cls._cleanup.append(cls.account)

    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)

    storpool_primary_storage = cls.testdata[TestData.primaryStorage]
    storpool_service_offerings = cls.testdata[TestData.serviceOffering]

    cls.template_name = storpool_primary_storage.get("name")

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=cls.template_name)
    service_offerings = list_service_offering(cls.apiclient,
                                              name=cls.template_name)
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    cls.disk_offerings = disk_offerings[0]

    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    # The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))

    if template == FAILED:
        assert False, "get_template() failed to return template " \
                      "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.services["diskofferingid"] = cls.disk_offerings.id

    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    cls.volume_1 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.volume_2 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.volume = Volume.create(cls.apiclient,
                               cls.services,
                               account=cls.account.name,
                               domainid=cls.account.domainid)

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)

    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpClass(cls):
    cls.testClient = super(TestResizeVolume, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.hypervisor = (cls.testClient.getHypervisorInfo()).lower()
    cls.storageID = None

    # Fill services from the external config file
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.services["mode"] = cls.zone.networktype
    cls._cleanup = []
    cls.unsupportedStorageType = False
    cls.unsupportedHypervisorType = False
    cls.updateclone = False
    if cls.hypervisor not in ['xenserver', "kvm", "vmware"]:
        cls.unsupportedHypervisorType = True
        return
    cls.template = get_template(cls.api_client, cls.zone.id)
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = cls.template.id
    cls.services["volume"]["zoneid"] = cls.zone.id
    try:
        cls.parent_domain = Domain.create(cls.api_client,
                                          services=cls.services["domain"],
                                          parentdomainid=cls.domain.id)
        cls.parentd_admin = Account.create(cls.api_client,
                                           cls.services["account"],
                                           admin=True,
                                           domainid=cls.parent_domain.id)
        cls._cleanup.append(cls.parentd_admin)
        cls._cleanup.append(cls.parent_domain)
        list_pool_resp = list_storage_pools(cls.api_client,
                                            account=cls.parentd_admin.name,
                                            domainid=cls.parent_domain.id)
        res = validateList(list_pool_resp)
        if res[2] == INVALID_INPUT:
            raise Exception(
                "Failed to list storage pools - no storage pools found")
        # Identify the storage pool type and set vmware.create.full.clone
        # to true if the storage is VMFS
        if cls.hypervisor == 'vmware':
            for strpool in list_pool_resp:
                if strpool.type.lower() == "vmfs" or \
                        strpool.type.lower() == "networkfilesystem":
                    list_config_storage_response = list_configurations(
                        cls.api_client,
                        name="vmware.create.full.clone",
                        storageid=strpool.id)
                    res = validateList(list_config_storage_response)
                    if res[2] == INVALID_INPUT:
                        raise Exception("Failed to list configurations")
                    if list_config_storage_response[0].value == "false":
                        Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="true",
                                              storageid=strpool.id)
                        cls.updateclone = True
                        StoragePool.update(cls.api_client,
                                           id=strpool.id,
                                           tags="scsi")
                    cls.storageID = strpool.id
                    cls.unsupportedStorageType = False
                    break
                else:
                    cls.unsupportedStorageType = True
        # Creating service offering with normal config
        cls.service_offering = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"])
        cls.services_offering_vmware = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"], tags="scsi")
        cls._cleanup.extend(
            [cls.service_offering, cls.services_offering_vmware])
    except Exception as e:
        cls.tearDownClass()
    return
def setUpCloudStack(cls):
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    with open(cls.ARGS.cfg) as json_text:
        cfg.logger.info(cls.ARGS.cfg)
        cfg.logger.info(json_text)
        conf = json.load(json_text)
        cfg.logger.info(conf)
        zone = conf['mgtSvr'][0].get('zone')
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    cfg.logger.info("Proceeding with the setup")

    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)

    td = TestData()
    cls.testdata = td.testdata

    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, zone_name=zone)
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    cls.host = list_hosts(cls.apiclient, zoneid=cls.zone.id)

    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")

    if cls.template == FAILED:
        assert False, "get_template() failed to return template " \
                      "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.sp_template_1 = "-".join(["test-ssd-b", random_gen()])
    cfg.logger.info(
        pprint.pformat("############################ %s" % cls.zone))
    storpool_primary_storage = {
        "name": cls.sp_template_1,
        "zoneid": cls.zone.id,
        "url": ("SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;"
                "SP_TEMPLATE=%s" % cls.sp_template_1),
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 155466,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_1
    }

    cls.storpool_primary_storage = storpool_primary_storage
    host, port, auth = cls.getCfgFromUrl(
        url=storpool_primary_storage["url"])
    cls.spapi = spapi.Api(host=host, port=port, auth=auth)

    storage_pool = list_storage_pools(
        cls.apiclient, name=storpool_primary_storage["name"])

    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool

    storpool_service_offerings_ssd = {
        "name": cls.sp_template_1,
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": cls.sp_template_1
    }

    service_offerings_ssd = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd["name"])

    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd)
    else:
        service_offerings_ssd = service_offerings_ssd[0]

    cls.service_offering = service_offerings_ssd
    cfg.logger.info(pprint.pformat(cls.service_offering))

    cls.sp_template_2 = "-".join(["test-ssd2-b", random_gen()])
    storpool_primary_storage2 = {
        "name": cls.sp_template_2,
        "zoneid": cls.zone.id,
        "url": ("SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;"
                "SP_TEMPLATE=%s" % cls.sp_template_2),
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 1554,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_2
    }

    cls.storpool_primary_storage2 = storpool_primary_storage2
    storage_pool = list_storage_pools(
        cls.apiclient, name=storpool_primary_storage2["name"])

    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage2["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage2)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage2 = storage_pool

    storpool_service_offerings_ssd2 = {
        "name": cls.sp_template_2,
        "displaytext": "SP_CO_2",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "tags": cls.sp_template_2
    }

    service_offerings_ssd2 = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd2["name"])

    if service_offerings_ssd2 is None:
        service_offerings_ssd2 = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd2)
    else:
        service_offerings_ssd2 = service_offerings_ssd2[0]

    cls.service_offering2 = service_offerings_ssd2

    os.killpg(cls.mvn_proc_grp, signal.SIGTERM)

    time.sleep(30)

    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    cls.disk_offering = disk_offering[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)

    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)

    # Check that the ROOT disk is created with a UUID name
    root_volume = list_volumes(cls.apiclient,
                               virtualmachineid=cls.virtual_machine3.id,
                               type="ROOT")
    try:
        spvolume = cls.spapi.volumeList(volumeName=root_volume[0].id)
    except spapi.ApiError as err:
        cfg.logger.info("Root volume is not created with UUID")
        raise Exception(err)

    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)

    cls.volume2 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_2],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume2)

    cls.virtual_machine4.stop(cls.apiclient, forced=True)
    cls.virtual_machine4.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine4.detach_volume(cls.apiclient, cls.volume2)

    vol = list_volumes(cls.apiclient, id=cls.volume2.id)

    cls.random_data_vm_snapshot1 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"

    volume_attached = cls.virtual_machine.attach_volume(
        cls.apiclient, cls.volume)

    cls.helper.write_on_disks(cls.random_data_vm_snapshot1,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)

    MemorySnapshot = False
    cls.vm_snapshot1 = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot1,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)

    cls.random_data_vm_snapshot2 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot2,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)

    cls.vm_snapshot2 = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot2,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)

    # VM snapshot to be deleted without revert
    cls.random_data_vm_snapshot3 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot3,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)

    cls.vm_snapshot_for_delete = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(
        cls.vm_snapshot_for_delete, cls.virtual_machine, cls.test_dir,
        cls.random_data)

    cls.snapshot_on_secondary = cls.helper.create_snapshot(
        False, cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_on_secondary)
    cls.template_on_secondary = cls.helper.create_template_from_snapshot(
        cls.services, snapshotid=cls.snapshot_on_secondary.id)
    cls._cleanup.append(cls.template_on_secondary)

    cls.snapshot_bypassed = cls.helper.create_snapshot(
        False, cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_bypassed)
    cls.template_bypased = cls.helper.create_template_from_snapshot(
        cls.services, snapshotid=cls.snapshot_bypassed.id)
    cls._cleanup.append(cls.template_bypased)

    # Change to the latest commit with the globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpClass(cls):
    cls.cloudstacktestclient = super(TestDeployVmRootSize,
                                     cls).getClsTestClient()
    cls.api_client = cls.cloudstacktestclient.getApiClient()
    cls.hypervisor = cls.cloudstacktestclient.getHypervisorInfo().lower()
    cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__

    # Get Zone, Domain and Default Built-in template
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client,
                        cls.cloudstacktestclient.getZoneForTests())
    cls.services = cls.testClient.getParsedTestDataConfig()
    cls.services["mode"] = cls.zone.networktype
    cls._cleanup = []
    cls.updateclone = False
    cls.restartreq = False
    cls.defaultdiskcontroller = "ide"
    cls.template = get_template(cls.api_client, cls.zone.id)
    if cls.template == FAILED:
        assert False, "get_template() failed to return template"

    # Create a user account
    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id,
        admin=True
    )
    cls._cleanup.append(cls.account)

    list_pool_resp = list_storage_pools(cls.api_client,
                                        account=cls.account.name,
                                        domainid=cls.domain.id)
    # Identify the storage pool type and set vmware.create.full.clone to
    # true if the storage is VMFS
    if cls.hypervisor == 'vmware':
        # Please make sure the url in the templateregister dictionary in
        # test_data.config points to an .ova file
        list_config_storage_response = list_configurations(
            cls.api_client, name="vmware.root.disk.controller")
        cls.defaultdiskcontroller = list_config_storage_response[0].value
        if list_config_storage_response[0].value == "ide" or \
                list_config_storage_response[0].value == "osdefault":
            Configurations.update(cls.api_client,
                                  "vmware.root.disk.controller",
                                  value="scsi")
            cls.updateclone = True
            cls.restartreq = True

        list_config_fullclone_global_response = list_configurations(
            cls.api_client, name="vmware.create.full.clone")
        if list_config_fullclone_global_response[0].value == "false":
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="true")
            cls.updateclone = True
            cls.restartreq = True

        cls.tempobj = Template.register(cls.api_client,
                                        cls.services["templateregister"],
                                        hypervisor=cls.hypervisor,
                                        zoneid=cls.zone.id,
                                        account=cls.account.name,
                                        domainid=cls.domain.id)
        cls.tempobj.download(cls.api_client)

        for strpool in list_pool_resp:
            if strpool.type.lower() == "vmfs" or \
                    strpool.type.lower() == "networkfilesystem":
                list_config_storage_response = list_configurations(
                    cls.api_client,
                    name="vmware.create.full.clone",
                    storageid=strpool.id)
                res = validateList(list_config_storage_response)
                if res[2] == INVALID_INPUT:
                    raise Exception("Failed to list configurations")
                if list_config_storage_response[0].value == "false":
                    Configurations.update(cls.api_client,
                                          "vmware.create.full.clone",
                                          value="true",
                                          storageid=strpool.id)
                    cls.updateclone = True
                    StoragePool.update(cls.api_client,
                                       id=strpool.id,
                                       tags="scsi")
                cls.storageID = strpool.id
                break
    if cls.restartreq:
        cls.restartServer()

    # Create a service offering
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"]
    )
    cls.services_offering_vmware = ServiceOffering.create(
        cls.api_client, cls.services["service_offering"], tags="scsi")
    # Build the cleanup list
    cls._cleanup.extend([cls.service_offering,
                         cls.services_offering_vmware])
def setUpClass(cls):
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return

    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

    storpool_primary_storage = {
        "name": "cloud-test-dev-1",
        "zoneid": cls.zone.id,
        "url": "cloud-test-dev-1",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "cloud-test-dev-1"
    }

    storpool_service_offerings = {
        "name": "cloud-test-dev-1",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "cloud-test-dev-1"
    }

    storage_pool = list_storage_pools(cls.apiclient,
                                      name='cloud-test-dev-1')
    service_offerings = list_service_offering(cls.apiclient,
                                              name='cloud-test-dev-1')
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")

    cls.disk_offerings = disk_offerings[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    cls.debug(pprint.pformat(storage_pool))
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    # The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))

    if template == FAILED:
        assert False, "get_template() failed to return template " \
                      "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    cls.volume_1 = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=disk_offerings[0].id,
    )
    cls.volume_2 = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-2"},
        zoneid=cls.zone.id,
        diskofferingid=disk_offerings[0].id,
    )

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)

    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    cls._cleanup = []
    cls._cleanup.append(cls.virtual_machine)
    cls._cleanup.append(cls.volume_1)
    cls._cleanup.append(cls.volume_2)
    return
def setUpCloudStack(cls):
    cls._cleanup = []
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestResizeVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()

    storpool_primary_storage = cls.testdata[TestData.primaryStorage]
    cls.template_name = storpool_primary_storage.get("name")
    storpool_service_offerings = cls.testdata[TestData.serviceOfferingsIops]

    storpool_disk_offerings = {
        "name": "iops",
        "displaytext": "Testing IOPS on StorPool",
        "customizediops": True,
        "storagetype": "shared",
        "tags": cls.template_name,
    }

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=cls.template_name)
    service_offerings = list_service_offering(cls.apiclient, name='iops')
    disk_offerings = list_disk_offering(cls.apiclient, name="iops")

    if disk_offerings is None:
        disk_offerings = DiskOffering.create(cls.apiclient,
                                             storpool_disk_offerings,
                                             custom=True)
    else:
        disk_offerings = disk_offerings[0]
    cls.disk_offerings = disk_offerings

    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    template = get_template(cls.apiclient, cls.zone.id, account="system")
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))

    if template == FAILED:
        assert False, "get_template() failed to return template " \
                      "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.account = cls.helper.create_account(
        cls.apiclient,
        cls.services["account"],
        accounttype=1,
        domainid=cls.domain.id,
    )
    cls._cleanup.append(cls.account)

    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)

    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    cls.local_cluster = cls.helper.get_local_cluster(cls.apiclient,
                                                     zoneid=cls.zone.id)
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.apiclient,
                                                   cls.local_cluster.id)
    assert len(cls.host) > 1, "Local cluster has fewer than 2 hosts"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]

    cls.remote_cluster = cls.helper.get_remote_cluster(cls.apiclient,
                                                       zoneid=cls.zone.id)
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.apiclient, cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Remote cluster has fewer than 2 hosts"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]

    cls.volume_1 = cls.helper.create_custom_disk(
        cls.apiclient,
        {"diskname": "StorPoolDisk"},
        zoneid=cls.zone.id,
        size=5,
        miniops=2000,
        maxiops=5000,
        account=cls.account.name,
        domainid=cls.account.domainid,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume_2 = cls.helper.create_custom_disk(
        cls.apiclient,
        {"diskname": "StorPoolDisk"},
        zoneid=cls.zone.id,
        size=5,
        miniops=2000,
        maxiops=5000,
        account=cls.account.name,
        domainid=cls.account.domainid,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume = cls.helper.create_custom_disk(
        cls.apiclient,
        {"diskname": "StorPoolDisk"},
        zoneid=cls.zone.id,
        size=5,
        miniops=2000,
        maxiops=5000,
        account=cls.account.name,
        domainid=cls.account.domainid,
        diskofferingid=cls.disk_offerings.id,
    )

    cls.virtual_machine = cls.helper.create_vm_custom(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        minIops=1000,
        maxIops=5000,
        rootdisksize=10
    )
    cls.virtual_machine2 = cls.helper.create_vm_custom(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        minIops=1000,
        maxIops=5000,
        rootdisksize=10
    )
    cls.virtual_machine_live_migration_1 = cls.helper.create_vm_custom(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        minIops=1000,
        maxIops=5000,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10
    )
    cls.virtual_machine_live_migration_2 = cls.helper.create_vm_custom(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        minIops=1000,
        maxIops=5000,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10
    )

    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    cls._cleanup = []
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    cls.debug("##################### zone %s" % cls.zone)

    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()

    storpool_primary_storage = {
        "name": "ssd",
        TestData.scope: "ZONE",
        "url": "ssd",
        TestData.provider: "StorPool",
        "path": "/dev/storpool",
        TestData.capacityBytes: 2251799813685248,
        TestData.hypervisor: "KVM"
    }
    storpool_without_qos = {
        "name": "ssd",
        "displaytext": "ssd",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    storpool_qos_template = {
        "name": "qos",
        "displaytext": "qos",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    disk_offering_ssd = {
        "name": "ssd",
        "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
        "disksize": 10,
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        TestData.tags: "ssd",
        "storagetype": "shared"
    }
    disk_offering_qos = {
        "name": "qos",
        "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
        "disksize": 10,
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        TestData.tags: "ssd",
        "storagetype": "shared"
    }

    cls.template_name = storpool_primary_storage.get("name")
    cls.template_name_2 = "qos"

    storage_pool = list_storage_pools(cls.apiclient, name="ssd")
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    service_offerings_ssd = list_service_offering(cls.apiclient,
                                                  name=cls.template_name)
    service_offerings_qos = list_service_offering(cls.apiclient,
                                                  name=cls.template_name_2)
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient, storpool_without_qos)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    if service_offerings_qos is None:
        service_offerings_qos = ServiceOffering.create(
            cls.apiclient, storpool_qos_template)
        ResourceDetails.create(cls.apiclient,
                               resourceid=service_offerings_qos.id,
                               resourcetype="ServiceOffering",
                               details={"SP_TEMPLATE": "qos"},
                               fordisplay=True)
    else:
        service_offerings_qos = service_offerings_qos[0]

    cls.service_offering_ssd = service_offerings_ssd
    cls.service_offering_qos = service_offerings_qos

    cls.disk_offering_ssd = list_disk_offering(cls.apiclient,
                                               name=cls.template_name)
    cls.disk_offering_qos = list_disk_offering(cls.apiclient,
                                               name=cls.template_name_2)

    if cls.disk_offering_ssd is None:
        cls.disk_offering_ssd = DiskOffering.create(cls.apiclient,
                                                    disk_offering_ssd)
    else:
        cls.disk_offering_ssd = cls.disk_offering_ssd[0]

    if cls.disk_offering_qos is None:
        cls.disk_offering_qos = DiskOffering.create(cls.apiclient,
                                                    disk_offering_qos)
        ResourceDetails.create(cls.apiclient,
                               resourceid=cls.disk_offering_qos.id,
                               resourcetype="DiskOffering",
                               details={"SP_TEMPLATE": "qos"},
                               fordisplay=True)
    else:
        cls.disk_offering_qos = cls.disk_offering_qos[0]

    # The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    if template == FAILED:
        assert False, "get_template() failed to return template " \
                      "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.services["diskofferingid"] = cls.disk_offering_ssd.id

    cls.account = cls.helper.create_account(cls.apiclient,
                                            cls.services["account"],
                                            accounttype=1,
                                            domainid=cls.domain.id,
                                            roleid=1)
    cls._cleanup.append(cls.account)

    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)

    cls.volume_1 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.volume_2 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_ssd.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume_1)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume_2)

    cls.template = template
    cls.hostid = cls.virtual_machine.hostid
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []

    cls.helper = HelperUtil(cls)
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    cfg.logger.info("Proceeding with the setup")

    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)

    cls.spapi = spapi.Api.fromConfig(multiCluster=True)

    td = TestData()
    cls.testdata = td.testdata

    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")

    if cls.template == FAILED:
        assert False, "get_template() failed to return template " \
                      "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None

    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(cls.apiclient,
                                                      serviceOffering)
    assert cls.service_offering is not None

    cls.disk_offering = disk_offering[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)

    # Check that the ROOT disk is created with a UUID name
    root_volume = list_volumes(cls.apiclient,
                               virtualmachineid=cls.virtual_machine3.id,
                               type="ROOT")
    try:
        spvolume = cls.spapi.volumeList(volumeName=root_volume[0].id)
    except spapi.ApiError as err:
        cfg.logger.info("Root volume is not created with UUID")
        raise Exception(err)

    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)

    cls.random_data_vm_snapshot1 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"

    volume_attached = cls.virtual_machine.attach_volume(cls.apiclient,
                                                        cls.volume)

    cls.helper.write_on_disks(cls.random_data_vm_snapshot1,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)

    MemorySnapshot = False
    cls.vm_snapshot1 = cls.helper.create_vm_snapshot(MemorySnapshot,
                                                     cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot1,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)

    cls.random_data_vm_snapshot2 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot2,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)

    cls.vm_snapshot2 = cls.helper.create_vm_snapshot(MemorySnapshot,
                                                     cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot2,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)

    # VM snapshot to be deleted without revert
    cls.random_data_vm_snapshot3 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot3,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)

    cls.vm_snapshot_for_delete = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(
        cls.vm_snapshot_for_delete, cls.virtual_machine, cls.test_dir,
        cls.random_data)

    cls.snapshot_on_secondary = cls.helper.create_snapshot(
        False, cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_on_secondary)
    cls.template_on_secondary = cls.helper.create_template_from_snapshot(
        cls.services, snapshotid=cls.snapshot_on_secondary.id)
    cls._cleanup.append(cls.template_on_secondary)

    cls.snapshot_bypassed = cls.helper.create_snapshot(
        True, cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_bypassed)
    cls.template_bypased = cls.helper.create_template_from_snapshot(
        cls.services, snapshotid=cls.snapshot_bypassed.id)
    cls._cleanup.append(cls.template_bypased)

    # Change to the latest commit with the globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def _verify_attributes(self, primarystorage_id, primary_storage_url):
    """Cross-check the primary storage attributes between CloudStack,
    the Datera app-instance, the database and XenServer."""
    # CloudStack primary storage pool
    storage_pools_response = list_storage_pools(self.apiClient,
                                                id=primarystorage_id)
    storage_pools_response = storage_pools_response[0]
    storage_pools_response_type = storage_pools_response.type
    storage_pools_response_iqn = storage_pools_response.path
    storage_pools_response_disk = storage_pools_response.disksizetotal
    storage_pools_response_ipaddress = storage_pools_response.ipaddress
    storage_pools_response_state = storage_pools_response.state
    storage_pools_response_iops = storage_pools_response.capacityiops

    self.assertEqual(storage_pools_response_type, "IscsiLUN",
                     "Failed to create IscsiLUN")
    self.assertEqual(storage_pools_response_state, "Up",
                     "Primary storage status is down")

    # Datera app instances
    datera_primarystorage_name = "cloudstack-" + primarystorage_id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primarystorage_name:
            app_instance_response = instance
    app_instance_response_iqn = (
        app_instance_response['storage_instances']
        ['storage-1']['access']['iqn'])
    app_instance_response_ipaddress = (
        app_instance_response['storage_instances']['storage-1']['access']
        ['ips'])
    app_instance_response_op_state = (
        app_instance_response['storage_instances']['storage-1']['volumes']
        ['volume-1']['op_state'])
    app_instance_response_disk = (
        app_instance_response['storage_instances']['storage-1']['volumes']
        ['volume-1']['size'] * 1073741824)
    app_instance_response_iops = (
        app_instance_response['storage_instances']['storage-1']['volumes']
        ['volume-1']['performance_policy']['total_iops_max'])
    app_instance_response_replica = (
        app_instance_response['storage_instances']['storage-1']['volumes']
        ['volume-1']['replica_count'])
    app_instance_response_ippool = (
        app_instance_response['storage_instances']['storage-1']['ip_pool'])

    cs_replica = [
        data for data in primary_storage_url.split(";")
        if data.startswith('replica')
    ]
    cs_netPool = [
        data for data in primary_storage_url.split(";")
        if data.startswith('networkPoolName')
    ]

    if cs_replica:
        replica_count = ''.join(cs_replica).split("=")[1]
        self.assertEqual(int(replica_count),
                         int(app_instance_response_replica),
                         "Incorrect replica count")
    if not cs_replica:
        self.assertEqual(int(3), int(app_instance_response_replica),
                         "Incorrect replica count")
    if cs_netPool:
        cs_netPool = ''.join(cs_netPool).split("=")[1]
        self.assertEqual(app_instance_response_ippool.split('/')[2],
                         cs_netPool,
                         "Incorrect networkPoolName")
    if not cs_netPool:
        self.assertEqual(app_instance_response_ippool.split('/')[2],
                         'default',
                         "Incorrect networkPoolName")

    # Verify in the SQL database
    command = ("select status,id from storage_pool where uuid='" +
               primarystorage_id + "'")
    sql_result = self.dbConnection.execute(command)
    self.assertEqual(sql_result[0][0], 'Up',
                     "Primary storage not added in database")

    command2 = ("select * from storage_pool_details where pool_id='" +
                str(sql_result[0][1]) + "'")
    sql_result1 = self.dbConnection.execute(command2)
    flag = 0
    for data in sql_result1:
        for record in data:
            if record == datera_primarystorage_name:
                flag = 1
    self.assertEqual(flag, 1, "Primary storage not added in database")

    self.assertEqual(app_instance_response_op_state, "available",
                     "Datera app instance is down")

    # XenServer details
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == primarystorage_id:
            xen_server_response = value
    # xen_server_response_iqn = xen_server_response['name_description']
    xen_server_response_type = xen_server_response['type']
    xen_server_physical_size = xen_server_response['physical_size']

    if (storage_pools_response_ipaddress not in
            app_instance_response_ipaddress):
        self.fail("Storage IP addresses are different")

    self.assertEqual(xen_server_response_type, "lvmoiscsi",
                     "Failed to create lvmoiscsi on the XenServer")
    self.assertEqual(storage_pools_response_iqn.split("/")[1],
                     app_instance_response_iqn,
                     "IQN values mismatch")
    self.assertEqual(app_instance_response_disk,
                     storage_pools_response_disk,
                     "Disk sizes are different")
    self.assertEqual(int(xen_server_physical_size) + 12582912,
                     storage_pools_response_disk,
                     "Disk sizes are not the same in XenServer and CloudStack")
    self.assertEqual(storage_pools_response_iops,
                     app_instance_response_iops,
                     "IOPS values are incorrect")
def setUpClass(cls):
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.userapiclient = testClient.getUserApiClient(UserName="******",
                                                    DomainName="ROOT")

    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return

    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    storpool_primary_storage = {
        "name": "ssd",
        "zoneid": cls.zone.id,
        "url": "ssd",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "ssd"
    }

    storpool_service_offerings = {
        "name": "ssd",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }

    storage_pool = list_storage_pools(cls.apiclient, name='ssd')
    service_offerings = list_service_offering(cls.apiclient, name='ssd')
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    cls.disk_offerings = disk_offerings[0]

    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    template = get_template(cls.apiclient, cls.zone.id, account="system")
    if template == FAILED:
        assert False, "get_template() failed to return template " \
            "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.service_offering = service_offerings

    user = list_users(cls.apiclient, account='StorPoolUser',
                      domainid=cls.domain.id)
    account = list_accounts(cls.apiclient, id=user[0].accountid)
    if account is None:
        role = Role.list(cls.apiclient, name='User')
        cmd = createAccount.createAccountCmd()
        cmd.email = '*****@*****.**'
        cmd.firstname = 'StorPoolUser'
        cmd.lastname = 'StorPoolUser'
        cmd.password = '******'
        cmd.username = '******'
        cmd.roleid = role[0].id
        account = cls.apiclient.createAccount(cmd)
    else:
        account = account[0]
    cls.account = account

    # cls.tmp_files = []
    # cls.keypair = SSHKeyPair.create(
    #     cls.apiclient,
    #     name=random_gen() + ".pem",
    #     account=cls.account.name,
    #     domainid=cls.account.domainid)
    #
    # keyPairFilePath = tempfile.gettempdir() + os.sep + cls.keypair.name
    # # Cleanup at end of execution
    # cls.tmp_files.append(keyPairFilePath)
    #
    # cls.debug("File path: %s" % keyPairFilePath)
    #
    # f = open(keyPairFilePath, "w+")
    # f.write(cls.keypair.privatekey)
    # f.close()
    #
    # os.system("chmod 400 " + keyPairFilePath)
    #
    # cls.keyPairFilePath = keyPairFilePath

    cls.volume_1 = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume_2 = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-2"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.userapiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10,
    )
    cls.virtual_machine2 = VirtualMachine.create(
        cls.userapiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10,
    )
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    cls._cleanup = []
    cls._cleanup.append(cls.virtual_machine)
    cls._cleanup.append(cls.virtual_machine2)
    cls._cleanup.append(cls.volume_1)
    cls._cleanup.append(cls.volume_2)
    cls._cleanup.append(cls.volume)
    return
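# Illustrative sketch (hypothetical, mirroring the unsupportedHypervisor
# guard set in setUpClass above): tests in this class would typically skip
# themselves on hypervisors the StorPool plugin does not support.
def setUp(self):
    # Skip the test rather than fail it when the hypervisor is unsupported
    if self.unsupportedHypervisor:
        self.skipTest("Skipping test because of unsupported hypervisor %s"
                      % self.hypervisor)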
def setUpCloudStack(cls):
    super(TestRenameObjectsWithUuids, cls).setUpClass()
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    cls.helper = HelperUtil(cls)
    cls._cleanup = []

    testClient = super(TestRenameObjectsWithUuids, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return

    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    storpool_primary_storage = {
        "name": "ssd",
        "zoneid": cls.zone.id,
        "url": "ssd",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "ssd"
    }

    storpool_service_offerings = {
        "name": "ssd",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }

    storage_pool = list_storage_pools(cls.apiclient, name='ssd')
    service_offerings = list_service_offering(cls.apiclient, name='ssd')
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")

    cls.disk_offering = disk_offerings[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]

    cls.debug(pprint.pformat(storage_pool))
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    # The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")

    cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id)
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))

    if template == FAILED:
        assert False, "get_template() failed to return template " \
            "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    cls.local_cluster = cls.get_local_cluster(zoneid=cls.zone.id)
    cls.host = cls.list_hosts_by_cluster_id(cls.local_cluster.id)
    assert len(cls.host) > 1, "Less than two hosts in the local cluster"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]

    cls.remote_cluster = cls.get_remote_cluster(zoneid=cls.zone.id)
    cls.host_remote = cls.list_hosts_by_cluster_id(cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Less than two hosts in the remote cluster"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]

    cls.volume_1 = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume_1)

    cls.volume = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume)

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10
    )
    cls.volume_on_remote = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume_on_remote)

    cls.virtual_machine_on_remote = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10
    )
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def _verify_attributes(self, primarystorage_id, primary_storage_url):
    # CloudStack primary storage pool
    storage_pools_response = list_storage_pools(
        self.apiClient, id=primarystorage_id)
    storage_pools_response = storage_pools_response[0]

    storage_pools_response_type = storage_pools_response.type
    storage_pools_response_iqn = storage_pools_response.path
    storage_pools_response_disk = storage_pools_response.disksizetotal
    storage_pools_response_ipaddress = storage_pools_response.ipaddress
    storage_pools_response_state = storage_pools_response.state
    storage_pools_response_iops = storage_pools_response.capacityiops

    self.assertEqual(
        storage_pools_response_type,
        "IscsiLUN",
        "Failed to create IscsiLUN")

    self.assertEqual(
        storage_pools_response_state,
        "Up",
        "Primary storage status is down")

    # Datera app instances
    datera_primarystorage_name = "cloudstack-" + primarystorage_id
    for instance in self.datera_api.app_instances.list():
        if instance['name'] == datera_primarystorage_name:
            app_instance_response = instance

    app_instance_response_iqn = (
        app_instance_response['storage_instances']
        ['storage-1']['access']['iqn'])
    app_instance_response_ipaddress = (
        app_instance_response['storage_instances']
        ['storage-1']['access']['ips'])
    app_instance_response_op_state = (
        app_instance_response['storage_instances']
        ['storage-1']['volumes']['volume-1']['op_state'])
    app_instance_response_disk = (
        app_instance_response['storage_instances']
        ['storage-1']['volumes']['volume-1']['size'] * 1073741824)
    app_instance_response_iops = (
        app_instance_response['storage_instances']
        ['storage-1']['volumes']['volume-1']['performance_policy']
        ['total_iops_max'])
    app_instance_response_replica = (
        app_instance_response['storage_instances']
        ['storage-1']['volumes']['volume-1']['replica_count'])
    app_instance_response_ippool = (
        app_instance_response['storage_instances']
        ['storage-1']['ip_pool'])

    cs_replica = [data for data in primary_storage_url.split(";")
                  if data.startswith('replica')]
    cs_netPool = [data for data in primary_storage_url.split(";")
                  if data.startswith('networkPoolName')]

    if cs_replica:
        replica_count = ''.join(cs_replica).split("=")[1]
        self.assertEqual(
            int(replica_count),
            int(app_instance_response_replica),
            "Incorrect replica count")
    if not cs_replica:
        # The default Datera replica count is 3
        self.assertEqual(
            3,
            int(app_instance_response_replica),
            "Incorrect replica count")

    if cs_netPool:
        cs_netPool = ''.join(cs_netPool).split("=")[1]
        self.assertEqual(
            app_instance_response_ippool.split('/')[2],
            cs_netPool,
            "Incorrect networkPoolName")
    if not cs_netPool:
        self.assertEqual(
            app_instance_response_ippool.split('/')[2],
            'default',
            "Incorrect networkPoolName")

    # Verify in the SQL database
    command = (
        "select status,id from storage_pool where uuid='" +
        primarystorage_id + "'")
    sql_result = self.dbConnection.execute(command)
    self.assertEqual(
        sql_result[0][0],
        'Up',
        "Primary storage not added in database")

    command2 = (
        "select * from storage_pool_details where pool_id='" +
        str(sql_result[0][1]) + "'")
    sql_result1 = self.dbConnection.execute(command2)
    flag = 0
    for data in sql_result1:
        for record in data:
            if record == datera_primarystorage_name:
                flag = 1
    self.assertEqual(
        flag,
        1,
        "Primary storage not added in database")

    self.assertEqual(
        app_instance_response_op_state,
        "available",
        "Datera app instance is down")

    # XenServer details
    for key, value in self.xen_session.xenapi.SR.get_all_records().items():
        if value['name_description'] == primarystorage_id:
            xen_server_response = value

    #xen_server_response_iqn = xen_server_response['name_description']
    xen_server_response_type = xen_server_response['type']
    xen_server_physical_size = xen_server_response['physical_size']

    if (storage_pools_response_ipaddress not in
            app_instance_response_ipaddress):
        self.fail("Storage IP addresses are different")

    self.assertEqual(
        xen_server_response_type,
        "lvmoiscsi",
        "Failed to create lvmoiscsi on xen server")

    self.assertEqual(
        storage_pools_response_iqn.split("/")[1],
        app_instance_response_iqn,
        "Iqn values mismatch")

    self.assertEqual(
        app_instance_response_disk,
        storage_pools_response_disk,
        "Disk sizes are different")

    self.assertEqual(
        int(xen_server_physical_size) + 12582912,
        storage_pools_response_disk,
        "Disk sizes are not the same in XenServer and CloudStack")

    self.assertEqual(
        storage_pools_response_iops,
        app_instance_response_iops,
        "IOPS values are incorrect")
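# A minimal sketch (not from the original tests) of the URL parsing that
# _verify_attributes performs inline: the Datera primary storage URL is a
# semicolon-separated list of key=value pairs, with the replica count
# defaulting to 3 and the network pool to 'default' when a key is absent.
# The helper name is hypothetical.
def _parse_datera_storage_url(primary_storage_url):
    params = dict(pair.split("=", 1)
                  for pair in primary_storage_url.split(";")
                  if "=" in pair)
    replica_count = int(params.get("replica", 3))
    network_pool = params.get("networkPoolName", "default")
    return replica_count, network_pool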
def setUpClass(cls):
    testClient = super(TestDeployVM, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # Setup test data
    td = TestData()
    cls.testdata = td.testdata

    # OS template
    template = cls.testdata[TestData.template]

    # StorPool primary storages
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]

    storage_pool = list_storage_pools(
        cls.apiclient,
        name=primarystorage.get("name")
    )
    if storage_pool is None:
        cls.primary_storage = StoragePool.create(
            cls.apiclient,
            primarystorage,
            scope=primarystorage[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage[TestData.provider],
            tags=primarystorage[TestData.tags],
            capacityiops=primarystorage[TestData.capacityIops],
            capacitybytes=primarystorage[TestData.capacityBytes],
            hypervisor=primarystorage[TestData.hypervisor]
        )
    else:
        cls.primary_storage = storage_pool[0]

    storage_pool = list_storage_pools(
        cls.apiclient,
        name=primarystorage2.get("name")
    )
    if storage_pool is None:
        cls.primary_storage2 = StoragePool.create(
            cls.apiclient,
            primarystorage2,
            scope=primarystorage2[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage2[TestData.provider],
            tags=primarystorage2[TestData.tags],
            capacityiops=primarystorage2[TestData.capacityIops],
            capacitybytes=primarystorage2[TestData.capacityBytes],
            hypervisor=primarystorage2[TestData.hypervisor]
        )
    else:
        cls.primary_storage2 = storage_pool[0]

    cls.debug(primarystorage)
    cls.debug(primarystorage2)

    os_template = get_template(cls.apiclient, cls.zone.id, account="system")
    if os_template == FAILED:
        cls.template = Template.register(
            cls.apiclient,
            template,
            cls.zone.id,
            randomize_name=False
        )
    else:
        cls.template = os_template
    cls.debug(template)

    # Set Zones and disk offerings
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["small"]["template"] = cls.template.id
    cls.services["iso1"]["zoneid"] = cls.zone.id

    cls.account = list_accounts(cls.apiclient, name="admin")[0]
    cls.debug(cls.account.id)

    service_offering_only = list_service_offering(cls.apiclient,
                                                  name="cloud-test-dev-1")
    cls.service_offering = service_offering_only[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services["small"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services['mode']
    )
    cls.cleanup = [cls.virtual_machine]
def setUpClass(cls):
    cls.cloudstacktestclient = super(TestDeployVmRootSize,
                                     cls).getClsTestClient()
    cls.api_client = cls.cloudstacktestclient.getApiClient()
    cls.hypervisor = cls.cloudstacktestclient.getHypervisorInfo().lower()
    cls.mgtSvrDetails = cls.config.__dict__["mgtSvr"][0].__dict__

    # Get Zone, Domain and Default Built-in template
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client,
                        cls.cloudstacktestclient.getZoneForTests())
    cls.services = cls.testClient.getParsedTestDataConfig()
    cls.services["mode"] = cls.zone.networktype
    cls._cleanup = []
    cls.updateclone = False
    cls.restartreq = False
    cls.defaultdiskcontroller = "ide"
    cls.template = get_template(cls.api_client, cls.zone.id)
    if cls.template == FAILED:
        assert False, "get_template() failed to return template "

    # Create a user account
    cls.account = Account.create(cls.api_client,
                                 cls.services["account"],
                                 domainid=cls.domain.id,
                                 admin=True)
    cls._cleanup.append(cls.account)

    list_pool_resp = list_storage_pools(cls.api_client,
                                        account=cls.account.name,
                                        domainid=cls.domain.id)

    # Identify the storage pool type and set vmware full clone to true
    # if the storage is VMFS
    if cls.hypervisor == 'vmware':
        # Please make sure the url in the templateregister dictionary in
        # test_data.config points to an .ova file
        list_config_storage_response = list_configurations(
            cls.api_client, name="vmware.root.disk.controller")
        cls.defaultdiskcontroller = list_config_storage_response[0].value
        if list_config_storage_response[0].value in ("ide", "osdefault"):
            Configurations.update(cls.api_client,
                                  "vmware.root.disk.controller",
                                  value="scsi")
            cls.updateclone = True
            cls.restartreq = True

        list_config_fullclone_global_response = list_configurations(
            cls.api_client, name="vmware.create.full.clone")
        if list_config_fullclone_global_response[0].value == "false":
            Configurations.update(cls.api_client,
                                  "vmware.create.full.clone",
                                  value="true")
            cls.updateclone = True
            cls.restartreq = True

        cls.tempobj = Template.register(cls.api_client,
                                        cls.services["templateregister"],
                                        hypervisor=cls.hypervisor,
                                        zoneid=cls.zone.id,
                                        account=cls.account.name,
                                        domainid=cls.domain.id)
        cls.tempobj.download(cls.api_client)

        for strpool in list_pool_resp:
            if strpool.type.lower() in ("vmfs", "networkfilesystem"):
                list_config_storage_response = list_configurations(
                    cls.api_client,
                    name="vmware.create.full.clone",
                    storageid=strpool.id)
                res = validateList(list_config_storage_response)
                if res[2] == INVALID_INPUT:
                    raise Exception("Failed to list configurations ")
                if list_config_storage_response[0].value == "false":
                    Configurations.update(cls.api_client,
                                          "vmware.create.full.clone",
                                          value="true",
                                          storageid=strpool.id)
                    cls.updateclone = True
                StoragePool.update(cls.api_client,
                                   id=strpool.id,
                                   tags="scsi")
                cls.storageID = strpool.id
                break
        if cls.restartreq:
            cls.restartServer()

    # Create service offerings and build the cleanup list
    cls.service_offering = ServiceOffering.create(
        cls.api_client, cls.services["service_offering"])
    cls.services_offering_vmware = ServiceOffering.create(
        cls.api_client, cls.services["service_offering"], tags="scsi")
    cls._cleanup.extend(
        [cls.service_offering, cls.services_offering_vmware])
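# Note (an inference from the flow above, not stated in the original file):
# "vmware.create.full.clone" exists both as a global setting and per storage
# pool, so the loop re-reads it with storageid=strpool.id and only updates
# the storage-scoped value when that value is still "false".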
def test_02_list_snapshots_with_removed_data_store(self):
    """Test listing volume snapshots with removed data stores
    """
    # 1 - Create new volume -> V
    # 2 - Create new Primary Storage -> PS
    # 3 - Attach and detach volume V from vm
    # 4 - Migrate volume V to PS
    # 5 - Take volume V snapshot -> S
    # 6 - List snapshot and verify it gets properly listed although
    #     Primary Storage was removed

    # Create new volume
    vol = Volume.create(
        self.apiclient,
        self.services["volume"],
        diskofferingid=self.disk_offering.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.cleanup.append(vol)
    self.assertIsNotNone(vol, "Failed to create volume")
    vol_res = Volume.list(self.apiclient, id=vol.id)
    self.assertEqual(
        validateList(vol_res)[0],
        PASS,
        "Invalid response returned for list volumes")
    vol_uuid = vol_res[0].id

    # Create new Primary Storage
    clusters = list_clusters(self.apiclient, zoneid=self.zone.id)
    assert isinstance(clusters, list) and len(clusters) > 0
    storage = StoragePool.create(self.apiclient,
                                 self.services["nfs2"],
                                 clusterid=clusters[0].id,
                                 zoneid=self.zone.id,
                                 podid=self.pod.id)
    self.cleanup.append(self.virtual_machine_with_disk)
    self.cleanup.append(storage)

    self.assertEqual(storage.state, 'Up', "Check primary storage state")
    self.assertEqual(storage.type, 'NetworkFilesystem',
                     "Check storage pool type")
    storage_pools_response = list_storage_pools(self.apiclient,
                                                id=storage.id)
    self.assertEqual(isinstance(storage_pools_response, list), True,
                     "Check list response returns a valid list")
    self.assertNotEqual(len(storage_pools_response), 0,
                        "Check list storage pools response")
    storage_response = storage_pools_response[0]
    self.assertEqual(storage_response.id, storage.id,
                     "Check storage pool ID")
    self.assertEqual(storage.type, storage_response.type,
                     "Check storage pool type")

    # Attach created volume to vm, then detach it to be able to migrate it
    self.virtual_machine_with_disk.stop(self.apiclient)
    self.virtual_machine_with_disk.attach_volume(self.apiclient, vol)
    self.virtual_machine_with_disk.detach_volume(self.apiclient, vol)

    # Migrate volume to new Primary Storage
    Volume.migrate(self.apiclient,
                   storageid=storage.id,
                   volumeid=vol.id)
    volume_response = list_volumes(
        self.apiclient,
        id=vol.id,
    )
    self.assertNotEqual(len(volume_response), 0,
                        "Check list Volumes response")
    volume_migrated = volume_response[0]
    self.assertEqual(volume_migrated.storageid, storage.id,
                     "Check volume storage id")

    # Take snapshot of new volume
    snapshot = Snapshot.create(self.apiclient,
                               volume_migrated.id,
                               account=self.account.name,
                               domainid=self.account.domainid)
    self.debug("Snapshot created: ID - %s" % snapshot.id)

    # Delete volume, VM and created Primary Storage
    cleanup_resources(self.apiclient, self.cleanup)

    # List snapshot and verify it gets properly listed although
    # Primary Storage was removed
    snapshot_response = Snapshot.list(self.apiclient, id=snapshot.id)
    self.assertNotEqual(len(snapshot_response), 0,
                        "Check list Snapshot response")
    self.assertEqual(snapshot_response[0].id, snapshot.id,
                     "Check snapshot id")

    # Delete snapshot and verify it gets properly deleted
    # (should not be listed)
    self.cleanup = [snapshot]
    cleanup_resources(self.apiclient, self.cleanup)
    self.cleanup = []
    snapshot_response_2 = Snapshot.list(self.apiclient, id=snapshot.id)
    self.assertEqual(snapshot_response_2, None,
                     "Check list Snapshot response")
    return
def setUpCloudStack(cls):
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)

    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")

    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)

    cfg.logger.info("Proceeding with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)

    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id,
                                account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template " \
            "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]

    storage_pool = list_storage_pools(
        cls.apiclient,
        name=primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")

    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None

    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, serviceOffering)
    assert cls.service_offering is not None

    service_offering2 = list_service_offering(cls.apiclient, name="ssd2")
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient, serviceOffering2)
    assert cls.service_offering2 is not None

    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine3)

    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine4)

    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine5)

    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine6)

    cls.volume1 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume1)

    cls.volume2 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_2],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume2)

    cls.volume3 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_3],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume3)

    cls.volume4 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_4],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume4)

    cls.volume5 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_5],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume5)

    cls.volume6 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_6],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume6)

    cls.volume7 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_7],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )

    cls.virtual_machine.stop(cls.apiclient, forced=True)

    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(cls.apiclient,
                                                           cls.volume1)

    vol = list_volumes(cls.apiclient, id=cls.volume3.id)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)

    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)

    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)

    cls.virtual_machine.start(cls.apiclient)

    list_root = list_volumes(cls.apiclient,
                             virtualmachineid=cls.virtual_machine5.id,
                             type="ROOT")

    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)

    # Snapshots backed up to secondary storage
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(
        cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)

    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)

    cls.helper.bypass_secondary(True)
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                 volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)

    Volume.delete(cls.volume7, cls.apiclient)

    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
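# Note (an interpretation of the helper usage above, not stated in the
# original file): bypass_secondary(False) lets subsequent Snapshot.create()
# calls back up to secondary storage, while bypass_secondary(True) keeps
# them only on the StorPool primary, so snapshot_uuid_bypassed exists solely
# as a StorPool snapshot when the globalId commit is switched in.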
def setUpClass(cls):
    cls.testClient = super(TestResizeVolume, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.hypervisor = (cls.testClient.getHypervisorInfo()).lower()
    cls.storageID = None

    # Fill services from the external config file
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.services["mode"] = cls.zone.networktype
    cls._cleanup = []
    cls.unsupportedStorageType = False
    cls.unsupportedHypervisorType = False
    cls.updateclone = False
    if cls.hypervisor not in ["xenserver", "kvm", "vmware"]:
        cls.unsupportedHypervisorType = True
        return

    cls.template = get_template(cls.api_client, cls.zone.id)
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = cls.template.id
    cls.services["volume"]["zoneid"] = cls.zone.id
    try:
        cls.parent_domain = Domain.create(
            cls.api_client,
            services=cls.services["domain"],
            parentdomainid=cls.domain.id)
        cls.parentd_admin = Account.create(cls.api_client,
                                           cls.services["account"],
                                           admin=True,
                                           domainid=cls.parent_domain.id)
        cls._cleanup.append(cls.parentd_admin)
        cls._cleanup.append(cls.parent_domain)

        list_pool_resp = list_storage_pools(
            cls.api_client,
            account=cls.parentd_admin.name,
            domainid=cls.parent_domain.id)
        res = validateList(list_pool_resp)
        if res[2] == INVALID_INPUT:
            raise Exception(
                "Failed to list storage pools - no storage pools found")

        # Identify the storage pool type and set vmware full clone to
        # true if the storage is VMFS
        if cls.hypervisor == 'vmware':
            for strpool in list_pool_resp:
                if strpool.type.lower() in ("vmfs", "networkfilesystem"):
                    list_config_storage_response = list_configurations(
                        cls.api_client,
                        name="vmware.create.full.clone",
                        storageid=strpool.id)
                    res = validateList(list_config_storage_response)
                    if res[2] == INVALID_INPUT:
                        raise Exception("Failed to list configurations ")
                    if list_config_storage_response[0].value == "false":
                        Configurations.update(cls.api_client,
                                              "vmware.create.full.clone",
                                              value="true",
                                              storageid=strpool.id)
                        cls.updateclone = True
                        StoragePool.update(cls.api_client,
                                           id=strpool.id,
                                           tags="scsi")
                        cls.storageID = strpool.id
                        cls.unsupportedStorageType = False
                        break
                else:
                    cls.unsupportedStorageType = True

        # Creating service offerings with normal config
        cls.service_offering = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"])
        cls.services_offering_vmware = ServiceOffering.create(
            cls.api_client, cls.services["service_offering"], tags="scsi")
        cls._cleanup.extend([cls.service_offering,
                             cls.services_offering_vmware])
    except Exception as e:
        cls.tearDownClass()
    return
def setUpCloudStack(cls):
    super(TestLiveMigration, cls).setUpClass()
    cls._cleanup = []

    testClient = super(TestLiveMigration, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return

    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()

    cls.template_name = cls.testdata[TestData.primaryStorage].get("name")

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=cls.template_name)
    service_offerings = list_service_offering(cls.apiclient,
                                              name=cls.template_name)
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")

    cls.disk_offerings = disk_offerings[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]

    cls.debug(pprint.pformat(storage_pool))
    if storage_pool is None:
        storage_pool = cls.helper.create_sp_template_and_storage_pool(
            cls.apiclient,
            cls.template_name,
            cls.testdata[TestData.primaryStorage],
            cls.zone.id)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, cls.testdata[TestData.serviceOffering])
    else:
        service_offerings = service_offerings[0]

    # The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")

    cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id)
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))

    if template == FAILED:
        assert False, "get_template() failed to return template " \
            "with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    cls.local_cluster = cls.helper.get_local_cluster(cls.apiclient,
                                                     zoneid=cls.zone.id)
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.apiclient,
                                                   cls.local_cluster.id)
    assert len(cls.host) > 1, "Less than two hosts in the local cluster"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]

    cls.remote_cluster = cls.helper.get_remote_cluster(cls.apiclient,
                                                       zoneid=cls.zone.id)
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.apiclient, cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Less than two hosts in the remote cluster"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]

    cls.services["domainid"] = cls.domain.id
    cls.services["zoneid"] = cls.zone.id
    cls.services["template"] = template.id
    cls.services["diskofferingid"] = disk_offerings[0].id

    cls.account = Account.create(cls.apiclient,
                                 cls.services["account"],
                                 domainid=cls.domain.id)
    cls._cleanup.append(cls.account)

    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)

    cls.volume_1 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.volume = Volume.create(cls.apiclient,
                               cls.services,
                               account=cls.account.name,
                               domainid=cls.account.domainid)
    cls.volume_on_remote = Volume.create(cls.apiclient,
                                         cls.services,
                                         account=cls.account.name,
                                         domainid=cls.account.domainid)

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10)
    cls.virtual_machine_on_remote = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10)
    cls.virtual_machine_migr_btw_cl = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10)
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
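# Illustrative sketch only (not part of the original setup): the kind of
# live-migration step this fixture prepares for, using Marvin's
# VirtualMachine.migrate to move a VM between the two hosts of the local
# cluster. The function name and assertion are hypothetical.
def _live_migrate_within_cluster_sketch(cls):
    cls.virtual_machine.migrate(cls.apiclient,
                                hostid=cls.host_on_local_2.id)
    # Re-list the VM and check it now runs on the destination host
    vm = VirtualMachine.list(cls.apiclient, id=cls.virtual_machine.id)[0]
    assert vm.hostid == cls.host_on_local_2.id, "VM did not migrate"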