def test_11_migrate_volume_and_change_offering(self):
    """Migrate a volume to another primary storage and change its offering.

    Validates the following:
    1. Creates a new Volume with a small disk offering
    2. Migrates the Volume to another primary storage and changes the offering
    3. Verifies the Volume has the new offering when migrated to the new storage
    """
    small_offering = list_disk_offering(self.apiclient, name="Small")[0]
    large_offering = list_disk_offering(self.apiclient, name="Large")[0]
    # Fix: use self.apiclient consistently — the rest of this test uses
    # self.apiclient; the original self.apiClient spelling here looks like
    # a typo (TODO confirm against the class's setUp attributes).
    volume = Volume.create(
        self.apiclient,
        self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=small_offering.id)
    self.debug("Created a small volume: %s" % volume.id)
    self.virtual_machine.attach_volume(self.apiclient, volume=volume)
    # On KVM the volume cannot be migrated while the VM runs; stop it first.
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.stop(self.apiclient)
    pools = StoragePool.listForMigration(self.apiclient, id=volume.id)
    if pools and len(pools) > 0:
        pool = pools[0]
    else:
        # skipTest raises unittest.SkipTest itself; the original
        # `raise self.skipTest(...)` re-raise was redundant/unreachable.
        self.skipTest("Not enough storage pools found, skipping test")
    # Clear tags so the offering change is not rejected by tag mismatch.
    if hasattr(pool, 'tags'):
        StoragePool.update(self.apiclient, id=pool.id, tags="")
    self.debug("Migrating Volume-ID: %s to Pool: %s" % (volume.id, pool.id))
    # VMware and XenServer support live volume migration; others do not.
    livemigrate = self.virtual_machine.hypervisor.lower() in ("vmware", "xenserver")
    Volume.migrate(self.apiclient,
                   volumeid=volume.id,
                   storageid=pool.id,
                   newdiskofferingid=large_offering.id,
                   livemigrate=livemigrate)
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.start(self.apiclient)
    migrated_vol = Volume.list(self.apiclient, id=volume.id)[0]
    self.assertEqual(migrated_vol.diskofferingname,
                     large_offering.name,
                     "Offering name did not match with the new one ")
    return
def test_01_create_disk_offering(self):
    """Test to create disk offering

    # Validate the following:
    # 1. createDiskOfferings should return valid info for new offering
    # 2. The Cloud Database contains the valid information
    """
    offering_data_domainid = "{0},{1}".format(self.domain_11.id, self.domain_2.id)
    disk_offering = DiskOffering.create(
        self.apiclient,
        self.services["disk_offering"],
        domainid=offering_data_domainid)
    self.cleanup.append(disk_offering)
    self.debug("Created Disk offering with ID: %s" % disk_offering.id)
    list_disk_response = list_disk_offering(self.apiclient,
                                            id=disk_offering.id)
    self.assertEqual(isinstance(list_disk_response, list),
                     True,
                     "Check list response returns a valid list")
    self.assertNotEqual(len(list_disk_response),
                        0,
                        "Check Disk offering is created")
    disk_response = list_disk_response[0]
    self.assertEqual(disk_response.displaytext,
                     self.services["disk_offering"]["displaytext"],
                     "Check server displaytext in createDiskOffering")
    self.assertEqual(disk_response.name,
                     self.services["disk_offering"]["name"],
                     "Check name in createDiskOffering")
    # Fix: assertItemsEqual was removed in Python 3; assertCountEqual is
    # the order-insensitive replacement (and is what test_02 already uses).
    self.assertCountEqual(disk_response.domainid.split(","),
                          offering_data_domainid.split(","),
                          "Check domainid in createDiskOffering")
    return
def test_02_edit_disk_offering(self):
    """Test to update existing disk offering"""
    # 1. updateDiskOffering should return a valid information for the updated offering
    # 2. updateDiskOffering should fail when trying to add child domain but parent domain is
    #    also passed
    # 3. updateDiskOffering should be able to add new domain to the offering
    self.debug("Updating disk offering with ID: %s" % self.disk_offering.id)
    cmd = updateDiskOffering.updateDiskOfferingCmd()
    # Add parameters for API call
    cmd.id = self.disk_offering.id
    input_domainid = "{0},{1},{2}".format(self.domain_1.id,
                                          self.domain_11.id,
                                          self.domain_2.id)
    # Expected result: child domain (domain_11) is dropped because its
    # parent (domain_1) is also in the list.
    result_domainid = "{0},{1}".format(self.domain_1.id, self.domain_2.id)
    cmd.domainid = input_domainid
    self.apiclient.updateDiskOffering(cmd)
    list_disk_response = list_disk_offering(
        self.apiclient,
        id=self.disk_offering.id
    )
    self.assertEqual(
        isinstance(list_disk_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_disk_response),
        0,
        "Check Disk offering is updated"
    )
    try:
        self.assertCountEqual(
            list_disk_response[0].domainid.split(","),
            input_domainid.split(","),
            "Check child domainid in updateDiskOffering, should fail"
        )
    except AssertionError:
        self.debug("Child domain check successful")
    else:
        # Bug fix: self.fail() raises AssertionError, so calling it inside
        # the try block let the except clause above swallow the failure and
        # the test could never actually fail. Run it only when the
        # assertion unexpectedly passed.
        self.fail("Child domain added to offering when parent domain already exist. Must be an error.")
    self.assertCountEqual(
        list_disk_response[0].domainid.split(","),
        result_domainid.split(","),
        "Check domainid in updateDiskOffering"
    )
    return
def _get_cs_volume_size_with_hsr(self, cs_volume):
    """Return the volume's size in GB inflated by the disk offering's
    hypervisor snapshot reserve (HSR) percentage, rounded up to a whole GB.

    :param cs_volume: CloudStack volume response object (provides ``size``
        in bytes and ``diskofferingid``).
    :return: int, size-with-HSR in gigabytes.
    """
    disk_size_bytes = cs_volume.size
    disk_offering_id = cs_volume.diskofferingid
    disk_offering = list_disk_offering(self.apiClient, id=disk_offering_id)[0]
    # Robustness fix: hypervisorsnapshotreserve is only populated for
    # managed-storage offerings; treat a missing/None value as 0% instead
    # of raising a TypeError in the arithmetic below.
    hsr = disk_offering.hypervisorsnapshotreserve or 0
    disk_size_with_hsr_bytes = disk_size_bytes + (disk_size_bytes * hsr) / 100
    # Round up so a partial GB still reserves a full GB.
    disk_size_with_hsr_gb = int(math.ceil(disk_size_with_hsr_bytes / (1024 ** 3)))
    return disk_size_with_hsr_gb
def setUpCloudStack(cls):
    """Build CloudStack at the pre-globalId (uuid) commit, boot it under
    Jetty, prepare the test resources (storage pools, offerings, four VMs
    and a data volume), then switch the checkout to the globalId commit so
    the migration tests run against the new code with old-style data."""
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build the management server at the commit that still uses uuids.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the maven log until Jetty reports that the server is up.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False, bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # give the server extra time to settle before API calls
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Reset the migration flag in the DB so the uuid->globalId migration
    # runs again after the commit switch below.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    # Both StorPool primary storages must already exist in the zone.
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    storage_pool2 = list_storage_pools(cls.apiclient,
                                       name=primarystorage2.get("name"))
    cls.primary_storage = storage_pool[0]
    cls.primary_storage2 = storage_pool2[0]

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None

    # Reuse the "ssd"/"ssd2" service offerings if present, else create them.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, serviceOffering)
    assert cls.service_offering is not None

    service_offering2 = list_service_offering(cls.apiclient, name="ssd2")
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient, serviceOffering2)
    assert cls.service_offering2 is not None

    # NFS-backed service offering used by the first two VMs.
    nfs_service_offerings = {
        "name": "nfs",
        "displaytext": "NFS service offerings",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "nfs"
    }
    nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')
    nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')
    if nfs_service_offering is None:
        nfs_service_offering = ServiceOffering.create(
            cls.apiclient, nfs_service_offerings)
    else:
        nfs_service_offering = nfs_service_offering[0]
    cls.nfs_service_offering = nfs_service_offering
    cls.nfs_storage_pool = nfs_storage_pool[0]
    # Ensure the NFS pool is usable in case a previous run left it in
    # maintenance mode.
    cls.nfs_storage_pool = StoragePool.cancelMaintenance(
        cls.apiclient, cls.nfs_storage_pool.id)
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    # VMs 1 and 2 start on NFS; VMs 3 and 4 start on StorPool offerings.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.nfs_service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.nfs_service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering2.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)

    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)

    # NOTE(review): the tags are passed as a single comma-joined string
    # inside a one-element list — presumably the API flattens this, but
    # verify it is not meant to be ["ssd", "nfs", "ssd2"].
    cls.primary_storage = StoragePool.update(cls.apiclient,
                                             id=cls.primary_storage.id,
                                             tags=["ssd, nfs, ssd2"])
    cls.primary_storage2 = StoragePool.update(cls.apiclient,
                                              id=cls.primary_storage2.id,
                                              tags=["ssd, ssd2"])

    #change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
    # Re-fetch the pools so subsequent tests see post-switch responses.
    cls.primary_storage = list_storage_pools(
        cls.apiclient, name=primarystorage.get("name"))[0]
    cls.primary_storage2 = list_storage_pools(
        cls.apiclient, name=primarystorage2.get("name"))[0]
def setUpClass(cls):
    """Acquire API/DB clients and zone resources, resolve (or create) the
    service and disk offerings, and create the data volume and VM shared
    by the TestVolumes test cases."""
    # Set up API client
    testclient = super(TestVolumes, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    cls.services = testclient.getParsedTestDataConfig()

    # Setup test data
    td = TestData()
    cls.testdata = td.testdata

    # Get Resources from Cloud Infrastructure
    cls.domain = get_domain(cls.apiClient)
    cls.zone = get_zone(cls.apiClient, testclient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiClient)
    cls.template = get_template(
        cls.apiClient,
        cls.zone.id,
        account = "system"
    )

    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOfferingOnly = cls.testdata[TestData.serviceOfferingOnly]

    # Both primary storage pools must already exist in the zone.
    storage_pool = list_storage_pools(
        cls.apiClient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]
    storage_pool = list_storage_pools(
        cls.apiClient,
        name = primarystorage2.get("name")
    )
    cls.primary_storage2 = storage_pool[0]

    disk_offering = list_disk_offering(
        cls.apiClient,
        name="Small"
    )
    assert disk_offering is not None

    # Reuse existing service offerings when present, otherwise create them
    # from the test data.
    service_offering = list_service_offering(
        cls.apiClient,
        name="cloud-test-dev-1"
    )
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiClient,
            serviceOffering)
    # NOTE(review): unlike service_offering_only below, cls.service_offering
    # is not asserted non-None here — confirm whether that is intentional.
    service_offering_only = list_service_offering(
        cls.apiClient,
        name="cloud-test-dev-2"
    )
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiClient,
            serviceOfferingOnly)
    assert cls.service_offering_only is not None

    cls.disk_offering = disk_offering[0]
    account = list_accounts(
        cls.apiClient,
        name="admin"
    )
    cls.account = account[0]

    # Create 1 data volume_1
    cls.volume = Volume.create(
        cls.apiClient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.apiClient,
        {"name":"StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )

    # Resources that are to be destroyed
    cls._cleanup = [
        cls.virtual_machine,
        cls.volume
    ]
def setUpClass(cls):
    """Resolve or create the 'cloudLocal' StorPool primary storage and the
    tagged service offering, then create two data volumes and a VM used by
    the TestStoragePool cases. Bails out early on unsupported hypervisors."""
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    # Hyper-V and LXC are not supported; individual tests check this flag.
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())

    # Definition used only if the 'cloudLocal' pool does not exist yet.
    storpool_primary_storage = {
        "name": "cloudLocal",
        "zoneid": cls.zone.id,
        "url": "cloudLocal",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "cloudLocal"
    }
    # Definition used only if the 'tags' service offering does not exist.
    storpool_service_offerings = {
        "name": "tags",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "test_tags"
    }
    storage_pool = list_storage_pools(cls.apiclient, name='cloudLocal')
    service_offerings = list_service_offering(cls.apiclient, name='tags')
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    cls.debug(pprint.pformat(storage_pool))
    # Create-or-reuse for the pool and the offering.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    #The version of CentOS has to be supported
    template = get_template(apiclient=cls.apiclient,
                            zone_id=cls.zone.id,
                            template_filter='self',
                            template_name="centos6",
                            domain_id=cls.domain.id,
                            hypervisor=cls.hypervisor)
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    # Two data volumes on the Small disk offering.
    cls.volume_1 = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=disk_offerings[0].id,
    )
    cls.volume_2 = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-2"},
        zoneid=cls.zone.id,
        diskofferingid=disk_offerings[0].id,
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)

    # Fixture data for read/write tests on attached volumes.
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    cls._cleanup = []
    cls._cleanup.append(cls.virtual_machine)
    cls._cleanup.append(cls.volume_1)
    cls._cleanup.append(cls.volume_2)
    return
def setUpCloudStack(cls):
    """Build CloudStack at the pre-globalId (uuid) commit, boot it under
    Jetty, create one VM + data volume on a local-cluster host and one
    pair on a remote-cluster host, then switch the checkout to the
    globalId commit for the live-migration tests."""
    super(MigrationUuidToGlobalIdLiveMigration, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build the management server at the commit that still uses uuids.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the maven log until Jetty reports that the server is up.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False, bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # give the server extra time to settle before API calls
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Reset the migration flag in the DB so the uuid->globalId migration
    # runs again after the commit switch below.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None

    # Reuse the "ssd" service offering if present, else create it.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, serviceOffering)
    assert cls.service_offering is not None
    cls.disk_offering = disk_offering[0]
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    cls.disk_offering_20 = disk_offering_20[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    # Live migration needs at least two hosts in each cluster.
    # NOTE(review): the assert messages say "less than 1" but the checks
    # require more than one host — message looks stale; verify intent.
    cls.local_cluster = cls.helper.get_local_cluster()
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.local_cluster.id)
    assert len(cls.host) > 1, "Hosts list is less than 1"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]
    cls.remote_cluster = cls.helper.get_remote_cluster()
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Hosts list is less than 1"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]

    # VM and volume pinned to the local cluster.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)

    #vm and volume on remote
    cls.virtual_machine_remote = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine_remote)
    cls.volume_remote = Volume.create(cls.apiclient,
                                      cls.testdata[TestData.volume_1],
                                      account=cls.account.name,
                                      domainid=cls.domain.id,
                                      zoneid=cls.zone.id,
                                      diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume_remote)

    #change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpClass(cls):
    """Prepare the 'ssd' StorPool pool/offering, a non-admin user account
    and API client, and three volumes plus two VMs (created as that user)
    for the TestStoragePool permission-oriented cases."""
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    # Secondary client acting as a regular (non-admin) user.
    cls.userapiclient = testClient.getUserApiClient(
        UserName="******", DomainName="ROOT")
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    # Hyper-V and LXC are not supported; individual tests check this flag.
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    # Select the zone whose internal DNS matches the management server IP.
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    # Definition used only if the 'ssd' pool does not exist yet.
    storpool_primary_storage = {
        "name": "ssd",
        "zoneid": cls.zone.id,
        "url": "ssd",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "ssd"
    }
    # Definition used only if the 'ssd' service offering does not exist.
    storpool_service_offerings = {
        "name": "ssd",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    storage_pool = list_storage_pools(cls.apiclient, name='ssd')
    service_offerings = list_service_offering(cls.apiclient, name='ssd')
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    cls.disk_offerings = disk_offerings[0]
    # Create-or-reuse for the pool and the offering.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    template = get_template(cls.apiclient, cls.zone.id, account="system")
    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.service_offering = service_offerings

    # Resolve (or create) the dedicated StorPoolUser account used for the
    # non-admin API operations.
    user = list_users(cls.apiclient,
                      account='StorPoolUser',
                      domainid=cls.domain.id)
    account = list_accounts(cls.apiclient, id=user[0].accountid)
    if account is None:
        role = Role.list(cls.apiclient, name='User')
        cmd = createAccount.createAccountCmd()
        cmd.email = '*****@*****.**'
        cmd.firstname = 'StorPoolUser'
        cmd.lastname = 'StorPoolUser'
        cmd.password = '******'
        cmd.username = '******'
        cmd.roleid = role[0].id
        account = cls.apiclient.createAccount(cmd)
    else:
        account = account[0]
    cls.account = account
    # cls.tmp_files = []
    # cls.keypair = SSHKeyPair.create(
    #     cls.apiclient,
    #     name=random_gen() + ".pem",
    #     account=cls.account.name,
    #     domainid=cls.account.domainid)
    #
    # keyPairFilePath = tempfile.gettempdir() + os.sep + cls.keypair.name
    # # Clenaup at end of execution
    # cls.tmp_files.append(keyPairFilePath)
    #
    # cls.debug("File path: %s" % keyPairFilePath)
    #
    # f = open(keyPairFilePath, "w+")
    # f.write(cls.keypair.privatekey)
    # f.close()
    #
    # os.system("chmod 400 " + keyPairFilePath)
    #
    # cls.keyPairFilePath = keyPairFilePath

    # Three data volumes and two VMs, all created as the non-admin user.
    cls.volume_1 = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume_2 = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-2"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.userapiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10,
    )
    cls.virtual_machine2 = VirtualMachine.create(
        cls.userapiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10,
    )
    cls.template = template
    # Fixture data for read/write tests on attached volumes.
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    cls._cleanup = []
    cls._cleanup.append(cls.virtual_machine)
    cls._cleanup.append(cls.virtual_machine2)
    cls._cleanup.append(cls.volume_1)
    cls._cleanup.append(cls.volume_2)
    cls._cleanup.append(cls.volume)
    return
def setUpCloudStack(cls):
    """One-time environment bootstrap for the uuid->globalid volume-migration tests.

    Builds and starts a CloudStack management server from the pre-migration
    ("uuid") commit, resets the migration-completed DB flag, provisions two
    StorPool primary storages plus matching service/disk offerings, creates
    the fleet of VMs/volumes/snapshots the tests operate on, and finally
    switches the deployment to the post-migration ("globalid") commit.
    """
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Read the zone name out of the marvin config file so get_zone() below
    # can resolve the same zone the management server is configured for.
    with open(cls.ARGS.cfg) as json_text:
        cfg.logger.info(cls.ARGS.cfg)
        # NOTE(review): this logs the file object itself, not its contents.
        cfg.logger.info(json_text)
        conf = json.load(json_text)
        cfg.logger.info(conf)
        zone = conf['mgtSvr'][0].get('zone')
    # Check out / build the pre-migration commit, then boot CloudStack via
    # the jetty maven plugin in its own process group (so it can be killed
    # as a group later).
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Tail the jetty log until the server announces it is up; if the tail
    # ends first, assume startup failed and stop the process group.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Force the migration flag back to 'false' so the globalid migration
    # runs again when the post-migration build is started.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, zone_name=zone)
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    cls.host = list_hosts(cls.apiclient, zoneid=cls.zone.id)
    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    # First StorPool template / primary storage (randomized name so reruns
    # do not collide with leftovers from previous runs).
    cls.sp_template_1 = "-".join(["test-ssd-b", random_gen()])
    cfg.logger.info(
        pprint.pformat("############################ %s" % cls.zone))
    storpool_primary_storage = {
        "name": cls.sp_template_1,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_1,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 155466,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_1
    }
    cls.storpool_primary_storage = storpool_primary_storage
    host, port, auth = cls.getCfgFromUrl(
        url=storpool_primary_storage["url"])
    cls.spapi = spapi.Api(host=host, port=port, auth=auth)
    storage_pool = list_storage_pools(
        cls.apiclient, name=storpool_primary_storage["name"])
    if storage_pool is None:
        # Pool does not exist yet: create the StorPool volume template
        # first, then register it as a CloudStack primary storage.
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool
    storpool_service_offerings_ssd = {
        "name": cls.sp_template_1,
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": cls.sp_template_1
    }
    service_offerings_ssd = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd["name"])
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    cls.service_offering = service_offerings_ssd
    cls._cleanup.append(cls.service_offering)
    cfg.logger.info(pprint.pformat(cls.service_offering))
    # Second StorPool template / primary storage, used as a migration target.
    cls.sp_template_2 = "-".join(["test-ssd2-b", random_gen()])
    storpool_primary_storage2 = {
        "name": cls.sp_template_2,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_2,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 1554,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_2
    }
    cls.storpool_primary_storage2 = storpool_primary_storage2
    storage_pool = list_storage_pools(
        cls.apiclient, name=storpool_primary_storage2["name"])
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage2["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage2)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage2 = storage_pool
    storpool_service_offerings_ssd2 = {
        "name": cls.sp_template_2,
        "displaytext": "SP_CO_2",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "tags": cls.sp_template_2
    }
    service_offerings_ssd2 = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd2["name"])
    if service_offerings_ssd2 is None:
        service_offerings_ssd2 = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd2)
    else:
        service_offerings_ssd2 = service_offerings_ssd2[0]
    cls.service_offering2 = service_offerings_ssd2
    cls._cleanup.append(cls.service_offering2)
    # Restart the management server so it picks up the newly registered
    # primary storages (SIGTERM the old group, then boot jetty again and
    # wait for the startup banner exactly as above).
    os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
    time.sleep(30)
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    # Built-in disk offerings used by the tests.
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Six VMs created while the deployment still uses uuid-style resource
    # names; the tests verify they keep working after the globalid switch.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)
    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine5)
    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine6)
    # Eight data volumes; volume7/volume8 are deliberately NOT registered
    # for cleanup (volume7 is deleted below, after its snapshots are taken).
    cls.volume1 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_1],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume1)
    cls.volume2 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_2],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume2)
    cls.volume3 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_3],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume3)
    cls.volume4 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_4],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume4)
    cls.volume5 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_5],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume5)
    cls.volume6 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_6],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume6)
    cls.volume7 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    # NOTE(review): volume8 reuses the volume_7 testdata entry — presumably
    # intentional (same spec, different volume), but worth confirming.
    cls.volume8 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    # Attach/detach each volume once so every volume gets materialized on
    # the StorPool primary storage while still under uuid naming.
    cls.virtual_machine.stop(cls.apiclient, forced=True)
    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(
        cls.apiclient, cls.volume1)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.start(cls.apiclient)
    # Snapshots of vm5's ROOT volume and of volume7, taken pre-migration.
    list_root = list_volumes(cls.apiclient,
                             virtualmachineid=cls.virtual_machine5.id,
                             type="ROOT")
    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)
    #Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(
        cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)
    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)
    # NOTE(review): unlike the sibling setup variant, bypass_secondary(True)
    # is not re-enabled before this "bypassed" snapshot — confirm intended.
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                 volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)
    # volume7 is deleted while its snapshots remain, to exercise snapshot
    # handling for deleted volumes across the migration.
    Volume.delete(cls.volume7, cls.apiclient)
    # Switch the deployment to the post-migration (globalid) commit.
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpCloudStack(cls):
    """Class-level setup for TestNewPrimaryStorage.

    Resolves the zone that matches the management-server IP, creates a
    StorPool template + primary storage via the helper, and prepares the
    disk/service offerings and template the tests need. Bails out early on
    hypervisors the plugin does not support (Hyper-V, LXC).
    """
    cls.testClient = super(TestNewPrimaryStorage, cls).getClsTestClient()
    cls.cleanup = []
    cls.apiclient = cls.testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Tests will be skipped later via this flag.
        cls.unsupportedHypervisor = True
        return
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    # Pick the zone whose internal DNS matches the configured management
    # server IP (i.e. the zone this management server actually runs).
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    cls.debug("################## zone %s" % cls.zone)
    cls.template_name = cls.testdata[TestData.primaryStorage3].get("name")
    cls.debug("################## template_name %s" % cls.template_name)
    cls.storage_pool_id = "spStoragePoolId"
    # Creates the StorPool volume template and registers it as a primary
    # storage; also returns the remote and local StorPool API clients.
    cls.sp_primary_storage, cls.spapiRemote, cls.spapi = cls.helper.create_sp_template_and_storage_pool(
        cls.apiclient, cls.template_name,
        cls.testdata[TestData.primaryStorage3], cls.zone.id)
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    cls.disk_offerings = disk_offerings[0]
    # Disk offering tagged with the new template name so volumes land on
    # the newly created primary storage.
    diskOffering = {
        "name": cls.template_name,
        "displaytext": "test new primary storage disk offerigns",
        "disksize": 128,
        "tags": cls.template_name,
        "storagetype": "shared"
    }
    cls.sp_disk_offering = DiskOffering.create(cls.apiclient, diskOffering)
    cls.cleanup.append(cls.sp_disk_offering)
    sp_offerings = {
        "name": cls.template_name,
        "displaytext": cls.template_name,
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "tags": cls.template_name
    }
    # NOTE(review): this service offering is not added to cls.cleanup —
    # confirm whether it is removed elsewhere (e.g. tearDownClass).
    cls.serviceOfferings = ServiceOffering.create(cls.apiclient,
                                                  sp_offerings)
    cls.template = get_template(cls.apiclient, cls.zone.id,
                                account="system")
def setUpClass(cls):
    """Class-level setup for TestVmSnapshot.

    Looks up the two pre-existing StorPool primary storages, obtains (or
    creates) the service offering used by the tests, then creates one data
    volume and one VM and registers both for cleanup.
    """
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    # Setup test data
    td = TestData()
    cls.testdata = td.testdata
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    import pprint
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.template = template
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOfferingOnly = cls.testdata[TestData.serviceOfferingOnly]
    # Both primary storages are expected to exist already; indexing [0]
    # will fail fast if they do not.
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage2.get("name"))
    cls.primary_storage2 = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    #===============================================================================
    # # service_offering = list_service_offering(
    # #     cls.apiclient,
    # #     name="tags"
    # # )
    # # if service_offering is not None:
    # #     cls.service_offering = service_offering[0]
    # # else:
    # #     cls.service_offering = ServiceOffering.create(
    # #         cls.apiclient,
    # #         serviceOffering)
    #===============================================================================
    # Reuse the existing offering if present, otherwise create it from
    # the test data.
    service_offering_only = list_service_offering(cls.apiclient,
                                                  name="cloud-test-dev-2")
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiclient, serviceOfferingOnly)
    assert cls.service_offering_only is not None
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Create 1 data volume_1
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    # Resources that are to be destroyed
    cls._cleanup = [cls.virtual_machine, cls.volume]
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """One-time environment bootstrap (second variant) for the
    uuid->globalid volume-migration tests.

    Builds and boots CloudStack from the pre-migration ("uuid") commit,
    resets the migration flag in the DB, reuses an existing primary storage
    and "ssd"/"ssd2" service offerings, creates the VMs/volumes/snapshots
    the tests consume, and finally switches to the post-migration
    ("globalid") commit.
    """
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Check out / build the pre-migration commit and start CloudStack via
    # the jetty maven plugin in its own process group.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Tail the jetty log until the startup banner appears; if the tail
    # ends first, assume startup failed and stop the process group.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Reset the migration flag so the globalid migration re-runs on the
    # post-migration build.
    v = dbclient.execute("select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute("update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]
    # Primary storage is expected to exist already; [0] fails fast if not.
    storage_pool = list_storage_pools(
        cls.apiclient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(
        cls.apiclient,
        name="Small"
    )
    disk_offering_20 = list_disk_offering(
        cls.apiclient,
        name="Medium"
    )
    disk_offering_100 = list_disk_offering(
        cls.apiclient,
        name="Large"
    )
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    # Reuse the "ssd"/"ssd2" service offerings when present; create them
    # from test data otherwise.
    service_offering = list_service_offering(
        cls.apiclient,
        name="ssd"
    )
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, serviceOffering)
    assert cls.service_offering is not None
    service_offering2 = list_service_offering(
        cls.apiclient,
        name="ssd2"
    )
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient, serviceOffering2)
    assert cls.service_offering2 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    account = list_accounts(
        cls.apiclient,
        name="admin"
    )
    cls.account = account[0]
    # Six VMs created while the deployment still uses uuid-style names.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine4)
    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine5)
    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine6)
    # Seven data volumes; volume7 is intentionally NOT registered for
    # cleanup — it is deleted below after its snapshots are taken.
    cls.volume1 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume1)
    cls.volume2 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_2],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume2)
    cls.volume3 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_3],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume3)
    cls.volume4 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_4],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume4)
    cls.volume5 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_5],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume5)
    cls.volume6 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_6],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume6)
    cls.volume7 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_7],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    # Attach/detach each volume once so every volume is materialized on
    # the StorPool primary storage while still under uuid naming.
    cls.virtual_machine.stop(cls.apiclient, forced=True)
    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(cls.apiclient, cls.volume1)
    vol = list_volumes(cls.apiclient, id = cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    vol = list_volumes(cls.apiclient, id = cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.start(cls.apiclient)
    # Snapshots of vm5's ROOT volume and of volume7, taken pre-migration,
    # both with and without bypassing secondary storage.
    list_root = list_volumes(cls.apiclient, virtualmachineid = cls.virtual_machine5.id, type = "ROOT")
    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)
    #Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)
    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient, volume_id = cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient, volume_id = cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)
    cls.helper.bypass_secondary(True)
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)
    # volume7 is deleted while its snapshots remain, to exercise snapshot
    # handling for deleted volumes across the migration.
    Volume.delete(cls.volume7, cls.apiclient)
    # Switch the deployment to the post-migration (globalid) commit.
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpCloudStack(cls):
    """Class-level setup for TestStoragePool.

    Resolves the zone matching the management-server IP, obtains or creates
    the "ssd"/"qos" primary storage, service offerings and disk offerings
    (the "qos" variants carry an SP_TEMPLATE resource detail), then creates
    a test account, two data volumes and a VM with both volumes attached.
    Skips setup on hypervisors the plugin does not support (Hyper-V, LXC).

    Bug fix vs. the previous revision: ``cls.primary_storage`` used to be
    assigned ``storage_pool[0]`` BEFORE the ``storage_pool is None`` check,
    so a missing "ssd" pool raised ``TypeError`` and the creation fallback
    was unreachable. The assignment now happens after the create-or-reuse
    branch.
    """
    cls._cleanup = []
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Tests are skipped later based on this flag.
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    # Pick the zone whose internal DNS matches the configured management
    # server IP.
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    cls.debug("##################### zone %s" % cls.zone)
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()
    storpool_primary_storage = {
        "name": "ssd",
        TestData.scope: "ZONE",
        "url": "ssd",
        TestData.provider: "StorPool",
        "path": "/dev/storpool",
        TestData.capacityBytes: 2251799813685248,
        TestData.hypervisor: "KVM"
    }
    storpool_without_qos = {
        "name": "ssd",
        "displaytext": "ssd",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    storpool_qos_template = {
        "name": "qos",
        "displaytext": "qos",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    disk_offering_ssd = {
        "name": "ssd",
        "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
        "disksize": 10,
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        TestData.tags: "ssd",
        "storagetype": "shared"
    }
    disk_offering_qos = {
        "name": "qos",
        "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
        "disksize": 10,
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        TestData.tags: "ssd",
        "storagetype": "shared"
    }
    cls.template_name = storpool_primary_storage.get("name")
    cls.template_name_2 = "qos"
    # Reuse the "ssd" primary storage if it exists, otherwise create it.
    # (Fixed: the [0] subscript must only happen on a non-None list.)
    storage_pool = list_storage_pools(cls.apiclient, name="ssd")
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    # Service offerings: plain "ssd" and a "qos" one that carries an
    # SP_TEMPLATE resource detail when freshly created.
    service_offerings_ssd = list_service_offering(cls.apiclient,
                                                  name=cls.template_name)
    service_offerings_qos = list_service_offering(cls.apiclient,
                                                  name=cls.template_name_2)
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient, storpool_without_qos)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    if service_offerings_qos is None:
        service_offerings_qos = ServiceOffering.create(
            cls.apiclient, storpool_qos_template)
        ResourceDetails.create(cls.apiclient,
                               resourceid=service_offerings_qos.id,
                               resourcetype="ServiceOffering",
                               details={"SP_TEMPLATE": "qos"},
                               fordisplay=True)
    else:
        service_offerings_qos = service_offerings_qos[0]
    cls.service_offering_ssd = service_offerings_ssd
    cls.service_offering_qos = service_offerings_qos
    # Disk offerings, mirroring the service-offering handling above.
    cls.disk_offering_ssd = list_disk_offering(cls.apiclient,
                                               name=cls.template_name)
    cls.disk_offering_qos = list_disk_offering(cls.apiclient,
                                               name=cls.template_name_2)
    if cls.disk_offering_ssd is None:
        cls.disk_offering_ssd = DiskOffering.create(
            cls.apiclient, disk_offering_ssd)
    else:
        cls.disk_offering_ssd = cls.disk_offering_ssd[0]
    if cls.disk_offering_qos is None:
        cls.disk_offering_qos = DiskOffering.create(
            cls.apiclient, disk_offering_qos)
        ResourceDetails.create(cls.apiclient,
                               resourceid=cls.disk_offering_qos.id,
                               resourcetype="DiskOffering",
                               details={"SP_TEMPLATE": "qos"},
                               fordisplay=True)
    else:
        cls.disk_offering_qos = cls.disk_offering_qos[0]
    #The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.services["diskofferingid"] = cls.disk_offering_ssd.id
    # Dedicated user account for the tests, registered for cleanup.
    cls.account = cls.helper.create_account(cls.apiclient,
                                            cls.services["account"],
                                            accounttype=1,
                                            domainid=cls.domain.id,
                                            roleid=1)
    cls._cleanup.append(cls.account)
    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)
    # Two data volumes plus a VM; both volumes are attached to the VM.
    cls.volume_1 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.volume_2 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_ssd.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume_1)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume_2)
    cls.template = template
    cls.hostid = cls.virtual_machine.hostid
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """Class-level setup for the uuid->globalId migration tests.

    Builds and boots a CloudStack management server from the pre-globalId
    commit, provisions VMs/volumes/snapshots/templates under the old
    (uuid-based) StorPool naming, then switches the working tree to the
    globalId commit so the tests can verify the migration path.
    """
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Check out and build the management server at the pre-migration (uuid) commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    # Launch CloudStack via jetty in its own process group so the whole
    # tree can be killed with one killpg() later.
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Tail the log file and block until jetty reports it has started.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False, bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        # NOTE(review): readline() yields bytes on Python 3; the substring
        # test below assumes a text stream (Python 2) — confirm interpreter.
        if not line:
            # EOF on the tail means the server died before starting.
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    # Grace period for the server to finish initializing after jetty is up.
    time.sleep(15)
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Force the migration flag back to 'false' so the globalId migration
    # runs again when the new commit is started.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    # Reuse the "ssd" service offering if present, otherwise create it.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Three VMs: vm1 gets snapshots, vm2 sources volume snapshots/templates,
    # vm3 is used only for the ROOT-volume uuid check below.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    #check that ROOT disk is created with uuid
    root_volume = list_volumes(cls.apiclient,
                               virtualmachineid=cls.virtual_machine3.id,
                               type="ROOT")
    try:
        # On the pre-migration commit the StorPool volume must be named
        # after the CloudStack uuid; a lookup failure means it is not.
        spvolume = cls.spapi.volumeList(volumeName=root_volume[0].id)
    except spapi.ApiError as err:
        cfg.logger.info("Root volume is not created with UUID")
        raise Exception(err)
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)
    cls.random_data_vm_snapshot1 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    volume_attached = cls.virtual_machine.attach_volume(
        cls.apiclient, cls.volume)
    # Snapshot 1: write data, snapshot, then delete the data so a later
    # revert can be verified against random_data_vm_snapshot1.
    cls.helper.write_on_disks(cls.random_data_vm_snapshot1,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)
    MemorySnapshot = False  # disk-only VM snapshots throughout
    cls.vm_snapshot1 = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot1,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)
    # Snapshot 2: same write/snapshot/delete cycle with fresh data.
    cls.random_data_vm_snapshot2 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot2,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)
    cls.vm_snapshot2 = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot2,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)
    #vm snapshot to be deleted without revert
    cls.random_data_vm_snapshot3 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot3,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)
    cls.vm_snapshot_for_delete = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(
        cls.vm_snapshot_for_delete, cls.virtual_machine, cls.test_dir,
        cls.random_data)
    # One snapshot kept on secondary storage and one bypassing it, each
    # turned into a template, to cover both migration code paths.
    cls.snapshot_on_secondary = cls.helper.create_snapshot(
        False, cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_on_secondary)
    cls.template_on_secondary = cls.helper.create_template_from_snapshot(
        cls.services, snapshotid=cls.snapshot_on_secondary.id)
    cls._cleanup.append(cls.template_on_secondary)
    cls.snapshot_bypassed = cls.helper.create_snapshot(
        True, cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_bypassed)
    cls.template_bypased = cls.helper.create_template_from_snapshot(
        cls.services, snapshotid=cls.snapshot_bypassed.id)
    cls._cleanup.append(cls.template_bypased)
    #change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpCloudStack(cls):
    """Class-level setup for the VM-snapshot tests.

    Resolves zone/domain/template, creates a dedicated test account with
    its security group opened, reuses or creates the "ssd" service/disk
    offerings, and provisions one data volume plus one VM for the tests.
    """
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    # Setup test data
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    # Pick the zone named in the management-server config.
    for z in zones:
        if z.name == cls.getClsConfig().mgtSvr[0].zone:
            cls.zone = z
    assert cls.zone is not None
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        account="system"
    )
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.template = template
    cls.account = cls.helper.create_account(
        cls.apiclient,
        cls.services["account"],
        accounttype=1,
        domainid=cls.domain.id,
        roleid=1
    )
    cls._cleanup.append(cls.account)
    # Open the account's default security group so the tests can reach the VM.
    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    storage_pool = list_storage_pools(
        cls.apiclient,
        name=primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(
        cls.apiclient,
        name="ssd"
    )
    assert disk_offering is not None
    # Reuse the "ssd" service offering if it already exists, create otherwise.
    service_offering_only = list_service_offering(
        cls.apiclient,
        name="ssd"
    )
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering_only is not None
    cls.disk_offering = disk_offering[0]
    # Create 1 data volume_1
    cls.volume = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
        size=10
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    # Seed data used by the individual snapshot tests.
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def test_11_migrate_volume_and_change_offering(self):
    """Migrate a volume to another primary storage while changing its offering.

    Validates the following:
      1. Creates a new volume with the "Small" disk offering.
      2. Migrates the volume to another primary storage, switching it to
         the "Large" offering.
      3. Verifies the volume reports the new offering after migration.

    Skips the test when no migration-capable storage pool is available.
    """
    small_offering = list_disk_offering(
        self.apiclient,
        name="Small"
    )[0]
    large_offering = list_disk_offering(
        self.apiclient,
        name="Large"
    )[0]
    # Bug fix: was `self.apiClient` (capital C), which is not the attribute
    # set up by this test class (`cls.apiclient`) and raises AttributeError.
    volume = Volume.create(
        self.apiclient,
        self.services,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=small_offering.id
    )
    self.debug("Created a small volume: %s" % volume.id)
    self.virtual_machine.attach_volume(self.apiclient, volume=volume)
    # The volume is migrated offline on KVM: stop the VM first, restart after.
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.stop(self.apiclient)
    pools = StoragePool.listForMigration(
        self.apiclient,
        id=volume.id
    )
    pool = None
    if pools and len(pools) > 0:
        pool = pools[0]
    else:
        raise self.skipTest("Not enough storage pools found, skipping test")
    if hasattr(pool, 'tags'):
        # Clear the target pool's tags so offering/tag matching cannot
        # reject the new disk offering.
        StoragePool.update(self.apiclient, id=pool.id, tags="")
    self.debug("Migrating Volume-ID: %s to Pool: %s" % (volume.id, pool.id))
    Volume.migrate(
        self.apiclient,
        volumeid=volume.id,
        storageid=pool.id,
        newdiskofferingid=large_offering.id
    )
    if self.virtual_machine.hypervisor == "KVM":
        self.virtual_machine.start(self.apiclient)
    migrated_vol = Volume.list(
        self.apiclient,
        id=volume.id
    )[0]
    self.assertEqual(
        migrated_vol.diskofferingname,
        large_offering.name,
        "Offering name did not match with the new one "
    )
    return
def setUpCloudStack(cls):
    """Class-level setup for the migrate-VM-with-volumes tests.

    Provisions (or reuses) a StorPool primary storage plus NFS and Ceph
    service/disk offerings, creates a test account, and starts one VM on
    NFS and one on Ceph so the tests can migrate them to StorPool.
    """
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestMigrateVMWithVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    # Pick the zone whose internal DNS matches the management server IP.
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    # Fail fast (consistent with the sibling setups) instead of a later
    # AttributeError on cls.zone.id.
    assert cls.zone is not None
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()
    storpool_primary_storage = cls.testdata[TestData.primaryStorage]
    cls.template_name = storpool_primary_storage.get("name")
    storpool_service_offerings = cls.testdata[TestData.serviceOffering]
    nfs_service_offerings = cls.testdata[TestData.serviceOfferingsPrimary]
    ceph_service_offerings = cls.testdata[TestData.serviceOfferingsCeph]
    nfs_disk_offerings = cls.testdata[TestData.nfsDiskOffering]
    ceph_disk_offerings = cls.testdata[TestData.cephDiskOffering]
    storage_pool = list_storage_pools(cls.apiclient, name=cls.template_name)
    nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')
    ceph_primary_storage = cls.testdata[TestData.primaryStorage4]
    cls.ceph_storage_pool = list_storage_pools(
        cls.apiclient, name=ceph_primary_storage.get("name"))[0]
    service_offerings = list_service_offering(cls.apiclient,
                                              name=cls.template_name)
    nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')
    ceph_service_offering = list_service_offering(
        cls.apiclient, name=ceph_primary_storage.get("name"))
    # Each resource below is reused when it already exists, created otherwise.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]
    if nfs_service_offering is None:
        nfs_service_offering = ServiceOffering.create(
            cls.apiclient, nfs_service_offerings)
    else:
        nfs_service_offering = nfs_service_offering[0]
    if ceph_service_offering is None:
        ceph_service_offering = ServiceOffering.create(
            cls.apiclient, ceph_service_offerings)
    else:
        ceph_service_offering = ceph_service_offering[0]
    nfs_disk_offering = list_disk_offering(cls.apiclient, name="nfs")
    if nfs_disk_offering is None:
        # Bug fix: the created offering was assigned to the LOCAL name only,
        # leaving cls.nfs_disk_offering undefined on this path even though
        # VirtualMachine.create below reads cls.nfs_disk_offering.id.
        # (The parallel ceph branch already assigns to cls.)
        cls.nfs_disk_offering = DiskOffering.create(cls.apiclient,
                                                    nfs_disk_offerings)
    else:
        cls.nfs_disk_offering = nfs_disk_offering[0]
    ceph_disk_offering = list_disk_offering(cls.apiclient, name="ceph")
    if ceph_disk_offering is None:
        cls.ceph_disk_offering = DiskOffering.create(
            cls.apiclient, ceph_disk_offerings)
    else:
        cls.ceph_disk_offering = ceph_disk_offering[0]
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    # Bug fix: validate the template BEFORE using it to create VMs; the
    # original performed this check only after both VMs were created.
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.nfs_storage_pool = nfs_storage_pool[0]
    # Both source pools must be usable; take them out of maintenance if needed.
    if cls.nfs_storage_pool.state == "Maintenance":
        cls.nfs_storage_pool = StoragePool.cancelMaintenance(
            cls.apiclient, cls.nfs_storage_pool.id)
    if cls.ceph_storage_pool.state == "Maintenance":
        cls.ceph_storage_pool = StoragePool.cancelMaintenance(
            cls.apiclient, cls.ceph_storage_pool.id)
    cls.account = cls.helper.create_account(cls.apiclient,
                                            cls.services["account"],
                                            accounttype=1,
                                            domainid=cls.domain.id,
                                            roleid=1)
    cls._cleanup.append(cls.account)
    # Open the account's default security group for test traffic.
    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)
    # vm lives on NFS, vm2 on Ceph — the two migration sources under test.
    cls.vm = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=nfs_service_offering.id,
        diskofferingid=cls.nfs_disk_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls.vm2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=ceph_service_offering.id,
        diskofferingid=cls.ceph_disk_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.service_offering = service_offerings
    cls.nfs_service_offering = nfs_service_offering
    cls.debug(pprint.pformat(cls.service_offering))
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """Class-level setup for the volume-resize (custom IOPS) tests.

    Reuses or creates the StorPool pool plus "iops" service/disk offerings,
    discovers local and remote clusters with at least two hosts each, and
    provisions three custom-IOPS volumes and four VMs (two pinned to
    specific hosts for the live-migration tests).
    """
    cls._cleanup = []
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestResizeVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Hypervisor not supported by these tests; individual tests skip.
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    # Pick the zone whose internal DNS matches the management server IP.
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()
    storpool_primary_storage = cls.testdata[TestData.primaryStorage]
    cls.template_name = storpool_primary_storage.get("name")
    storpool_service_offerings = cls.testdata[TestData.serviceOfferingsIops]
    # Inline definition of a customized-IOPS disk offering tagged for the pool.
    storpool_disk_offerings = {
        "name": "iops",
        "displaytext": "Testing IOPS on StorPool",
        "customizediops": True,
        "storagetype": "shared",
        "tags": cls.template_name,
    }
    storage_pool = list_storage_pools(
        cls.apiclient,
        name=cls.template_name
    )
    service_offerings = list_service_offering(
        cls.apiclient,
        name='iops'
    )
    disk_offerings = list_disk_offering(
        cls.apiclient,
        name="iops"
    )
    # Each resource is reused when present, created otherwise.
    if disk_offerings is None:
        disk_offerings = DiskOffering.create(cls.apiclient,
                                             storpool_disk_offerings,
                                             custom=True)
    else:
        disk_offerings = disk_offerings[0]
    cls.disk_offerings = disk_offerings
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        account="system"
    )
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.account = cls.helper.create_account(
        cls.apiclient,
        cls.services["account"],
        accounttype=1,
        domainid=cls.domain.id,
    )
    cls._cleanup.append(cls.account)
    # Open the account's default security group for test traffic.
    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)
    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))
    # Discover two hosts in the local and two in the remote cluster; the
    # live-migration tests need a distinct source and destination host.
    cls.local_cluster = cls.helper.get_local_cluster(cls.apiclient,
                                                     zoneid=cls.zone.id)
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.apiclient,
                                                   cls.local_cluster.id)
    assert len(cls.host) > 1, "Hosts list is less than 1"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]
    cls.remote_cluster = cls.helper.get_remote_cluster(cls.apiclient,
                                                       zoneid=cls.zone.id)
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.apiclient, cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Hosts list is less than 1"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]
    # Three 5 GB data volumes with custom IOPS limits (2000-5000).
    cls.volume_1 = cls.helper.create_custom_disk(
        cls.apiclient,
        {"diskname": "StorPoolDisk"},
        zoneid=cls.zone.id,
        size=5,
        miniops=2000,
        maxiops=5000,
        account=cls.account.name,
        domainid=cls.account.domainid,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume_2 = cls.helper.create_custom_disk(
        cls.apiclient,
        {"diskname": "StorPoolDisk"},
        zoneid=cls.zone.id,
        size=5,
        miniops=2000,
        maxiops=5000,
        account=cls.account.name,
        domainid=cls.account.domainid,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume = cls.helper.create_custom_disk(
        cls.apiclient,
        {"diskname": "StorPoolDisk"},
        zoneid=cls.zone.id,
        size=5,
        miniops=2000,
        maxiops=5000,
        account=cls.account.name,
        domainid=cls.account.domainid,
        diskofferingid=cls.disk_offerings.id,
    )
    # Two free-floating VMs plus two pinned to specific hosts for
    # the live-migration scenarios.
    cls.virtual_machine = cls.helper.create_vm_custom(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        minIops=1000,
        maxIops=5000,
        rootdisksize=10
    )
    cls.virtual_machine2 = cls.helper.create_vm_custom(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        minIops=1000,
        maxIops=5000,
        rootdisksize=10
    )
    cls.virtual_machine_live_migration_1 = cls.helper.create_vm_custom(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        minIops=1000,
        maxIops=5000,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10
    )
    cls.virtual_machine_live_migration_2 = cls.helper.create_vm_custom(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        account=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        minIops=1000,
        maxIops=5000,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10
    )
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """Class-level setup for the storage-pool tests.

    Creates a test account, reuses or creates the StorPool primary storage
    and its service offering, resolves the Small/Medium/Large disk
    offerings, and provisions two data volumes plus three VMs pinned to a
    host in the local cluster.
    """
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Hypervisor not supported by these tests; individual tests skip.
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    cls._cleanup = []
    zones = list_zones(cls.apiclient)
    # Pick the zone whose internal DNS matches the management server IP.
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()
    cls.account = cls.helper.create_account(cls.apiclient,
                                            cls.services["account"],
                                            accounttype=1,
                                            domainid=cls.domain.id,
                                            roleid=1)
    cls._cleanup.append(cls.account)
    # Open the account's default security group for test traffic.
    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)
    storpool_primary_storage = cls.testdata[TestData.primaryStorage]
    storpool_service_offerings = cls.testdata[TestData.serviceOffering]
    cls.template_name = storpool_primary_storage.get("name")
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=cls.template_name)
    service_offerings = list_service_offering(cls.apiclient,
                                              name=cls.template_name)
    # The built-in Small/Medium/Large disk offerings are assumed to exist.
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")
    cls.disk_offerings = disk_offerings[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    cls.debug(pprint.pformat(storage_pool))
    # Reuse the StorPool pool and service offering if present, create otherwise.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]
    #The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))
    # Host discovery for local and remote clusters.
    cls.local_cluster = cls.helper.get_local_cluster(cls.apiclient,
                                                     zoneid=cls.zone.id)
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.apiclient,
                                                   cls.local_cluster.id)
    cls.remote_cluster = cls.helper.get_remote_cluster(cls.apiclient,
                                                       zoneid=cls.zone.id)
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.apiclient, cls.remote_cluster.id)
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.services["diskofferingid"] = cls.disk_offerings.id
    cls.volume_1 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.volume = Volume.create(cls.apiclient,
                               cls.services,
                               account=cls.account.name,
                               domainid=cls.account.domainid)
    # All three VMs start on the first local host.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host[0].id,
        rootdisksize=10)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host[0].id,
        rootdisksize=10)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host[0].id,
        rootdisksize=10)
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """Class-level setup for the rename-objects-with-uuids tests.

    Reuses or creates an "ssd" StorPool primary storage and service
    offering (defined inline below), resolves the Small/Medium/Large disk
    offerings, and provisions volumes plus one VM on a local-cluster host
    and one volume/VM pair on a remote-cluster host.
    """
    super(TestRenameObjectsWithUuids, cls).setUpClass()
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    cls.helper = HelperUtil(cls)
    cls._cleanup = []
    testClient = super(TestRenameObjectsWithUuids, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Hypervisor not supported by these tests; individual tests skip.
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    # Pick the zone whose internal DNS matches the management server IP.
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    # Inline primary-storage definition used only when no "ssd" pool exists.
    storpool_primary_storage = {
        "name": "ssd",
        "zoneid": cls.zone.id,
        "url": "ssd",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "ssd"
    }
    # Inline service-offering definition used only when none named "ssd" exists.
    storpool_service_offerings = {
        "name": "ssd",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    storage_pool = list_storage_pools(
        cls.apiclient,
        name='ssd'
    )
    service_offerings = list_service_offering(
        cls.apiclient,
        name='ssd'
    )
    # The built-in Small/Medium/Large disk offerings are assumed to exist.
    disk_offerings = list_disk_offering(
        cls.apiclient,
        name="Small"
    )
    disk_offering_20 = list_disk_offering(
        cls.apiclient,
        name="Medium"
    )
    disk_offering_100 = list_disk_offering(
        cls.apiclient,
        name="Large"
    )
    cls.disk_offering = disk_offerings[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    cls.debug(pprint.pformat(storage_pool))
    # Reuse the pool and offering if present, create otherwise.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]
    #The version of CentOS has to be supported
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        account="system"
    )
    cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id)
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))
    # Discover two hosts in the local and two in the remote cluster so VMs
    # can be pinned to a specific host in each.
    cls.local_cluster = cls.get_local_cluster(zoneid=cls.zone.id)
    cls.host = cls.list_hosts_by_cluster_id(cls.local_cluster.id)
    assert len(cls.host) > 1, "Hosts list is less than 1"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]
    cls.remote_cluster = cls.get_remote_cluster(zoneid=cls.zone.id)
    cls.host_remote = cls.list_hosts_by_cluster_id(cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Hosts list is less than 1"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]
    cls.volume_1 = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume_1)
    cls.volume = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume)
    # VM pinned to the first local-cluster host.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10
    )
    cls.volume_on_remote = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume_on_remote)
    # VM pinned to the first remote-cluster host.
    cls.virtual_machine_on_remote = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10
    )
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return