def setUpClass(cls):
    """One-time fixture: wire up the CloudStack API/DB clients, resolve the
    zone/cluster/host named in the test data, and open XenServer and Datera
    management sessions. Creates a compute offering registered for cleanup.
    """
    # Set up API client
    testclient = super(TestPrimaryStorage, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    cls.services = testclient.getParsedTestDataConfig()
    cls.testdata = TestData().testdata
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    # Pick the cluster whose name matches the test data (last match wins).
    for cluster in list_clusters(cls.apiClient):
        if cluster.name == cls.testdata[TestData.clusterName]:
            cls.cluster = cluster
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
    # NOTE(review): xs_pool_master_ip first holds the host list, then is
    # overwritten with the matching host's IP address string.
    cls.xs_pool_master_ip = list_hosts(cls.apiClient, clusterid=cls.cluster.id)
    for host in cls.xs_pool_master_ip:
        if host.name == cls.testdata[TestData.hostName]:
            cls.xs_pool_master_ip = host.ipaddress
    host_ip = "https://" + cls.xs_pool_master_ip
    # Log in to the XenServer pool master.
    cls.xen_session = XenAPI.Session(host_ip)
    xenserver = cls.testdata[TestData.xenServer]
    cls.xen_session.xenapi.login_with_password(
        xenserver[TestData.username], xenserver[TestData.password])
    # Open the Datera management-plane connection.
    datera = cls.testdata[TestData.Datera]
    cls.datera_api = DateraApi(username=datera[TestData.login],
                               password=datera[TestData.password],
                               hostname=datera[TestData.mvip])
    cls.compute_offering = ServiceOffering.create(
        cls.apiClient, cls.testdata[TestData.computeOffering])
    # Resources destroyed in tearDownClass.
    cls._cleanup = [cls.compute_offering]
def setUpClass(cls):
    """One-time fixture: initialize API/DB clients, locate the configured
    zone/cluster/host, open XenServer and Datera sessions, and create the
    compute offering used by the tests (registered for cleanup).
    """
    # Set up API client
    testclient = super(TestPrimaryStorage, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    cls.services = testclient.getParsedTestDataConfig()
    cls.testdata = TestData().testdata
    cls.zone = get_zone(
        cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    # Select the cluster named in the test data (last match wins).
    for cluster in list_clusters(cls.apiClient):
        if cluster.name == cls.testdata[TestData.clusterName]:
            cls.cluster = cluster
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
    # NOTE(review): xs_pool_master_ip is reused — first as the host list,
    # then as the matching host's IP address string.
    cls.xs_pool_master_ip = list_hosts(
        cls.apiClient, clusterid=cls.cluster.id)
    for host in cls.xs_pool_master_ip:
        if host.name == cls.testdata[TestData.hostName]:
            cls.xs_pool_master_ip = host.ipaddress
    host_ip = "https://" + cls.xs_pool_master_ip
    # Authenticate against the XenServer pool master.
    cls.xen_session = XenAPI.Session(host_ip)
    xenserver = cls.testdata[TestData.xenServer]
    cls.xen_session.xenapi.login_with_password(
        xenserver[TestData.username], xenserver[TestData.password])
    # Open the Datera management connection.
    datera = cls.testdata[TestData.Datera]
    cls.datera_api = DateraApi(
        username=datera[TestData.login],
        password=datera[TestData.password],
        hostname=datera[TestData.mvip])
    cls.compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering]
    )
    # Resources destroyed in tearDownClass.
    cls._cleanup = [cls.compute_offering]
def setUpClass(cls):
    """One-time fixture: connect API/DB clients, the XenServer pool master,
    and the SolidFire cluster; resolve zone/cluster/template/domain; create
    the test account, an API user with registered keys, and a compute
    offering (all registered for cleanup).
    """
    # Set up API client
    testclient = super(TestAddRemoveHosts, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    cls.testdata = TestData().testdata
    # IP of the pool master host (hard-coded host name from the lab setup).
    cls.xs_pool_master_ip = list_hosts(
        cls.apiClient,
        clusterid=cls.testdata[TestData.clusterId],
        name="XenServer-6.5-1")[0].ipaddress
    # Set up XenAPI connection
    host_ip = "https://" + cls.xs_pool_master_ip
    cls.xen_session = XenAPI.Session(host_ip)
    xenserver = cls.testdata[TestData.xenServer]
    cls.xen_session.xenapi.login_with_password(
        xenserver[TestData.username], xenserver[TestData.password])
    # Set up SolidFire connection
    cls.sf_client = sf_api.SolidFireAPI(
        endpoint_dict=cls.testdata[TestData.solidFire])
    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(
        cls.apiClient, cls.zone.id, cls.testdata[TestData.osType])
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
    # Create test account
    cls.account = Account.create(
        cls.apiClient,
        cls.testdata[TestData.account],
        admin=1
    )
    # Set up connection to make customized API calls
    user = User.create(
        cls.apiClient,
        cls.testdata[TestData.user],
        account=cls.account.name,
        domainid=cls.domain.id
    )
    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, user.id)
    # Signed client allows raw/undocumented API calls as the test user.
    cls.cs_api = SignedAPICall.CloudStack(
        api_url, userkeys.apikey, userkeys.secretkey)
    cls.compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering]
    )
    # Resources destroyed in tearDownClass (order: offering, user, account).
    cls._cleanup = [
        cls.compute_offering,
        user,
        cls.account
    ]
def setUpClass(cls):
    """One-time fixture for multi-cluster volume-snapshot tests on VMware.

    Resolves zone/domain/template, then — unless the environment is
    unsupported (non-VMware or fewer than two clusters, in which case
    ``skiptest`` is set and setup stops) — creates the test account, a
    user API client, and zone-tagged service/disk offerings.
    """
    testClient = super(TestMultipleVolumeSnapshots, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.testdata = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    # Fix: use the local client handle like every other call in this
    # method, instead of cls.testClient (an attribute injected externally
    # by the marvin runner) — same client object, no hidden dependency.
    cls.hypervisor = testClient.getHypervisorInfo()
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.testdata["ostype"])
    cls._cleanup = []
    cls.skiptest = False
    # This suite needs VMware and at least two clusters.
    clus_list = list_clusters(cls.apiclient)
    if cls.hypervisor.lower() not in ['vmware'] or len(clus_list) < 2:
        cls.skiptest = True
        return
    try:
        # Create an account
        cls.account = Account.create(
            cls.apiclient,
            cls.testdata["account"],
            domainid=cls.domain.id
        )
        # Create user api client of the account
        cls.userapiclient = testClient.getUserApiClient(
            UserName=cls.account.name,
            DomainName=cls.account.domain
        )
        # Create Service offering (zone-wide tag)
        cls.service_offering_zwps = ServiceOffering.create(
            cls.apiclient,
            cls.testdata["service_offering"],
            tags=ZONETAG1
        )
        cls.disk_offering_zwps = DiskOffering.create(
            cls.apiclient,
            cls.testdata["disk_offering"],
            tags=ZONETAG1
        )
        cls._cleanup = [
            cls.account,
            cls.service_offering_zwps,
            cls.disk_offering_zwps,
        ]
    except Exception as e:
        # Partial setup must still be torn down before re-raising.
        cls.tearDownClass()
        raise e
    return
def setUpClass(cls):
    """One-time fixture: connect API/DB clients, the hypervisor, and the
    SolidFire cluster (via ElementFactory); resolve infrastructure objects;
    create the test account, an API user with registered keys, and a
    compute offering (all registered for cleanup).
    """
    # Set up API client
    testclient = super(TestAddRemoveHosts, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.configData = testclient.getParsedTestDataConfig()
    cls.dbConnection = testclient.getDbConnection()
    cls.testdata = TestData().testdata
    # Pool-master IP is only needed for XenServer deployments.
    if TestData.hypervisor_type == TestData.xenServer:
        cls.xs_pool_master_ip = list_hosts(
            cls.apiClient,
            clusterid=cls.testdata[TestData.clusterId],
            name=TestData.xen_server_master_hostname)[0].ipaddress
    # NOTE(review): assumed to handle all hypervisor types internally,
    # hence called unconditionally — confirm against _connect_to_hypervisor.
    cls._connect_to_hypervisor()
    # Set up SolidFire connection
    solidfire = cls.testdata[TestData.solidFire]
    cls.sfe = ElementFactory.create(
        solidfire[TestData.mvip],
        solidfire[TestData.username],
        solidfire[TestData.password])
    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(
        cls.apiClient, cls.zone.id, cls.configData["ostype"])
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
    # Create test account
    cls.account = Account.create(
        cls.apiClient,
        cls.testdata[TestData.account],
        admin=1
    )
    # Set up connection to make customized API calls
    user = User.create(
        cls.apiClient,
        cls.testdata[TestData.user],
        account=cls.account.name,
        domainid=cls.domain.id
    )
    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, user.id)
    cls.cs_api = SignedAPICall.CloudStack(
        api_url, userkeys.apikey, userkeys.secretkey)
    cls.compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering]
    )
    # Resources destroyed in tearDownClass.
    cls._cleanup = [
        cls.compute_offering,
        user,
        cls.account
    ]
def setUpClass(cls):
    """One-time fixture (hypervisor-parameterized variant): connect API/DB
    clients, the hypervisor, and SolidFire; resolve infrastructure objects;
    create the test account, API user with keys, and a compute offering.
    """
    # Set up API client
    testclient = super(TestAddRemoveHosts, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.configData = testclient.getParsedTestDataConfig()
    cls.dbConnection = testclient.getDbConnection()
    cls.testdata = TestData().testdata
    # Pool-master IP only applies to XenServer deployments.
    if TestData.hypervisor_type == TestData.xenServer:
        cls.xs_pool_master_ip = list_hosts(
            cls.apiClient,
            clusterid=cls.testdata[TestData.clusterId],
            name=TestData.xen_server_master_hostname)[0].ipaddress
    # NOTE(review): assumed hypervisor-agnostic, hence unconditional —
    # confirm against _connect_to_hypervisor's implementation.
    cls._connect_to_hypervisor()
    # Set up SolidFire connection
    solidfire = cls.testdata[TestData.solidFire]
    cls.sfe = ElementFactory.create(solidfire[TestData.mvip],
                                    solidfire[TestData.username],
                                    solidfire[TestData.password])
    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    # Template chosen by hypervisor type rather than OS type here.
    cls.template = get_template(cls.apiClient, cls.zone.id,
                                hypervisor=TestData.hypervisor_type)
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
    # Create test account
    cls.account = Account.create(cls.apiClient,
                                 cls.testdata[TestData.account],
                                 admin=1)
    # Set up connection to make customized API calls
    user = User.create(cls.apiClient,
                       cls.testdata[TestData.user],
                       account=cls.account.name,
                       domainid=cls.domain.id)
    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, user.id)
    cls.cs_api = SignedAPICall.CloudStack(
        api_url, userkeys.apikey, userkeys.secretkey)
    cls.compute_offering = ServiceOffering.create(
        cls.apiClient, cls.testdata[TestData.computeOffering])
    # Resources destroyed in tearDownClass.
    cls._cleanup = [cls.compute_offering, user, cls.account]
def setUpClass(cls):
    """One-time fixture: initialize API/DB clients, locate the configured
    zone/cluster/template/host, open XenServer and Datera sessions, and
    create account, user, compute offering and disk offering (all
    registered for cleanup).
    """
    # Set up API client
    testclient = super(TestPrimaryStorage, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    cls.services = testclient.getParsedTestDataConfig()
    cls.testdata = TestData().testdata
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    # Select the cluster named in the test data (last match wins).
    for cluster in list_clusters(cls.apiClient):
        if cluster.name == cls.testdata[TestData.clusterName]:
            cls.cluster = cluster
    # Select the template matching the configured OS name (last match wins).
    list_template_response = list_templates(cls.apiClient,
                                            zoneid=cls.zone.id,
                                            templatefilter='all')
    for templates in list_template_response:
        if templates.name == cls.testdata[TestData.osName]:
            cls.template = templates
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
    # NOTE(review): xs_pool_master_ip is reused — first the host list,
    # then the matching host's IP address string.
    cls.xs_pool_master_ip = list_hosts(cls.apiClient,
                                       clusterid=cls.cluster.id)
    for host in cls.xs_pool_master_ip:
        if host.name == cls.testdata[TestData.hostName]:
            cls.xs_pool_master_ip = host.ipaddress
    host_ip = "https://" + cls.xs_pool_master_ip
    # Authenticate against the XenServer pool master.
    cls.xen_session = XenAPI.Session(host_ip)
    xenserver = cls.testdata[TestData.xenServer]
    cls.xen_session.xenapi.login_with_password(
        xenserver[TestData.username], xenserver[TestData.password])
    # Open the Datera management connection.
    datera = cls.testdata[TestData.Datera]
    cls.datera_api = DateraApi(username=datera[TestData.login],
                               password=datera[TestData.password],
                               hostname=datera[TestData.mvip])
    # Create test account
    cls.account = Account.create(cls.apiClient,
                                 cls.testdata[TestData.account],
                                 admin=1)
    # Set up connection to make customized API calls
    user = User.create(cls.apiClient,
                       cls.testdata[TestData.user],
                       account=cls.account.name,
                       domainid=cls.domain.id)
    cls.compute_offering = ServiceOffering.create(
        cls.apiClient, cls.testdata[TestData.computeOffering])
    cls.disk_offering = DiskOffering.create(
        cls.apiClient, cls.testdata[TestData.diskOffering])
    # Resources destroyed in tearDownClass.
    cls._cleanup = [
        cls.compute_offering,
        cls.disk_offering,
        user,
        cls.account
    ]
def get_local_cluster(self):
    """Return the CloudStack cluster whose ``sp.cluster.id`` configuration
    matches the local StorPool CLUSTER_ID, or ``None`` if no cluster matches.

    Reads the local StorPool configuration with ``storpool_confshow`` and
    compares each cluster's ``sp.cluster.id`` setting against it.
    """
    # storpool_confshow prints a "KEY=value" line. Fix: check_output
    # returns bytes on Python 3, so decode before str operations —
    # bytes.split("=") with a str separator raises TypeError.
    storpool_clusterid = subprocess.check_output(
        ['storpool_confshow', 'CLUSTER_ID']).decode('utf-8')
    # clusterid[1] is the value part (may carry a trailing newline, which
    # the substring test below tolerates).
    clusterid = storpool_clusterid.split("=")
    clusters = list_clusters(self.testClass.apiclient)
    for c in clusters:
        configuration = list_configurations(
            self.testClass.apiclient,
            clusterid=c.id
        )
        for conf in configuration:
            if conf.name == 'sp.cluster.id' and (conf.value in clusterid[1]):
                return c
def get_remote_cluster(cls, apiclient, zoneid):
    """Return the first cluster in *zoneid* whose ``sp.cluster.id``
    configuration does NOT match the local StorPool CLUSTER_ID (i.e. a
    remote StorPool cluster), or ``None`` if all clusters are local.

    :param apiclient: CloudStack API client used for the list calls.
    :param zoneid: zone to search for clusters.
    """
    # Fix: check_output returns bytes on Python 3; decode before using
    # str operations (bytes.split("=") with a str separator raises).
    storpool_clusterid = subprocess.check_output(
        ['storpool_confshow', 'CLUSTER_ID']).decode('utf-8')
    clusterid = storpool_clusterid.split("=")
    logging.debug(storpool_clusterid)
    clusters = list_clusters(apiclient, zoneid=zoneid)
    for c in clusters:
        configuration = list_configurations(apiclient, clusterid=c.id)
        for conf in configuration:
            # "not in" (vs. get_local_cluster's "in") selects the remote side.
            if conf.name == 'sp.cluster.id' and (conf.value not in clusterid[1]):
                return c
def setUpClass(cls):
    """Minimal one-time fixture: obtain API client and test data, then
    resolve the domain, zone, first cluster and hypervisor type.
    """
    client = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = client.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    cls.services = client.getParsedTestDataConfig()
    # Resolve the infrastructure objects the tests operate on.
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, client.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    return
def setUpClass(cls):
    """One-time fixture for multi-cluster volume-snapshot tests on VMware.

    Resolves zone/domain/template; if the environment is unsupported
    (non-VMware or fewer than two clusters) sets ``skiptest`` and stops.
    Otherwise creates the test account, a user API client, and zone-tagged
    service/disk offerings, all registered for cleanup.
    """
    testClient = super(TestMultipleVolumeSnapshots, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.testdata = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    # Fix: use the local client handle like the rest of this method,
    # instead of cls.testClient (injected externally by the marvin
    # runner) — same object, removes a hidden dependency.
    cls.hypervisor = testClient.getHypervisorInfo()
    cls.template = get_template(cls.apiclient, cls.zone.id,
                                cls.testdata["ostype"])
    cls._cleanup = []
    cls.skiptest = False
    # This suite requires VMware and at least two clusters.
    clus_list = list_clusters(cls.apiclient)
    if cls.hypervisor.lower() not in ['vmware'] or len(clus_list) < 2:
        cls.skiptest = True
        return
    try:
        # Create an account
        cls.account = Account.create(cls.apiclient,
                                     cls.testdata["account"],
                                     domainid=cls.domain.id)
        # Create user api client of the account
        cls.userapiclient = testClient.getUserApiClient(
            UserName=cls.account.name,
            DomainName=cls.account.domain)
        # Create Service offering (zone-wide tag)
        cls.service_offering_zwps = ServiceOffering.create(
            cls.apiclient,
            cls.testdata["service_offering"],
            tags=ZONETAG1)
        cls.disk_offering_zwps = DiskOffering.create(
            cls.apiclient,
            cls.testdata["disk_offering"],
            tags=ZONETAG1)
        cls._cleanup = [
            cls.account,
            cls.service_offering_zwps,
            cls.disk_offering_zwps,
        ]
    except Exception as e:
        # Tear down whatever partial setup exists before re-raising.
        cls.tearDownClass()
        raise e
    return
def setUpCloudStack(cls):
    """Heavyweight fixture for the uuid→globalId migration test.

    Builds CloudStack from the 'uuid' commit, starts it under maven/jetty,
    waits for Jetty to come up (by tailing the log), initializes marvin,
    resets the migration DB flag, provisions VMs/volumes/snapshots/templates
    to migrate, then switches the checkout to the globalId commit.
    """
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build the management server from the pre-migration ('uuid') commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so we can kill the tree
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Tail the server log until Jetty reports startup (or the log ends).
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False, bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    # Grace period for the server to finish initializing after Jetty is up.
    time.sleep(15)
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Force the migration to re-run by resetting its completion flag.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    # Reuse the "ssd" offering if present, otherwise create one.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, serviceOffering)
    assert cls.service_offering is not None
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Three VMs: vm1 exercises VM snapshots, vm2 volume snapshots/templates,
    # vm3 verifies its ROOT disk exists on StorPool by UUID.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    # check that ROOT disk is created with uuid
    root_volume = list_volumes(cls.apiclient,
                               virtualmachineid=cls.virtual_machine3.id,
                               type="ROOT")
    try:
        spvolume = cls.spapi.volumeList(volumeName=root_volume[0].id)
    except spapi.ApiError as err:
        cfg.logger.info("Root volume is not created with UUID")
        raise Exception(err)
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)
    cls.random_data_vm_snapshot1 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    volume_attached = cls.virtual_machine.attach_volume(
        cls.apiclient, cls.volume)
    # Snapshot 1: write data, snapshot, then delete the data so a later
    # revert can prove the snapshot restored it.
    cls.helper.write_on_disks(cls.random_data_vm_snapshot1,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)
    MemorySnapshot = False  # disk-only VM snapshots throughout
    cls.vm_snapshot1 = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(
        cls.vm_snapshot1, cls.virtual_machine, cls.test_dir, cls.random_data)
    # Snapshot 2: same pattern with fresh data.
    cls.random_data_vm_snapshot2 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot2,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)
    cls.vm_snapshot2 = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(
        cls.vm_snapshot2, cls.virtual_machine, cls.test_dir, cls.random_data)
    # vm snapshot to be deleted without revert
    cls.random_data_vm_snapshot3 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot3,
                              cls.virtual_machine, cls.test_dir,
                              cls.random_data)
    cls.vm_snapshot_for_delete = cls.helper.create_vm_snapshot(
        MemorySnapshot, cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(
        cls.vm_snapshot_for_delete, cls.virtual_machine, cls.test_dir,
        cls.random_data)
    # Volume snapshots and templates: one via secondary storage, one
    # bypassing it (the boolean flag selects the mode).
    cls.snapshot_on_secondary = cls.helper.create_snapshot(
        False, cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_on_secondary)
    cls.template_on_secondary = cls.helper.create_template_from_snapshot(
        cls.services, snapshotid=cls.snapshot_on_secondary.id)
    cls._cleanup.append(cls.template_on_secondary)
    cls.snapshot_bypassed = cls.helper.create_snapshot(
        True, cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_bypassed)
    cls.template_bypased = cls.helper.create_template_from_snapshot(
        cls.services, snapshotid=cls.snapshot_bypassed.id)
    cls._cleanup.append(cls.template_bypased)
    # change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def test_02_list_snapshots_with_removed_data_store(self): """Test listing volume snapshots with removed data stores """ # 1 - Create new volume -> V # 2 - Create new Primary Storage -> PS # 3 - Attach and detach volume V from vm # 4 - Migrate volume V to PS # 5 - Take volume V snapshot -> S # 6 - List snapshot and verify it gets properly listed although Primary Storage was removed # Create new volume vol = Volume.create( self.apiclient, self.services["volume"], diskofferingid=self.disk_offering.id, zoneid=self.zone.id, account=self.account.name, domainid=self.account.domainid, ) self.cleanup.append(vol) self.assertIsNotNone(vol, "Failed to create volume") vol_res = Volume.list(self.apiclient, id=vol.id) self.assertEqual( validateList(vol_res)[0], PASS, "Invalid response returned for list volumes") vol_uuid = vol_res[0].id clusters = list_clusters(self.apiclient, zoneid=self.zone.id) assert isinstance(clusters, list) and len(clusters) > 0 # Attach created volume to vm, then detach it to be able to migrate it self.virtual_machine_with_disk.stop(self.apiclient) self.virtual_machine_with_disk.attach_volume(self.apiclient, vol) # Create new Primary Storage storage = StoragePool.create(self.apiclient, self.services["nfs2"], clusterid=clusters[0].id, zoneid=self.zone.id, podid=self.pod.id) self.cleanup.append(storage) self.assertEqual(storage.state, 'Up', "Check primary storage state") self.assertEqual(storage.type, 'NetworkFilesystem', "Check storage pool type") storage_pools_response = list_storage_pools(self.apiclient, id=storage.id) self.assertEqual(isinstance(storage_pools_response, list), True, "Check list response returns a valid list") self.assertNotEqual(len(storage_pools_response), 0, "Check list Hosts response") storage_response = storage_pools_response[0] self.assertEqual(storage_response.id, storage.id, "Check storage pool ID") self.assertEqual(storage.type, storage_response.type, "Check storage pool type ") 
self.virtual_machine_with_disk.detach_volume(self.apiclient, vol) # Migrate volume to new Primary Storage Volume.migrate(self.apiclient, storageid=storage.id, volumeid=vol.id) volume_response = list_volumes( self.apiclient, id=vol.id, ) self.assertNotEqual(len(volume_response), 0, "Check list Volumes response") volume_migrated = volume_response[0] self.assertEqual(volume_migrated.storageid, storage.id, "Check volume storage id") # Take snapshot of new volume snapshot = Snapshot.create(self.apiclient, volume_migrated.id, account=self.account.name, domainid=self.account.domainid) self.debug("Snapshot created: ID - %s" % snapshot.id) # Delete volume, VM and created Primary Storage cleanup_resources(self.apiclient, self.cleanup) # List snapshot and verify it gets properly listed although Primary Storage was removed snapshot_response = Snapshot.list(self.apiclient, id=snapshot.id) self.assertNotEqual(len(snapshot_response), 0, "Check list Snapshot response") self.assertEqual(snapshot_response[0].id, snapshot.id, "Check snapshot id") # Delete snapshot and verify it gets properly deleted (should not be listed) self.cleanup = [snapshot] cleanup_resources(self.apiclient, self.cleanup) self.cleanup = [] snapshot_response_2 = Snapshot.list(self.apiclient, id=snapshot.id) self.assertEqual(snapshot_response_2, None, "Check list Snapshot response") return
def setUpClass(cls):
    """
    1. Init ACS API and DB connection
    2. Init Datera API connection
    3. Create ACS Primary storage
    4. Create ACS compute and disk offering.
    5. Create ACS data disk without attaching to a VM
    """
    logger.info("Setting up Class")
    # Set up API client
    testclient = super(TestVolumes, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    # Setup test data (optionally overridden by an external TestData file)
    td = TestData()
    if cls.config.TestData and cls.config.TestData.Path:
        td.update(cls.config.TestData.Path)
    cls.testdata = td.testdata
    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_name=cls.config.zones[0].name)
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(cls.apiClient, cls.zone.id)
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
    # Set up datera connection
    datera = cls.testdata[TestData.Datera]
    cls.dt_client = get_api(username=datera[TestData.login],
                            password=datera[TestData.password],
                            hostname=datera[TestData.mvip],
                            version="v2")
    # Create test account
    cls.account = Account.create(cls.apiClient,
                                 cls.testdata["account"],
                                 admin=1)
    # Set up connection to make customized API calls
    cls.user = User.create(cls.apiClient,
                           cls.testdata["user"],
                           account=cls.account.name,
                           domainid=cls.domain.id)
    # Datera-backed primary storage pool.
    primarystorage = cls.testdata[TestData.primaryStorage]
    cls.primary_storage = StoragePool.create(
        cls.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=cls.zone.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor])
    cls.disk_offering = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering])
    cls.disk_offering_new = DiskOffering.create(
        cls.apiClient,
        cls.testdata['testdiskofferings']['newsizeandiopsdo'])
    cls.supports_resign = cls._get_supports_resign()
    # Set up hypervisor specific connections
    if cls.cluster.hypervisortype.lower() == 'xenserver':
        cls.setUpXenServer()
    if cls.cluster.hypervisortype.lower() == 'kvm':
        cls.setUpKVM()
    # Create 1 data volume_1
    cls.volume = Volume.create(cls.apiClient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    # Resources that are to be destroyed.
    # NOTE(review): cls.compute_offering is not created in this method —
    # presumably set by setUpXenServer()/setUpKVM(); verify.
    cls._cleanup = [
        cls.volume,
        cls.compute_offering,
        cls.disk_offering,
        cls.disk_offering_new,
        cls.user,
        cls.account
    ]
def setUpClass(cls):
    """One-time fixture: resolve infrastructure objects and existing
    StorPool primary storage pools, pick/create a service offering, create
    a data volume and a test VM (both registered for cleanup).
    """
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    # Setup test data
    td = TestData()
    cls.testdata = td.testdata
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    # The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    import pprint
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.template = template
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOfferingOnly = cls.testdata[TestData.serviceOfferingOnly]
    # Both primary storage pools are expected to already exist.
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage2.get("name"))
    cls.primary_storage2 = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    #===========================================================================
    # service_offering = list_service_offering(
    #     cls.apiclient,
    #     name="tags"
    # )
    # if service_offering is not None:
    #     cls.service_offering = service_offering[0]
    # else:
    #     cls.service_offering = ServiceOffering.create(
    #         cls.apiclient,
    #         serviceOffering)
    #===========================================================================
    # Reuse the pre-created offering if present, otherwise create it.
    service_offering_only = list_service_offering(cls.apiclient,
                                                  name="cloud-test-dev-2")
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiclient,
            serviceOfferingOnly)
    assert cls.service_offering_only is not None
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Create 1 data volume_1
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    # Resources that are to be destroyed
    cls._cleanup = [cls.virtual_machine, cls.volume]
    # Random payload written to disk by the tests.
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpClass(cls):
    """One-time fixture: resolve infrastructure objects and existing
    StorPool primary storage pools, pick/create service offerings, then
    create a data volume and a test VM (both registered for cleanup).
    """
    # Set up API client
    testclient = super(TestVolumes, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    cls.services = testclient.getParsedTestDataConfig()
    # Setup test data
    td = TestData()
    cls.testdata = td.testdata
    # Get Resources from Cloud Infrastructure
    cls.domain = get_domain(cls.apiClient)
    cls.zone = get_zone(cls.apiClient, testclient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiClient)
    cls.template = get_template(
        cls.apiClient,
        cls.zone.id,
        account = "system"
    )
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOfferingOnly = cls.testdata[TestData.serviceOfferingOnly]
    # Both primary storage pools are expected to already exist.
    storage_pool = list_storage_pools(
        cls.apiClient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]
    storage_pool = list_storage_pools(
        cls.apiClient,
        name = primarystorage2.get("name")
    )
    cls.primary_storage2 = storage_pool[0]
    disk_offering = list_disk_offering(
        cls.apiClient,
        name="Small"
    )
    assert disk_offering is not None
    # Reuse pre-created service offerings if present, else create them.
    service_offering = list_service_offering(
        cls.apiClient,
        name="cloud-test-dev-1"
    )
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiClient,
            serviceOffering)
    service_offering_only = list_service_offering(
        cls.apiClient,
        name="cloud-test-dev-2"
    )
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiClient,
            serviceOfferingOnly)
    assert cls.service_offering_only is not None
    cls.disk_offering = disk_offering[0]
    account = list_accounts(
        cls.apiClient,
        name="admin"
    )
    cls.account = account[0]
    # Create 1 data volume_1
    cls.volume = Volume.create(
        cls.apiClient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.apiClient,
        {"name":"StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    # Resources that are to be destroyed
    cls._cleanup = [
        cls.virtual_machine,
        cls.volume
    ]
def setUpClass(cls):
    """One-time suite setup for cross-cluster VM-migration tests.

    Opens XenAPI sessions to one host in each of the two configured
    clusters, connects to the SolidFire cluster, creates a test
    account/user, a SolidFire-backed primary storage pool, and the
    compute/disk offerings used by the tests.
    """
    # Set up API client
    testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.configData = testclient.getParsedTestDataConfig()
    cls.dbConnection = testclient.getDbConnection()
    cls.testdata = TestData().testdata

    xenserver = cls.testdata[TestData.xenServer]

    # Set up xenAPI connection
    # NOTE(review): host is selected by the hard-coded name
    # "XenServer-6.5-1"; [0] raises IndexError if it is absent — confirm
    # the lab inventory matches.
    host_ip = "https://" + \
              list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId1], name="XenServer-6.5-1")[0].ipaddress

    # Set up XenAPI connection
    cls.xen_session_1 = XenAPI.Session(host_ip)
    cls.xen_session_1.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

    # Set up xenAPI connection (second cluster)
    host_ip = "https://" + \
              list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId2], name="XenServer-6.5-3")[0].ipaddress

    # Set up XenAPI connection
    cls.xen_session_2 = XenAPI.Session(host_ip)
    cls.xen_session_2.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

    # Set up SolidFire connection
    solidfire = cls.testdata[TestData.solidFire]
    cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0]
    cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0]
    cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Create test account
    cls.account = Account.create(
        cls.apiClient,
        cls.testdata["account"],
        admin=1
    )

    # Set up connection to make customized API calls
    cls.user = User.create(
        cls.apiClient,
        cls.testdata["user"],
        account=cls.account.name,
        domainid=cls.domain.id
    )

    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

    cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

    primarystorage = cls.testdata[TestData.primaryStorage]

    # NOTE(review): the created pool is not in _cleanup — presumably torn
    # down elsewhere (e.g. tearDownClass); verify.
    cls.primary_storage = StoragePool.create(
        cls.apiClient,
        primarystorage
    )

    cls.compute_offering_1 = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering1]
    )
    cls.compute_offering_2 = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering2]
    )
    cls.compute_offering_3 = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering3]
    )

    cls.disk_offering_1 = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering1]
    )
    cls.disk_offering_2 = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering2]
    )

    # Resources that are to be destroyed
    cls._cleanup = [
        cls.compute_offering_1,
        cls.compute_offering_2,
        cls.compute_offering_3,
        cls.disk_offering_1,
        cls.disk_offering_2,
        cls.user,
        cls.account
    ]
def setUpClass(cls):
    """One-time suite setup for the snapshot tests.

    Establishes a XenAPI session to the first reachable host in the
    configured cluster, connects to the Datera backend, creates a test
    account/user, the Datera-backed primary storage pool, and the
    compute/disk offerings used by the tests.

    Fix vs. previous version: the host-connection loop used to swallow
    ``XenAPI.Failure`` silently and assigned ``cls.xen_session`` before
    the login succeeded — if every host failed, later code crashed with
    a confusing AttributeError or used a stale, un-logged-in session.
    The session is now only published on successful login, and setup
    fails fast with a clear message when no host is reachable.
    """
    # Set up API client
    testclient = super(TestSnapshots, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()

    td = TestData()

    if cls.config.TestData and cls.config.TestData.Path:
        td.update(cls.config.TestData.Path)

    cls.testdata = td.testdata

    cls.supports_resign = cls._get_supports_resign()

    # Set up xenAPI connection: try every host in the cluster and keep
    # the first session that logs in successfully.
    hosts = list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId])
    xenserver = cls.testdata[TestData.xenServer]

    cls.xen_session = None

    for h in hosts:
        host_ip = "https://" + h.ipaddress
        try:
            session = XenAPI.Session(host_ip)
            session.xenapi.login_with_password(
                xenserver[TestData.username], xenserver[TestData.password])
            # Publish only a fully-authenticated session.
            cls.xen_session = session
            break
        except XenAPI.Failure:
            # Host unreachable or credentials rejected; try the next one.
            continue

    assert cls.xen_session is not None, \
        "Could not establish a XenAPI session with any host in cluster %s" \
        % cls.testdata[TestData.clusterId]

    # Set up datera connection
    datera = cls.testdata[TestData.Datera]
    cls.dt_client = dfs_sdk.DateraApi(username=datera[TestData.login],
                                      password=datera[TestData.password],
                                      hostname=datera[TestData.mvip])

    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(
        cls.apiClient,
        cls.zone.id,
        template_name=cls.testdata[TestData.templateName])
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Create test account
    cls.account = Account.create(cls.apiClient,
                                 cls.testdata["account"],
                                 admin=1)

    # Set up connection to make customized API calls
    cls.user = User.create(cls.apiClient,
                           cls.testdata["user"],
                           account=cls.account.name,
                           domainid=cls.domain.id)

    primarystorage = cls.testdata[TestData.primaryStorage]

    cls.primary_storage = StoragePool.create(
        cls.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=cls.zone.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor])

    cls.compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering])

    cls.disk_offering = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering])

    # Resources that are to be destroyed
    cls._cleanup = [
        cls.compute_offering,
        cls.disk_offering,
        cls.user,
        cls.account
    ]
def setUpCloudStack(cls):
    """One-time suite setup for the VM-snapshot tests (StorPool).

    Resolves zone/cluster/template, creates a dedicated test account
    with security groups, looks up the pre-provisioned StorPool primary
    storage and the "ssd" offerings, then creates the shared data
    volume and VM.
    """
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()

    cls._cleanup = []
    cls.unsupportedHypervisor = False

    # Setup test data
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()

    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)

    # Pick the zone named in the management-server config; fail fast if
    # it does not exist.
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.name == cls.getClsConfig().mgtSvr[0].zone:
            cls.zone = z
    assert cls.zone is not None

    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    #The version of CentOS has to be supported
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )

    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]

    cls.template = template

    cls.account = cls.helper.create_account(
        cls.apiclient,
        cls.services["account"],
        accounttype = 1,
        domainid=cls.domain.id,
        roleid = 1
    )
    cls._cleanup.append(cls.account)

    # Open the account's default security group so the test VM is
    # reachable.
    securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid, id = securitygroup.id)

    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]

    # The StorPool pool must already exist; it is looked up, not created.
    storage_pool = list_storage_pools(
        cls.apiclient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]

    disk_offering = list_disk_offering(
        cls.apiclient,
        name="ssd"
    )
    assert disk_offering is not None

    # Reuse the "ssd" service offering when present; otherwise create it
    # from test data.
    service_offering_only = list_service_offering(
        cls.apiclient,
        name="ssd"
    )
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering_only is not None

    cls.disk_offering = disk_offering[0]

    # Create 1 data volume_1
    # NOTE(review): volume and VM are not appended to _cleanup —
    # presumably reclaimed when the account is deleted; verify.
    cls.volume = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
        size=10
    )

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )

    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpClass(cls):
    """One-time suite setup for the upload/download tests (SolidFire).

    Connects to the SolidFire cluster, creates a test account/user, a
    SolidFire-backed primary storage pool, offerings (disk offering is
    custom-sized), and the VM used by the tests.
    """
    # Set up API client
    testclient = super(TestUploadDownload, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.configData = testclient.getParsedTestDataConfig()
    cls.dbConnection = testclient.getDbConnection()

    cls.testdata = TestData().testdata

    # Set up SolidFire connection
    solidfire = cls.testdata[TestData.solidFire]
    cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Create test account
    cls.account = Account.create(cls.apiClient, cls.testdata[TestData.account], admin=1)

    # Set up connection to make customized API calls
    user = User.create(cls.apiClient, cls.testdata[TestData.user], account=cls.account.name, domainid=cls.domain.id)

    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, user.id)

    cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

    primarystorage = cls.testdata[TestData.primaryStorage]

    # NOTE(review): the created pool is not in _cleanup — presumably torn
    # down elsewhere; verify.
    cls.primary_storage = StoragePool.create(
        cls.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=cls.zone.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor])

    compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering])

    # custom=True: disk size is supplied at volume-creation time.
    cls.disk_offering = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering],
        custom=True)

    # Create VM and volume for tests
    cls.virtual_machine = VirtualMachine.create(
        cls.apiClient,
        cls.testdata[TestData.virtualMachine],
        accountid=cls.account.name,
        zoneid=cls.zone.id,
        serviceofferingid=compute_offering.id,
        templateid=cls.template.id,
        domainid=cls.domain.id,
        startvm=True)

    cls._cleanup = [compute_offering, cls.disk_offering, user, cls.account]
def setUpClass(cls):
    """One-time suite setup for managed system-VM tests.

    Connects to the hypervisor and the SolidFire cluster, creates a
    test account/user, a compute offering, and three system-VM
    offerings (SSVM, CPVM, VR) derived from one shared test-data dict.
    """
    # Set up API client
    testclient = super(TestManagedSystemVMs, cls).getClsTestClient()

    cls.apiClient = testclient.getApiClient()
    cls.configData = testclient.getParsedTestDataConfig()
    cls.dbConnection = testclient.getDbConnection()

    cls.testdata = TestData().testdata

    cls._connect_to_hypervisor()

    # Set up SolidFire connection
    solidfire = cls.testdata[TestData.solidFire]
    cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

    # Get Resources from Cloud Infrastructure
    cls.zone = Zone(get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId]).__dict__)
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Create test account
    cls.account = Account.create(
        cls.apiClient,
        cls.testdata["account"],
        admin=1
    )

    # Set up connection to make customized API calls
    cls.user = User.create(
        cls.apiClient,
        cls.testdata["user"],
        account=cls.account.name,
        domainid=cls.domain.id
    )

    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

    cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

    cls.compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering]
    )

    # One system-offering dict is mutated in place and reused for the
    # three system-VM types; each create() happens before the next
    # mutation, so the sequencing below is load-bearing.
    systemoffering = cls.testdata[TestData.systemOffering]

    systemoffering[TestData.name] = "Managed SSVM"
    systemoffering['systemvmtype'] = "secondarystoragevm"

    cls.secondary_storage_offering = ServiceOffering.create(
        cls.apiClient,
        systemoffering
    )

    systemoffering[TestData.name] = "Managed CPVM"
    systemoffering['systemvmtype'] = "consoleproxy"

    cls.console_proxy_offering = ServiceOffering.create(
        cls.apiClient,
        systemoffering
    )

    systemoffering[TestData.name] = "Managed VR"
    systemoffering['systemvmtype'] = "domainrouter"

    cls.virtual_router_offering = ServiceOffering.create(
        cls.apiClient,
        systemoffering
    )

    # Resources that are to be destroyed
    cls._cleanup = [
        cls.secondary_storage_offering,
        cls.console_proxy_offering,
        cls.virtual_router_offering,
        cls.compute_offering,
        cls.user,
        cls.account
    ]
def setUpClass(cls):
    """One-time suite setup for the primary-storage tests (Datera).

    Resolves zone, the configured cluster/template/host, opens a XenAPI
    session to the pool-master host, connects to the Datera backend,
    and creates the test account, user, and offerings.

    Fix vs. previous version: ``cls.xs_pool_master_ip`` was first bound
    to the *list* returned by ``list_hosts`` and only conditionally
    rebound to an IP string inside the loop; if the configured host (or
    cluster/template) was missing, later code failed with an opaque
    TypeError/AttributeError. The host list now uses its own local and
    setup fails fast with clear assertion messages. The final value of
    ``cls.xs_pool_master_ip`` (the master host's IP string) is
    unchanged for any code that reads it.
    """
    # Set up API client
    testclient = super(TestPrimaryStorage, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    cls.services = testclient.getParsedTestDataConfig()
    cls.testdata = TestData().testdata
    cls.zone = get_zone(
        cls.apiClient, zone_id=cls.testdata[TestData.zoneId])

    # Locate the configured cluster by name.
    cls.cluster = None
    for cluster in list_clusters(cls.apiClient):
        if cluster.name == cls.testdata[TestData.clusterName]:
            cls.cluster = cluster
    assert cls.cluster is not None, \
        "Cluster '%s' not found" % cls.testdata[TestData.clusterName]

    # Locate the configured template by name.
    cls.template = None
    list_template_response = list_templates(cls.apiClient,
                                            zoneid=cls.zone.id,
                                            templatefilter='all')
    for templates in list_template_response:
        if templates.name == cls.testdata[TestData.osName]:
            cls.template = templates
    assert cls.template is not None, \
        "Template '%s' not found" % cls.testdata[TestData.osName]

    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Find the pool master's IP among the cluster's hosts (was: the host
    # list and the IP shared one variable).
    hosts = list_hosts(cls.apiClient, clusterid=cls.cluster.id)
    cls.xs_pool_master_ip = None
    for host in hosts:
        if host.name == cls.testdata[TestData.hostName]:
            cls.xs_pool_master_ip = host.ipaddress
    assert cls.xs_pool_master_ip is not None, \
        "Host '%s' not found in cluster '%s'" \
        % (cls.testdata[TestData.hostName], cls.cluster.name)

    host_ip = "https://" + cls.xs_pool_master_ip

    # Set up XenAPI connection to the pool master.
    cls.xen_session = XenAPI.Session(host_ip)
    xenserver = cls.testdata[TestData.xenServer]
    cls.xen_session.xenapi.login_with_password(
        xenserver[TestData.username], xenserver[TestData.password])

    # Set up Datera connection.
    datera = cls.testdata[TestData.Datera]
    cls.datera_api = DateraApi(
        username=datera[TestData.login],
        password=datera[TestData.password],
        hostname=datera[TestData.mvip])

    # Create test account
    cls.account = Account.create(
        cls.apiClient,
        cls.testdata[TestData.account],
        admin=1
    )

    # Set up connection to make customized API calls
    user = User.create(
        cls.apiClient,
        cls.testdata[TestData.user],
        account=cls.account.name,
        domainid=cls.domain.id
    )

    cls.compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering]
    )
    cls.disk_offering = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering]
    )

    # Resources that are to be destroyed
    cls._cleanup = [
        cls.compute_offering,
        cls.disk_offering,
        user,
        cls.account
    ]
def setUpClass(cls):
    """One-time suite setup for the upload/download tests (SolidFire).

    Near-duplicate of the other ``TestUploadDownload.setUpClass`` in
    this file: connects to SolidFire, creates a test account/user, a
    SolidFire-backed primary storage pool, offerings (disk offering is
    custom-sized), and the VM used by the tests.
    """
    # Set up API client
    testclient = super(TestUploadDownload, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.configData = testclient.getParsedTestDataConfig()
    cls.dbConnection = testclient.getDbConnection()

    cls.testdata = TestData().testdata

    # Set up SolidFire connection
    solidfire = cls.testdata[TestData.solidFire]
    cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Create test account
    cls.account = Account.create(
        cls.apiClient,
        cls.testdata[TestData.account],
        admin=1
    )

    # Set up connection to make customized API calls
    user = User.create(
        cls.apiClient,
        cls.testdata[TestData.user],
        account=cls.account.name,
        domainid=cls.domain.id
    )

    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, user.id)

    cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

    primarystorage = cls.testdata[TestData.primaryStorage]

    cls.primary_storage = StoragePool.create(
        cls.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=cls.zone.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor]
    )

    compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering]
    )

    # custom=True: disk size is supplied at volume-creation time.
    cls.disk_offering = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering],
        custom=True
    )

    # Create VM and volume for tests
    cls.virtual_machine = VirtualMachine.create(
        cls.apiClient,
        cls.testdata[TestData.virtualMachine],
        accountid=cls.account.name,
        zoneid=cls.zone.id,
        serviceofferingid=compute_offering.id,
        templateid=cls.template.id,
        domainid=cls.domain.id,
        startvm=True
    )

    cls._cleanup = [
        compute_offering,
        cls.disk_offering,
        user,
        cls.account
    ]
def setUpClass(cls):
    """One-time suite setup for the volume tests (SolidFire).

    Enables template resigning in the DB, opens a XenAPI session to the
    named XenServer host, connects to SolidFire, creates a test
    account/user, a SolidFire-backed primary storage pool, offerings,
    and the shared VM and data volume.
    """
    # Set up API client
    testclient = super(TestVolumes, cls).getClsTestClient()

    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()

    cls.testdata = TestData().testdata

    # Flip the resign support flag directly in the CloudStack DB.
    cls.supports_resign = True

    sf_util.set_supports_resign(cls.supports_resign, cls.dbConnection)

    # Set up xenAPI connection
    # NOTE(review): host is selected by the hard-coded name
    # "XenServer-6.5-1"; [0] raises IndexError if absent — confirm the
    # lab inventory matches.
    host_ip = (
        "https://"
        + list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId], name="XenServer-6.5-1")[0].ipaddress
    )

    # Set up XenAPI connection
    cls.xen_session = XenAPI.Session(host_ip)

    xenserver = cls.testdata[TestData.xenServer]

    cls.xen_session.xenapi.login_with_password(xenserver[TestData.username], xenserver[TestData.password])

    # Set up SolidFire connection
    cls.sf_client = sf_api.SolidFireAPI(endpoint_dict=cls.testdata[TestData.solidFire])

    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(cls.apiClient, cls.zone.id, template_name=cls.testdata[TestData.templateName])
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Create test account
    cls.account = Account.create(cls.apiClient, cls.testdata["account"], admin=1)

    # Set up connection to make customized API calls
    cls.user = User.create(cls.apiClient, cls.testdata["user"], account=cls.account.name, domainid=cls.domain.id)

    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

    cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

    primarystorage = cls.testdata[TestData.primaryStorage]

    cls.primary_storage = StoragePool.create(
        cls.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=cls.zone.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        capacityiops=primarystorage[TestData.capacityIops],
        capacitybytes=primarystorage[TestData.capacityBytes],
        hypervisor=primarystorage[TestData.hypervisor],
    )

    cls.compute_offering = ServiceOffering.create(cls.apiClient, cls.testdata[TestData.computeOffering])

    cls.disk_offering = DiskOffering.create(cls.apiClient, cls.testdata[TestData.diskOffering])

    # Create VM and volume for tests
    cls.virtual_machine = VirtualMachine.create(
        cls.apiClient,
        cls.testdata[TestData.virtualMachine],
        accountid=cls.account.name,
        zoneid=cls.zone.id,
        serviceofferingid=cls.compute_offering.id,
        templateid=cls.template.id,
        domainid=cls.domain.id,
        startvm=True,
    )

    cls.volume = Volume.create(
        cls.apiClient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )

    # Resources that are to be destroyed
    cls._cleanup = [cls.volume, cls.virtual_machine, cls.compute_offering, cls.disk_offering, cls.user, cls.account]
def setUpClass(cls):
    """One-time suite setup for cross-cluster VM-migration tests.

    Near-duplicate of the other ``TestVMMigrationWithStorage
    .setUpClass`` in this file: opens XenAPI sessions to one host in
    each configured cluster, connects to SolidFire, creates a test
    account/user, a SolidFire-backed primary storage pool, and the
    compute/disk offerings used by the tests.
    """
    # Set up API client
    testclient = super(TestVMMigrationWithStorage, cls).getClsTestClient()

    cls.apiClient = testclient.getApiClient()
    cls.configData = testclient.getParsedTestDataConfig()
    cls.dbConnection = testclient.getDbConnection()

    cls.testdata = TestData().testdata

    xenserver = cls.testdata[TestData.xenServer]

    # Set up xenAPI connection
    # NOTE(review): hard-coded host names "XenServer-6.5-1"/"-3"; [0]
    # raises IndexError if absent — confirm the lab inventory matches.
    host_ip = "https://" + \
              list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId1], name="XenServer-6.5-1")[0].ipaddress

    # Set up XenAPI connection
    cls.xen_session_1 = XenAPI.Session(host_ip)

    cls.xen_session_1.xenapi.login_with_password(
        xenserver[TestData.username], xenserver[TestData.password])

    # Set up xenAPI connection (second cluster)
    host_ip = "https://" + \
              list_hosts(cls.apiClient, clusterid=cls.testdata[TestData.clusterId2], name="XenServer-6.5-3")[0].ipaddress

    # Set up XenAPI connection
    cls.xen_session_2 = XenAPI.Session(host_ip)

    cls.xen_session_2.xenapi.login_with_password(
        xenserver[TestData.username], xenserver[TestData.password])

    # Set up SolidFire connection
    solidfire = cls.testdata[TestData.solidFire]

    cls.sfe = ElementFactory.create(solidfire[TestData.mvip], solidfire[TestData.username], solidfire[TestData.password])

    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster_1 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId1])[0]
    cls.cluster_2 = list_clusters(cls.apiClient, id=cls.testdata[TestData.clusterId2])[0]
    cls.template = get_template(cls.apiClient, cls.zone.id, cls.configData["ostype"])
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Create test account
    cls.account = Account.create(cls.apiClient, cls.testdata["account"], admin=1)

    # Set up connection to make customized API calls
    cls.user = User.create(cls.apiClient, cls.testdata["user"], account=cls.account.name, domainid=cls.domain.id)

    url = cls.testdata[TestData.url]
    api_url = "http://" + url + ":8080/client/api"
    userkeys = User.registerUserKeys(cls.apiClient, cls.user.id)

    cls.cs_api = SignedAPICall.CloudStack(api_url, userkeys.apikey, userkeys.secretkey)

    primarystorage = cls.testdata[TestData.primaryStorage]

    # NOTE(review): the created pool is not in _cleanup — presumably torn
    # down elsewhere; verify.
    cls.primary_storage = StoragePool.create(cls.apiClient, primarystorage)

    cls.compute_offering_1 = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering1])

    cls.compute_offering_2 = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering2])

    cls.compute_offering_3 = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering3])

    cls.disk_offering_1 = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering1])

    cls.disk_offering_2 = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering2])

    # Resources that are to be destroyed
    cls._cleanup = [
        cls.compute_offering_1,
        cls.compute_offering_2,
        cls.compute_offering_3,
        cls.disk_offering_1,
        cls.disk_offering_2,
        cls.user,
        cls.account
    ]
def setUpCloudStack(cls):
    """One-time setup for the uuid→globalId migration tests.

    Builds and boots a CloudStack instance from the pre-globalId
    commit (via Maven/Jetty in a separate process group), waits for
    Jetty to report started, resets the ``sp.migration...completed``
    DB flag, provisions VMs/volumes on StorPool- and NFS-backed pools,
    then switches the working tree to the globalId commit for the
    actual tests.
    """
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()

    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Check out and build the pre-globalId (uuid) commit under test.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    # Launch in its own session/process group so the whole Maven+Jetty
    # tree can be signalled together via killpg.
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the Maven log until Jetty reports it is up.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name], shell=False, bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        # NOTE(review): on Python 3, readline() yields bytes, so the
        # `str in line` test below would raise TypeError — this code
        # presumably runs under Python 2; confirm before porting.
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    # Grace period for the web app to finish initialising after the
    # "Started" log line.
    time.sleep(15)
    cfg.logger.info("Processing with the setup")

    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Force the migration flag off so the migration path is exercised.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)

    cls.spapi = spapi.Api.fromConfig(multiCluster=True)

    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")

    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]

    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]

    # The StorPool pools must already exist; looked up by name.
    storage_pool = list_storage_pools(cls.apiclient, name=primarystorage.get("name"))
    storage_pool2 = list_storage_pools(cls.apiclient, name=primarystorage2.get("name"))
    cls.primary_storage = storage_pool[0]
    cls.primary_storage2 = storage_pool2[0]

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None

    # Reuse the "ssd"/"ssd2" service offerings when present; otherwise
    # create them from test data.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None

    service_offering2 = list_service_offering(cls.apiclient, name="ssd2")
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient,
            serviceOffering2)
    assert cls.service_offering2 is not None

    nfs_service_offerings = {
        "name": "nfs",
        "displaytext": "NFS service offerings",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "nfs"
    }

    nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')

    nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')

    if nfs_service_offering is None:
        nfs_service_offering = ServiceOffering.create(
            cls.apiclient,
            nfs_service_offerings)
    else:
        nfs_service_offering = nfs_service_offering[0]

    cls.nfs_service_offering = nfs_service_offering

    cls.nfs_storage_pool = nfs_storage_pool[0]
    # Make sure the NFS pool is usable (may be left in maintenance by a
    # previous run).
    cls.nfs_storage_pool = StoragePool.cancelMaintenance(cls.apiclient, cls.nfs_storage_pool.id)

    cls.disk_offering = disk_offering[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    # Two VMs on NFS-backed storage, two on StorPool-backed offerings.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.nfs_service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.nfs_service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)

    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering2.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)

    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)

    # NOTE(review): each tags list holds ONE comma-joined string
    # ("ssd, nfs, ssd2"), not separate tag elements — looks deliberate
    # for the API's tag parameter, but verify.
    cls.primary_storage = StoragePool.update(cls.apiclient,
                                             id=cls.primary_storage.id,
                                             tags=["ssd, nfs, ssd2"])
    cls.primary_storage2 = StoragePool.update(cls.apiclient,
                                              id=cls.primary_storage2.id,
                                              tags=["ssd, ssd2"])

    #change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
    # Re-fetch the pools so cached objects reflect the updated tags.
    cls.primary_storage = list_storage_pools(
        cls.apiclient,
        name=primarystorage.get("name"))[0]
    cls.primary_storage2 = list_storage_pools(
        cls.apiclient,
        name=primarystorage2.get("name"))[0]
def setUpCloudStack(cls):
    """Provision the full fixture for the uuid-to-globalId volume migration tests.

    Builds and starts CloudStack at the pre-globalId (uuid) commit, resets the
    'sp.migration.to.global.ids.completed' DB flag, creates service/disk
    offerings, 6 VMs and 7 volumes, exercises attach/detach on them, takes
    snapshots (with and without secondary-storage bypass), deletes volume7,
    and finally switches the code base to the globalId commit so the tests run
    against the migrated implementation.
    """
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build the management server at the old (uuid) commit first.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    # Launch jetty in its own process group so the whole tree can be signalled.
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the maven log until Jetty reports it is up; EOF means startup failed.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name], shell=False,
                            bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # give the server a settle period after the log line appears
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Reset the migration-completed flag so the globalId migration re-runs.
    v = dbclient.execute("select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute("update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    # The version of CentOS has to be supported
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]
    # Primary storage is expected to exist already; take the first match.
    storage_pool = list_storage_pools(
        cls.apiclient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(
        cls.apiclient,
        name="Small"
    )
    disk_offering_20 = list_disk_offering(
        cls.apiclient,
        name="Medium"
    )
    disk_offering_100 = list_disk_offering(
        cls.apiclient,
        name="Large"
    )
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    # Reuse the "ssd" / "ssd2" service offerings if present, create otherwise.
    service_offering = list_service_offering(
        cls.apiclient,
        name="ssd"
    )
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None
    service_offering2 = list_service_offering(
        cls.apiclient,
        name="ssd2"
    )
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient,
            serviceOffering2)
    assert cls.service_offering2 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    account = list_accounts(
        cls.apiclient,
        name="admin"
    )
    cls.account = account[0]
    # Six VMs on the "ssd" offering; all registered for teardown.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine4)
    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine5)
    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine6)
    # Seven data volumes; volume7 is intentionally NOT added to _cleanup —
    # it is deleted explicitly at the end of this setup.
    cls.volume1 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume1)
    cls.volume2 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_2],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume2)
    cls.volume3 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_3],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume3)
    cls.volume4 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_4],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume4)
    cls.volume5 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_5],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume5)
    cls.volume6 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_6],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume6)
    cls.volume7 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_7],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    # Exercise attach/detach while the VM is stopped so the volumes get
    # materialised on the StorPool backend (presumably — TODO confirm against
    # the plugin's attach semantics).
    cls.virtual_machine.stop(cls.apiclient, forced=True)
    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(cls.apiclient, cls.volume1)
    vol = list_volumes(cls.apiclient, id = cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    vol = list_volumes(cls.apiclient, id = cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.start(cls.apiclient)
    # Snapshots are taken on VM5's ROOT volume and on volume7.
    list_root = list_volumes(cls.apiclient, virtualmachineid = cls.virtual_machine5.id, type = "ROOT")
    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)
    # Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)
    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient, volume_id = cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient, volume_id = cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)
    # Snapshot taken with secondary-storage bypass re-enabled.
    cls.helper.bypass_secondary(True)
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)
    Volume.delete(cls.volume7, cls.apiclient)
    # Switch the code base to the globalId commit; the tests then verify the
    # uuid -> globalId migration of everything created above.
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpCloudStack(cls):
    """Provision the uuid-to-globalId migration fixture using zone-scoped
    StorPool primary storages created from randomly named templates.

    Differs from the sibling setup in that it reads the zone name from the
    marvin JSON config, creates two StorPool templates/primary storages and
    matching service offerings, restarts CloudStack once the storages exist,
    and creates an extra eighth volume.
    """
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Pull the zone name out of the marvin config file.
    with open(cls.ARGS.cfg) as json_text:
        cfg.logger.info(cls.ARGS.cfg)
        cfg.logger.info(json_text)
        conf = json.load(json_text)
        cfg.logger.info(conf)
        zone = conf['mgtSvr'][0].get('zone')
    # Build and launch CloudStack at the old (uuid) commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Watch the maven log until Jetty reports startup; EOF aborts the wait.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name], shell=False,
                            bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Reset the migration-completed flag so the globalId migration re-runs.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, zone_name=zone)
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    cls.host = list_hosts(cls.apiclient, zoneid=cls.zone.id)
    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    # First StorPool template + zone-scoped primary storage (random name so
    # repeated runs don't collide).
    cls.sp_template_1 = "-".join(["test-ssd-b", random_gen()])
    cfg.logger.info(
        pprint.pformat("############################ %s" % cls.zone))
    storpool_primary_storage = {
        "name": cls.sp_template_1,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_1,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 155466,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_1
    }
    cls.storpool_primary_storage = storpool_primary_storage
    host, port, auth = cls.getCfgFromUrl(
        url=storpool_primary_storage["url"])
    cls.spapi = spapi.Api(host=host, port=port, auth=auth)
    storage_pool = list_storage_pools(
        cls.apiclient,
        name=storpool_primary_storage["name"])
    if storage_pool is None:
        # Create the backing StorPool volume template, then the CloudStack pool.
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool
    # Service offering tagged to the first template.
    storpool_service_offerings_ssd = {
        "name": cls.sp_template_1,
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": cls.sp_template_1
    }
    service_offerings_ssd = list_service_offering(
        cls.apiclient,
        name=storpool_service_offerings_ssd["name"])
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient,
            storpool_service_offerings_ssd)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    cls.service_offering = service_offerings_ssd
    cls._cleanup.append(cls.service_offering)
    cfg.logger.info(pprint.pformat(cls.service_offering))
    # Second StorPool template + primary storage + offering, same pattern.
    cls.sp_template_2 = "-".join(["test-ssd2-b", random_gen()])
    storpool_primary_storage2 = {
        "name": cls.sp_template_2,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_2,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 1554,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_2
    }
    cls.storpool_primary_storage2 = storpool_primary_storage2
    storage_pool = list_storage_pools(
        cls.apiclient,
        name=storpool_primary_storage2["name"])
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage2["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage2)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage2 = storage_pool
    storpool_service_offerings_ssd2 = {
        "name": cls.sp_template_2,
        "displaytext": "SP_CO_2",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "tags": cls.sp_template_2
    }
    service_offerings_ssd2 = list_service_offering(
        cls.apiclient,
        name=storpool_service_offerings_ssd2["name"])
    if service_offerings_ssd2 is None:
        service_offerings_ssd2 = ServiceOffering.create(
            cls.apiclient,
            storpool_service_offerings_ssd2)
    else:
        service_offerings_ssd2 = service_offerings_ssd2[0]
    cls.service_offering2 = service_offerings_ssd2
    cls._cleanup.append(cls.service_offering2)
    # Restart CloudStack so it picks up the newly created primary storages
    # (presumably — TODO confirm why the restart is required here).
    os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
    time.sleep(30)
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name], shell=False,
                            bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Six VMs on the first StorPool-tagged offering.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)
    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine5)
    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine6)
    # Data volumes; volume7 and volume8 are NOT added to _cleanup (volume7 is
    # deleted explicitly below).
    cls.volume1 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_1],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume1)
    cls.volume2 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_2],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume2)
    cls.volume3 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_3],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume3)
    cls.volume4 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_4],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume4)
    cls.volume5 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_5],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume5)
    cls.volume6 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_6],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume6)
    cls.volume7 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    # NOTE(review): volume8 reuses the volume_7 testdata entry — looks
    # intentional (a second volume with the same spec) but worth confirming.
    cls.volume8 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    # Attach/detach dance while the first VM is stopped.
    cls.virtual_machine.stop(cls.apiclient, forced=True)
    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(
        cls.apiclient, cls.volume1)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.start(cls.apiclient)
    # Snapshots on VM5's ROOT volume and on volume7.
    list_root = list_volumes(cls.apiclient,
                             virtualmachineid=cls.virtual_machine5.id,
                             type="ROOT")
    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)
    # Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(
        cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)
    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)
    # NOTE(review): unlike the sibling setup, bypass_secondary(True) is NOT
    # re-enabled before taking this "bypassed" snapshot — confirm intended.
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                 volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)
    Volume.delete(cls.volume7, cls.apiclient)
    # Switch to the globalId commit; the tests verify the migration.
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def test_02_list_snapshots_with_removed_data_store(self):
    """Test listing volume snapshots with removed data stores.

    Creates an NFS primary storage, live-migrates a VM's ROOT volume onto it,
    snapshots the migrated volume, deletes the VM and the storage pool, and
    then verifies the snapshot is still listable — and finally that deleting
    the snapshot removes it from the listing.
    """
    # 1) Create new Primary Storage
    clusters = list_clusters(self.apiclient, zoneid=self.zone.id)
    assert isinstance(clusters, list) and len(clusters) > 0
    storage = StoragePool.create(self.apiclient,
                                 self.services["nfs"],
                                 clusterid=clusters[0].id,
                                 zoneid=self.zone.id,
                                 podid=self.pod.id)
    self.assertEqual(storage.state, 'Up', "Check primary storage state")
    self.assertEqual(storage.type, 'NetworkFilesystem',
                     "Check storage pool type")
    # Verify the new pool is visible through the list API.
    storage_pools_response = list_storage_pools(self.apiclient,
                                                id=storage.id)
    self.assertEqual(isinstance(storage_pools_response, list), True,
                     "Check list response returns a valid list")
    self.assertNotEqual(len(storage_pools_response), 0,
                        "Check list Hosts response")
    storage_response = storage_pools_response[0]
    self.assertEqual(storage_response.id, storage.id,
                     "Check storage pool ID")
    self.assertEqual(storage.type, storage_response.type,
                     "Check storage pool type ")
    # 2) Migrate VM ROOT volume to new Primary Storage
    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=self.virtual_machine_with_disk.id,
        type='ROOT',
        listall=True)
    Volume.migrate(self.apiclient,
                   storageid=storage.id,
                   volumeid=volumes[0].id,
                   livemigrate="true")
    volume_response = list_volumes(
        self.apiclient,
        id=volumes[0].id,
    )
    self.assertNotEqual(len(volume_response), 0,
                        "Check list Volumes response")
    volume_migrated = volume_response[0]
    self.assertEqual(volume_migrated.storageid, storage.id,
                     "Check volume storage id")
    # Queue the VM and the new pool for deletion in step 4.
    self.cleanup.append(self.virtual_machine_with_disk)
    self.cleanup.append(storage)
    # 3) Take snapshot of VM ROOT volume
    snapshot = Snapshot.create(self.apiclient,
                               volume_migrated.id,
                               account=self.account.name,
                               domainid=self.account.domainid)
    self.debug("Snapshot created: ID - %s" % snapshot.id)
    # 4) Delete VM and created Primary Storage
    cleanup_resources(self.apiclient, self.cleanup)
    # 5) List snapshot and verify it gets properly listed although
    #    Primary Storage was removed
    snapshot_response = Snapshot.list(self.apiclient, id=snapshot.id)
    self.assertNotEqual(len(snapshot_response), 0,
                        "Check list Snapshot response")
    self.assertEqual(snapshot_response[0].id, snapshot.id,
                     "Check snapshot id")
    # 6) Delete snapshot and verify it gets properly deleted (should not be listed)
    # The already-cleaned resources are replaced with just the snapshot here.
    self.cleanup = [snapshot]
    cleanup_resources(self.apiclient, self.cleanup)
    snapshot_response_2 = Snapshot.list(self.apiclient, id=snapshot.id)
    self.assertEqual(snapshot_response_2, None,
                     "Check list Snapshot response")
    return
def setUpClass(cls):
    """Provision the Linstor volume-test fixture: zone/cluster/template lookup,
    a dedicated account and user, a Linstor-backed primary storage, compute and
    disk offerings, optional migration-test storages, plus one VM and one
    data volume registered for teardown.
    """
    # Set up API client
    testclient = super(TestLinstorVolumes, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.configData = testclient.getParsedTestDataConfig()
    cls.dbConnection = testclient.getDbConnection()
    cls.testdata = TestData().testdata
    # Get Resources from Cloud Infrastructure
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(cls.apiClient, cls.zone.id,
                                hypervisor=TestData.hypervisor_type)
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])
    # Create test account
    cls.account = Account.create(
        cls.apiClient,
        cls.testdata["account"],
        admin=1
    )
    # Set up connection to make customized API calls
    cls.user = User.create(
        cls.apiClient,
        cls.testdata["user"],
        account=cls.account.name,
        domainid=cls.domain.id
    )
    # Zone-scoped Linstor primary storage from the testdata spec.
    primarystorage = cls.testdata[TestData.primaryStorage]
    cls.primary_storage = StoragePool.create(
        cls.apiClient,
        primarystorage,
        scope=primarystorage[TestData.scope],
        zoneid=cls.zone.id,
        provider=primarystorage[TestData.provider],
        tags=primarystorage[TestData.tags],
        hypervisor=primarystorage[TestData.hypervisor]
    )
    cls.compute_offering = ServiceOffering.create(
        cls.apiClient,
        cls.testdata[TestData.computeOffering]
    )
    cls.disk_offering = DiskOffering.create(
        cls.apiClient,
        cls.testdata[TestData.diskOffering]
    )
    # Extra storages/offerings used only by the migration tests.
    # NOTE(review): these are not added to _cleanup below — confirm they are
    # torn down elsewhere.
    if cls.testdata[TestData.migrationTests]:
        primarystorage_sameinst = cls.testdata[TestData.primaryStorageSameInstance]
        cls.primary_storage_same_inst = StoragePool.create(
            cls.apiClient,
            primarystorage_sameinst,
            scope=primarystorage_sameinst[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage_sameinst[TestData.provider],
            tags=primarystorage_sameinst[TestData.tags],
            hypervisor=primarystorage_sameinst[TestData.hypervisor]
        )
        primarystorage_distinctinst = cls.testdata[TestData.primaryStorageDistinctInstance]
        cls.primary_storage_distinct_inst = StoragePool.create(
            cls.apiClient,
            primarystorage_distinctinst,
            scope=primarystorage_distinctinst[TestData.scope],
            zoneid=cls.zone.id,
            provider=primarystorage_distinctinst[TestData.provider],
            tags=primarystorage_distinctinst[TestData.tags],
            hypervisor=primarystorage_distinctinst[TestData.hypervisor]
        )
        cls.disk_offering_same_inst = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOfferingSameInstance]
        )
        cls.disk_offering_distinct_inst = DiskOffering.create(
            cls.apiClient,
            cls.testdata[TestData.diskOfferingDistinctInstance]
        )
    # Create VM and volume for tests
    cls.virtual_machine = VirtualMachine.create(
        cls.apiClient,
        cls.testdata[TestData.virtualMachine],
        accountid=cls.account.name,
        zoneid=cls.zone.id,
        serviceofferingid=cls.compute_offering.id,
        templateid=cls.template.id,
        domainid=cls.domain.id,
        startvm=False
    )
    TestLinstorVolumes._start_vm(cls.virtual_machine)
    cls.volume = Volume.create(
        cls.apiClient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    # Resources that are to be destroyed
    cls._cleanup = [
        cls.volume,
        cls.virtual_machine,
        cls.compute_offering,
        cls.disk_offering,
        cls.user,
        cls.account
    ]
def setUpCloudStack(cls):
    """Provision the fixture for the uuid-to-globalId live-migration tests.

    Builds and starts CloudStack at the pre-globalId (uuid) commit, resets the
    'sp.migration.to.global.ids.completed' DB flag, locates a local and a
    remote cluster (each needing at least two hosts), creates one VM plus one
    data volume on each, and finally switches the code base to the globalId
    commit so the tests exercise live migration across the id migration.
    """
    super(MigrationUuidToGlobalIdLiveMigration, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build the management server at the old (uuid) commit first.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    # Launch jetty in its own process group so the whole tree can be signalled.
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the maven log until Jetty reports startup; EOF aborts the wait.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name], shell=False,
                            bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # settle period after the startup log line appears
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Reset the migration-completed flag so the globalId migration re-runs.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    # Primary storage is expected to exist already; take the first match.
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    # Reuse the "ssd" service offering if present, create it otherwise.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, serviceOffering)
    assert cls.service_offering is not None
    cls.disk_offering = disk_offering[0]
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    cls.disk_offering_20 = disk_offering_20[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Live migration needs at least two hosts in each cluster.
    cls.local_cluster = cls.helper.get_local_cluster()
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.local_cluster.id)
    # Fixed assert message: the check requires more than one host, but the
    # old message read "less than 1", contradicting the condition.
    assert len(cls.host) > 1, "Local cluster has less than 2 hosts"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]
    cls.remote_cluster = cls.helper.get_remote_cluster()
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Remote cluster has less than 2 hosts"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]
    # VM and data volume pinned to the local cluster.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)
    # vm and volume on remote
    cls.virtual_machine_remote = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine_remote)
    cls.volume_remote = Volume.create(cls.apiclient,
                                      cls.testdata[TestData.volume_1],
                                      account=cls.account.name,
                                      domainid=cls.domain.id,
                                      zoneid=cls.zone.id,
                                      diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume_remote)
    # change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpClass(cls):
    """One-time fixture setup for the online-storage-migration tests.

    Wires up the API/DB clients, connects to the SolidFire cluster and
    the hypervisor, resolves shared infrastructure (zone, cluster,
    template, domain), creates a dedicated account/user for signed API
    calls, registers two primary storage pools and a compute offering,
    and records the resources to destroy in teardown.
    """
    # API client plumbing
    test_client = super(TestOnlineStorageMigration, cls).getClsTestClient()
    cls.apiClient = test_client.getApiClient()
    cls.configData = test_client.getParsedTestDataConfig()
    cls.dbConnection = test_client.getDbConnection()

    cls.testdata = TestData().testdata

    # Flip the "supports resign" flag directly in the DB, then attach to
    # the hypervisor before any storage is created.
    sf_util.set_supports_resign(True, cls.dbConnection)

    cls._connect_to_hypervisor()

    # SolidFire management connection
    sf_cfg = cls.testdata[TestData.solidFire]
    cls.sfe = ElementFactory.create(
        sf_cfg[TestData.mvip],
        sf_cfg[TestData.username],
        sf_cfg[TestData.password])

    # Shared cloud infrastructure references
    cls.zone = get_zone(cls.apiClient, zone_id=cls.testdata[TestData.zoneId])
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.template = get_template(
        cls.apiClient, cls.zone.id, hypervisor=TestData.hypervisor_type)
    cls.domain = get_domain(cls.apiClient, cls.testdata[TestData.domainId])

    # Dedicated account plus a user whose registered keys back the
    # signed (customized) API connection below.
    cls.account = Account.create(
        cls.apiClient, cls.testdata["account"], admin=1)
    cls.user = User.create(
        cls.apiClient,
        cls.testdata["user"],
        account=cls.account.name,
        domainid=cls.domain.id)

    mgmt_host = cls.testdata[TestData.url]
    endpoint = "http://%s:8080/client/api" % mgmt_host
    keys = User.registerUserKeys(cls.apiClient, cls.user.id)
    cls.cs_api = SignedAPICall.CloudStack(
        endpoint, keys.apikey, keys.secretkey)

    # Primary storage pools: one cluster-scoped, one provider-managed.
    ps_cfg = cls.testdata[TestData.primaryStorage]
    cls.primary_storage = StoragePool.create(
        cls.apiClient,
        ps_cfg,
        scope=ps_cfg[TestData.scope],
        zoneid=cls.zone.id,
        podid=cls.testdata[TestData.podId],
        clusterid=cls.cluster.id,
        tags=ps_cfg[TestData.tags])

    ps2_cfg = cls.testdata[TestData.primaryStorage2]
    cls.primary_storage_2 = StoragePool.create(
        cls.apiClient,
        ps2_cfg,
        scope=ps2_cfg[TestData.scope],
        zoneid=cls.zone.id,
        provider=ps2_cfg[TestData.provider],
        tags=ps2_cfg[TestData.tags],
        capacityiops=ps2_cfg[TestData.capacityIops],
        capacitybytes=ps2_cfg[TestData.capacityBytes],
        hypervisor=ps2_cfg[TestData.hypervisor])

    cls.compute_offering = ServiceOffering.create(
        cls.apiClient, cls.testdata[TestData.computeOffering])

    # Resources that are to be destroyed during class teardown.
    cls._cleanup = [cls.compute_offering, cls.user, cls.account]
def test_02_list_snapshots_with_removed_data_store(self):
    """Test listing volume snapshots with removed data stores

    Scenario:
      1 - Create new volume -> V
      2 - Create new Primary Storage -> PS
      3 - Attach and detach volume V from vm
      4 - Migrate volume V to PS
      5 - Take volume V snapshot -> S
      6 - List snapshot and verify it gets properly listed although
          Primary Storage was removed

    The statement order is load-bearing: the VM must be stopped before
    attach, the volume detached before migration, and the snapshot taken
    before ``cleanup_resources`` tears down volume, VM and storage pool.
    """
    # 1 - Create new volume -> V
    # 2 - Create new Primary Storage -> PS
    # 3 - Attach and detach volume V from vm
    # 4 - Migrate volume V to PS
    # 5 - Take volume V snapshot -> S
    # 6 - List snapshot and verify it gets properly listed although Primary Storage was removed

    # Create new volume
    vol = Volume.create(
        self.apiclient,
        self.services["volume"],
        diskofferingid=self.disk_offering.id,
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    self.cleanup.append(vol)
    self.assertIsNotNone(vol, "Failed to create volume")
    vol_res = Volume.list(
        self.apiclient,
        id=vol.id
    )
    self.assertEqual(
        validateList(vol_res)[0],
        PASS,
        "Invalid response returned for list volumes")
    # NOTE(review): vol_uuid is assigned but never used below — candidate
    # for removal (kept here to leave code unchanged).
    vol_uuid = vol_res[0].id

    # Create new Primary Storage on the first cluster of this zone.
    clusters = list_clusters(
        self.apiclient,
        zoneid=self.zone.id
    )
    assert isinstance(clusters, list) and len(clusters) > 0
    storage = StoragePool.create(self.apiclient,
                                 self.services["nfs2"],
                                 clusterid=clusters[0].id,
                                 zoneid=self.zone.id,
                                 podid=self.pod.id
                                 )
    # NOTE(review): the shared fixture VM is queued for deletion together
    # with the volume and the new pool — presumably intentional so the
    # pool can be removed; confirm against the class fixtures.
    self.cleanup.append(self.virtual_machine_with_disk)
    self.cleanup.append(storage)
    self.assertEqual(
        storage.state,
        'Up',
        "Check primary storage state"
    )
    self.assertEqual(
        storage.type,
        'NetworkFilesystem',
        "Check storage pool type"
    )
    # Verify the new pool is visible through the list API.
    storage_pools_response = list_storage_pools(self.apiclient,
                                                id=storage.id)
    self.assertEqual(
        isinstance(storage_pools_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(storage_pools_response),
        0,
        "Check list Hosts response"
    )
    storage_response = storage_pools_response[0]
    self.assertEqual(
        storage_response.id,
        storage.id,
        "Check storage pool ID"
    )
    self.assertEqual(
        storage.type,
        storage_response.type,
        "Check storage pool type "
    )

    # Attach created volume to vm, then detach it to be able to migrate it
    self.virtual_machine_with_disk.stop(self.apiclient)
    self.virtual_machine_with_disk.attach_volume(
        self.apiclient,
        vol
    )
    self.virtual_machine_with_disk.detach_volume(
        self.apiclient,
        vol
    )

    # Migrate volume to new Primary Storage
    Volume.migrate(self.apiclient,
                   storageid=storage.id,
                   volumeid=vol.id
                   )
    # Re-fetch the volume to confirm it now lives on the new pool.
    volume_response = list_volumes(
        self.apiclient,
        id=vol.id,
    )
    self.assertNotEqual(
        len(volume_response),
        0,
        "Check list Volumes response"
    )
    volume_migrated = volume_response[0]
    self.assertEqual(
        volume_migrated.storageid,
        storage.id,
        "Check volume storage id"
    )

    # Take snapshot of new volume
    snapshot = Snapshot.create(
        self.apiclient,
        volume_migrated.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.debug("Snapshot created: ID - %s" % snapshot.id)

    # Delete volume, VM and created Primary Storage
    cleanup_resources(self.apiclient, self.cleanup)

    # List snapshot and verify it gets properly listed although Primary Storage was removed
    snapshot_response = Snapshot.list(
        self.apiclient,
        id=snapshot.id
    )
    self.assertNotEqual(
        len(snapshot_response),
        0,
        "Check list Snapshot response"
    )
    self.assertEqual(
        snapshot_response[0].id,
        snapshot.id,
        "Check snapshot id"
    )

    # Delete snapshot and verify it gets properly deleted (should not be listed)
    # NOTE(review): self.cleanup is repurposed here after the first
    # cleanup pass — it is reassigned, not appended to, then emptied.
    self.cleanup = [snapshot]
    cleanup_resources(self.apiclient, self.cleanup)
    self.cleanup = []
    snapshot_response_2 = Snapshot.list(
        self.apiclient,
        id=snapshot.id
    )
    self.assertEqual(
        snapshot_response_2,
        None,
        "Check list Snapshot response"
    )
    return