def test_01_create_service_offering(self):
    """Test to create service offering"""
    # Validate the following:
    # 1. createServiceOfferings should return a valid information
    #    for newly created offering
    # 2. The Cloud Database contains the valid information
    service_offering = ServiceOffering.create(
        self.apiclient,
        self.services["service_offerings"]["tiny"]
    )
    self.cleanup.append(service_offering)

    self.debug(
        "Created service offering with ID: %s" % service_offering.id)

    list_service_response = list_service_offering(
        self.apiclient,
        id=service_offering.id
    )
    self.assertEqual(
        isinstance(list_service_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_service_response),
        0,
        "Check Service offering is created"
    )
    # FIX: message used to read "Check server id ..." although this
    # assertion verifies cpunumber; corrected for accurate failure output.
    self.assertEqual(
        list_service_response[0].cpunumber,
        self.services["service_offerings"]["tiny"]["cpunumber"],
        "Check cpunumber in createServiceOffering"
    )
    self.assertEqual(
        list_service_response[0].cpuspeed,
        self.services["service_offerings"]["tiny"]["cpuspeed"],
        "Check cpuspeed in createServiceOffering"
    )
    self.assertEqual(
        list_service_response[0].displaytext,
        self.services["service_offerings"]["tiny"]["displaytext"],
        "Check server displaytext in createServiceOfferings"
    )
    self.assertEqual(
        list_service_response[0].memory,
        self.services["service_offerings"]["tiny"]["memory"],
        "Check memory in createServiceOffering"
    )
    self.assertEqual(
        list_service_response[0].name,
        self.services["service_offerings"]["tiny"]["name"],
        "Check name in createServiceOffering"
    )
    return
def test_02_create_iops_offering(self):
    """Test to create service io burst offering"""
    # Validate the following:
    # 1. createServiceOfferings should return a valid information
    #    for newly created offering
    # 2. The Cloud Database contains the valid information
    #
    # FIX: the original code aliased the shared test-data dict
    # (svcs = self.services[...]["tiny"]) and then wrote the non-burst
    # ioburst keys into it, permanently polluting the "tiny" offering
    # definition for every later test in the run. Copy it instead.
    svcs = dict(self.services["service_offerings"]["tiny"])
    kws = {}
    # Split ioburst settings: bytes*/iops* entries are passed as extra
    # keyword args to ServiceOffering.create, the rest extend the
    # (copied) offering definition.
    for key in self.services["ioburst"]:
        if str(key).startswith("bytes") or str(key).startswith("iops"):
            kws[key] = self.services["ioburst"][key]
        else:
            svcs[key] = self.services["ioburst"][key]
    service_offering = ServiceOffering.create(
        self.apiclient,
        svcs,
        None,
        None,
        **kws
    )
    self.cleanup.append(service_offering)

    self.debug(
        "Created service offering with ID: %s" % service_offering.id)

    list_service_response = list_service_offering(
        self.apiclient,
        id=service_offering.id
    )
    self.assertEqual(
        isinstance(list_service_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_service_response),
        0,
        "Check Service offering is created"
    )
    # Each burst kwarg comes back on the offering as disk<Key> (e.g.
    # bytesReadRate -> diskBytesReadRate); verify the round trip.
    for key in kws:
        k = str(key)
        mapped = 'disk' + k[:1].upper() + k[1:]
        self.assertEqual(
            list_service_response[0][mapped],
            kws[key],
            "Check " + str(key) + " => " + str(mapped) +
            " in createServiceOffering"
        )
    return
def test_02_edit_domain_service_offering(self):
    """Test to update existing service offering"""
    # Validate the following:
    # 1. updateServiceOffering should return a valid information for the updated offering
    # 2. updateServiceOffering should fail when trying to add child domain but parent domain is
    #    also passed
    # 3. updateServiceOffering should be able to add new domain to the offering
    self.debug("Updating service offering with ID: %s" %
               self.service_offering.id)

    cmd = updateServiceOffering.updateServiceOfferingCmd()
    # Add parameters for API call
    cmd.id = self.service_offering.id
    input_domainid = "{0},{1},{2}".format(self.domain_1.id, self.domain_11.id, self.domain_2.id)
    result_domainid = "{0},{1}".format(self.domain_1.id, self.domain_2.id)
    cmd.domainid = input_domainid
    self.apiclient.updateServiceOffering(cmd)

    list_service_response = list_service_offering(
        self.apiclient,
        id=self.service_offering.id
    )
    self.assertEqual(
        isinstance(list_service_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_service_response),
        0,
        "Check Service offering is updated"
    )

    # The child domain (domain_11) is expected to be dropped by the API,
    # so comparing against the full input list should raise.
    # FIX: self.fail() raises AssertionError, so calling it inside the
    # try block meant its failure was swallowed by the except clause and
    # the test could never actually fail. It now lives in the else clause,
    # which runs only when assertCountEqual unexpectedly passes.
    try:
        self.assertCountEqual(
            list_service_response[0].domainid.split(","),
            input_domainid.split(","),
            "Check child domainid in updateServiceOffering, should fail"
        )
    except AssertionError:
        self.debug("Child domain check successful")
    else:
        self.fail("Child domain added to offering when parent domain already exist. Must be an error.")

    # The offering should end up associated with exactly the two parent domains.
    self.assertCountEqual(
        list_service_response[0].domainid.split(","),
        result_domainid.split(","),
        "Check domainid in updateServiceOffering"
    )
    return
def delete_vgpu_service_offering(self, serviceoffering):
    """Delete the given vGPU service offering and verify it is gone."""
    # Remove the offering, then confirm the listing API returns nothing
    # for its id anymore.
    serviceoffering.delete(self.apiclient)
    remaining = list_service_offering(self.apiclient, id=serviceoffering.id)
    self.assertEqual(
        remaining,
        None,
        "Check if service offering exists in listServiceOfferings")
    return
def test_03_create_service_offering_with_cache_mode_type(self):
    """Test to create service offering with each one of the valid
    cache mode types : none, writeback and writethrough"""
    # Validate the following:
    # 1. createServiceOfferings should return a valid information
    #    for newly created offering
    # 2. The Cloud Database contains the valid information
    cache_mode_types = ["none", "writeback", "writethrough"]
    # FIX: iterate the list directly instead of a hard-coded range(3) so
    # the loop stays correct if the list of cache modes ever changes.
    for cache_mode in cache_mode_types:
        service_offering = ServiceOffering.create(
            self.apiclient,
            self.services["service_offerings"]["tiny"],
            cacheMode=cache_mode)
        self.cleanup.append(service_offering)
        self.debug("Created service offering with ID: %s" %
                   service_offering.id)

        list_service_response = list_service_offering(
            self.apiclient,
            id=service_offering.id)
        self.assertEqual(isinstance(list_service_response, list),
                         True,
                         "Check list response returns a valid list")
        self.assertNotEqual(len(list_service_response),
                            0,
                            "Check Service offering is created")
        # FIX: message used to read "Check server id ..." although this
        # assertion verifies cpunumber.
        self.assertEqual(
            list_service_response[0].cpunumber,
            self.services["service_offerings"]["tiny"]["cpunumber"],
            "Check cpunumber in createServiceOffering")
        self.assertEqual(
            list_service_response[0].cpuspeed,
            self.services["service_offerings"]["tiny"]["cpuspeed"],
            "Check cpuspeed in createServiceOffering")
        self.assertEqual(
            list_service_response[0].displaytext,
            self.services["service_offerings"]["tiny"]["displaytext"],
            "Check server displaytext in createServiceOfferings")
        self.assertEqual(
            list_service_response[0].memory,
            self.services["service_offerings"]["tiny"]["memory"],
            "Check memory in createServiceOffering")
        self.assertEqual(
            list_service_response[0].name,
            self.services["service_offerings"]["tiny"]["name"],
            "Check name in createServiceOffering")
        self.assertEqual(list_service_response[0].cacheMode,
                         cache_mode,
                         "Check cacheMode in createServiceOffering")
    return
def test_02_edit_service_offering(self):
    """Test to update existing service offering"""
    # updateServiceOffering must persist a new name and displaytext and
    # report them back through listServiceOfferings.

    # Fresh random values so the update is observable.
    new_displaytext = random_gen()
    new_name = random_gen()
    self.debug("Updating service offering with ID: %s" %
               self.service_offering_1.id)

    # Build and fire the update API call.
    cmd = updateServiceOffering.updateServiceOfferingCmd()
    cmd.id = self.service_offering_1.id
    cmd.displaytext = new_displaytext
    cmd.name = new_name
    self.apiclient.updateServiceOffering(cmd)

    offerings = list_service_offering(
        self.apiclient, id=self.service_offering_1.id)

    self.assertEqual(
        isinstance(offerings, list),
        True,
        "Check list response returns a valid list")
    self.assertNotEqual(
        len(offerings),
        0,
        "Check Service offering is updated")

    updated = offerings[0]
    self.assertEqual(
        updated.displaytext,
        new_displaytext,
        "Check server displaytext in updateServiceOffering")
    self.assertEqual(
        updated.name,
        new_name,
        "Check server name in updateServiceOffering")
    return
def delete_vgpu_service_offering(self, serviceoffering):
    """Delete the supplied vGPU service offering and assert removal."""
    serviceoffering.delete(self.apiclient)
    # listServiceOfferings must come back empty (None) for the deleted id.
    lookup = list_service_offering(self.apiclient, id=serviceoffering.id)
    self.assertEqual(
        lookup,
        None,
        "Check if service offering exists in listServiceOfferings"
    )
    return
def test_03_delete_service_offering(self):
    """Test to delete service offering"""
    # Validate the following:
    # 1. deleteServiceOffering should return
    #    a valid information for newly created offering
    self.debug("Deleting service offering with ID: %s" %
               self.service_offering_2.id)

    self.service_offering_2.delete(self.apiclient)

    list_service_response = list_service_offering(
        self.apiclient,
        id=self.service_offering_2.id)

    # FIX: message referred to "listDiskOfferings" but the API being
    # verified here is listServiceOfferings.
    self.assertEqual(
        list_service_response,
        None,
        "Check if service offering exists in listServiceOfferings")
    return
def test_01_vm_deployment_with_compute_offering_with_ha_enabled(self):
    """ Test VM deployments (Create HA enabled Compute Service Offering and VM) """
    # Steps:
    #  1. Create a Compute service offering with the 'Offer HA' option selected.
    #  2. Create a Guest VM with that compute service offering.
    # Validations:
    #  1. listServiceOfferings must report 'offerha' as true for the offering.
    #  2. The deployed VM must carry the HA-enabled flag (haenable == True).

    # Validate the pre-created HA-enabled offering.
    offerings = list_service_offering(
        self.apiclient,
        id=self.service_offering_with_ha.id)
    self.assertEqual(
        isinstance(offerings, list),
        True,
        "listServiceOfferings returned invalid object in response.")
    self.assertNotEqual(
        len(offerings),
        0,
        "listServiceOfferings returned empty list.")
    self.assertEqual(
        offerings[0].offerha,
        True,
        "The service offering is not HA enabled")

    # Deploy a guest VM using the HA-enabled offering.
    vm = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_with_ha.id)

    deployed = VirtualMachine.list(
        self.apiclient, id=vm.id, listall=True)
    self.assertEqual(
        isinstance(deployed, list),
        True,
        "listVirtualMachines returned invalid object in response.")
    self.assertNotEqual(
        len(deployed),
        0,
        "listVirtualMachines returned empty list.")

    self.debug("Deployed VM on host: %s" % deployed[0].hostid)
    self.assertEqual(
        deployed[0].haenable,
        True,
        "VM not created with HA enable tag")
def test_03_delete_service_offering(self):
    """Test to delete service offering"""
    # Validate the following:
    # 1. deleteServiceOffering should return
    #    a valid information for newly created offering
    self.debug("Deleting service offering with ID: %s" %
               self.service_offering_2.id)

    self.service_offering_2.delete(self.apiclient)

    list_service_response = list_service_offering(
        self.apiclient,
        id=self.service_offering_2.id
    )
    # FIX: message referred to "listDiskOfferings" but the API being
    # verified here is listServiceOfferings.
    self.assertEqual(
        list_service_response,
        None,
        "Check if service offering exists in listServiceOfferings"
    )
    return
def setUpCloudStack(cls):
    """One-time class setup for the VM-migration-with-volumes tests.

    Wires up StorPool, NFS and Ceph primary storage plus matching
    service/disk offerings, creates a test account and two VMs (one
    backed by NFS, one by Ceph). Skips setup on unsupported hypervisors.
    """
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestMigrateVMWithVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    # Bail out early on hypervisors these tests do not support.
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    # Pick the zone whose internal DNS matches the configured mgmt-server IP.
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()
    storpool_primary_storage = cls.testdata[TestData.primaryStorage]
    cls.template_name = storpool_primary_storage.get("name")
    storpool_service_offerings = cls.testdata[TestData.serviceOffering]
    nfs_service_offerings = cls.testdata[TestData.serviceOfferingsPrimary]
    ceph_service_offerings = cls.testdata[TestData.serviceOfferingsCeph]
    nfs_disk_offerings = cls.testdata[TestData.nfsDiskOffering]
    ceph_disk_offerings = cls.testdata[TestData.cephDiskOffering]
    # Look up existing pools/offerings so they are created only if missing.
    storage_pool = list_storage_pools(cls.apiclient, name=cls.template_name)
    nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')
    ceph_primary_storage = cls.testdata[TestData.primaryStorage4]
    cls.ceph_storage_pool = list_storage_pools(
        cls.apiclient,
        name=ceph_primary_storage.get("name"))[0]
    service_offerings = list_service_offering(cls.apiclient, name=cls.template_name)
    nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')
    ceph_service_offering = list_service_offering(
        cls.apiclient,
        name=ceph_primary_storage.get("name"))
    # Create-or-reuse the StorPool primary storage pool.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    # Create-or-reuse the three service offerings (StorPool, NFS, Ceph).
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient,
            storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]
    if nfs_service_offering is None:
        nfs_service_offering = ServiceOffering.create(
            cls.apiclient,
            nfs_service_offerings)
    else:
        nfs_service_offering = nfs_service_offering[0]
    if ceph_service_offering is None:
        ceph_service_offering = ServiceOffering.create(
            cls.apiclient,
            ceph_service_offerings)
    else:
        ceph_service_offering = ceph_service_offering[0]
    nfs_disk_offering = list_disk_offering(cls.apiclient, name="nfs")
    # NOTE(review): these two branches are asymmetric — the create path
    # assigns the *local* nfs_disk_offering while the reuse path assigns
    # cls.nfs_disk_offering. If the offering ever has to be created,
    # cls.nfs_disk_offering is never set and the VM creation below would
    # fail with an AttributeError — TODO confirm whether the create path
    # should also assign the class attribute (compare with the ceph
    # branch just below, which assigns cls.* in both arms).
    if nfs_disk_offering is None:
        nfs_disk_offering = DiskOffering.create(cls.apiclient, nfs_disk_offerings)
    else:
        cls.nfs_disk_offering = nfs_disk_offering[0]
    ceph_disk_offering = list_disk_offering(cls.apiclient, name="ceph")
    if ceph_disk_offering is None:
        cls.ceph_disk_offering = DiskOffering.create(
            cls.apiclient,
            ceph_disk_offerings)
    else:
        cls.ceph_disk_offering = ceph_disk_offering[0]
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    cls.nfs_storage_pool = nfs_storage_pool[0]
    # Make sure neither pool is stuck in maintenance from a previous run.
    if cls.nfs_storage_pool.state == "Maintenance":
        cls.nfs_storage_pool = StoragePool.cancelMaintenance(
            cls.apiclient,
            cls.nfs_storage_pool.id)
    if cls.ceph_storage_pool.state == "Maintenance":
        cls.ceph_storage_pool = StoragePool.cancelMaintenance(
            cls.apiclient,
            cls.ceph_storage_pool.id)
    cls.account = cls.helper.create_account(
        cls.apiclient,
        cls.services["account"],
        accounttype=1,
        domainid=cls.domain.id,
        roleid=1)
    cls._cleanup.append(cls.account)
    securitygroup = SecurityGroup.list(cls.apiclient, account=cls.account.name, domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(
        cls.apiclient,
        account=cls.account.name,
        domainid=cls.account.domainid,
        id=securitygroup.id)
    # VM 1: NFS-backed root + data disk.
    cls.vm = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=nfs_service_offering.id,
        diskofferingid=cls.nfs_disk_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    # VM 2: Ceph-backed root + data disk.
    cls.vm2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=ceph_service_offering.id,
        diskofferingid=cls.ceph_disk_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    # NOTE(review): this FAILED check runs *after* the template has already
    # been used to create the VMs above — presumably it should precede the
    # VirtualMachine.create calls; verify against similar setups.
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.service_offering = service_offerings
    cls.nfs_service_offering = nfs_service_offering
    cls.debug(pprint.pformat(cls.service_offering))
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """One-time class setup for the VM snapshot tests.

    Resolves zone/cluster/hypervisor, creates a test account, reuses or
    creates the "ssd" service offering, creates one data volume and one
    VM backed by the StorPool primary storage.
    """
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    # Setup test data
    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    zones = list_zones(cls.apiclient)
    # Match the zone configured for the first management server.
    for z in zones:
        if z.name == cls.getClsConfig().mgtSvr[0].zone:
            cls.zone = z
    assert cls.zone is not None
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.template = template
    cls.account = cls.helper.create_account(
        cls.apiclient,
        cls.services["account"],
        accounttype = 1,
        domainid=cls.domain.id,
        roleid = 1
    )
    cls._cleanup.append(cls.account)
    # Attach the account's default security group so the VM is reachable.
    securitygroup = SecurityGroup.list(cls.apiclient, account = cls.account.name, domainid= cls.account.domainid)[0]
    cls.helper.set_securityGroups(
        cls.apiclient,
        account = cls.account.name,
        domainid= cls.account.domainid,
        id = securitygroup.id)
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    storage_pool = list_storage_pools(
        cls.apiclient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(
        cls.apiclient,
        name="ssd"
    )
    assert disk_offering is not None
    # Reuse the "ssd" service offering if present; create it otherwise.
    service_offering_only = list_service_offering(
        cls.apiclient,
        name="ssd"
    )
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering_only is not None
    cls.disk_offering = disk_offering[0]
    # Create 1 data volume_1
    cls.volume = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
        size=10
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """One-time class setup for the uuid-to-globalid volume migration tests.

    Builds and boots CloudStack from the pre-migration (uuid) commit,
    resets the migration-completed DB flag, provisions two StorPool
    primary-storage templates with matching service offerings, restarts
    CloudStack, then creates 6 VMs and 8 volumes, exercises a series of
    attach/detach cycles and snapshots, and finally switches the tree to
    the global-id commit so the tests run against migrated data.
    """
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Read the zone name out of the marvin config file.
    with open(cls.ARGS.cfg) as json_text:
        cfg.logger.info(cls.ARGS.cfg)
        cfg.logger.info(json_text)
        conf = json.load(json_text)
        cfg.logger.info(conf)
        zone = conf['mgtSvr'][0].get('zone')
    # Check out / build the pre-migration (uuid) commit and start CloudStack.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Tail the jetty log until the server reports it has started (or the
    # log ends, in which case CloudStack is shut down again).
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Force the migration flag back to 'false' so the migration runs again.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, zone_name=zone)
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    cls.host = list_hosts(cls.apiclient, zoneid=cls.zone.id)
    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    # First StorPool template + primary storage + service offering,
    # uniquely named per run so reruns do not collide.
    cls.sp_template_1 = "-".join(["test-ssd-b", random_gen()])
    cfg.logger.info(
        pprint.pformat("############################ %s" % cls.zone))
    storpool_primary_storage = {
        "name": cls.sp_template_1,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_1,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 155466,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_1
    }
    cls.storpool_primary_storage = storpool_primary_storage
    host, port, auth = cls.getCfgFromUrl(
        url=storpool_primary_storage["url"])
    cls.spapi = spapi.Api(host=host, port=port, auth=auth)
    storage_pool = list_storage_pools(
        cls.apiclient,
        name=storpool_primary_storage["name"])
    # Create the StorPool volume template + CloudStack pool if missing.
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool
    storpool_service_offerings_ssd = {
        "name": cls.sp_template_1,
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": cls.sp_template_1
    }
    service_offerings_ssd = list_service_offering(
        cls.apiclient,
        name=storpool_service_offerings_ssd["name"])
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient,
            storpool_service_offerings_ssd)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    cls.service_offering = service_offerings_ssd
    cls._cleanup.append(cls.service_offering)
    cfg.logger.info(pprint.pformat(cls.service_offering))
    # Second StorPool template + primary storage + service offering.
    cls.sp_template_2 = "-".join(["test-ssd2-b", random_gen()])
    storpool_primary_storage2 = {
        "name": cls.sp_template_2,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_2,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 1554,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_2
    }
    cls.storpool_primary_storage2 = storpool_primary_storage2
    storage_pool = list_storage_pools(
        cls.apiclient,
        name=storpool_primary_storage2["name"])
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage2["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage2)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage2 = storage_pool
    storpool_service_offerings_ssd2 = {
        "name": cls.sp_template_2,
        "displaytext": "SP_CO_2",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "tags": cls.sp_template_2
    }
    service_offerings_ssd2 = list_service_offering(
        cls.apiclient,
        name=storpool_service_offerings_ssd2["name"])
    if service_offerings_ssd2 is None:
        service_offerings_ssd2 = ServiceOffering.create(
            cls.apiclient,
            storpool_service_offerings_ssd2)
    else:
        service_offerings_ssd2 = service_offerings_ssd2[0]
    cls.service_offering2 = service_offerings_ssd2
    cls._cleanup.append(cls.service_offering2)
    # Restart CloudStack so it picks up the newly-added primary storage.
    os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
    time.sleep(30)
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Same startup-detection loop as above.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Six VMs on the first StorPool service offering.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)
    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine5)
    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine6)
    # Volumes 1-6 are registered for cleanup; 7 and 8 are not (volume7 is
    # deleted explicitly at the end of this setup).
    cls.volume1 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_1],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume1)
    cls.volume2 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_2],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume2)
    cls.volume3 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_3],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume3)
    cls.volume4 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_4],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume4)
    cls.volume5 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_5],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume5)
    cls.volume6 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_6],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume6)
    cls.volume7 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    # NOTE(review): volume8 reuses the volume_7 test-data entry —
    # presumably intentional (same spec, different volume), but verify
    # there is no missing TestData.volume_8 definition.
    cls.volume8 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    # Attach/detach cycles move the volumes onto StorPool-managed storage
    # while still under uuid naming, so the migration has data to convert.
    cls.virtual_machine.stop(cls.apiclient, forced=True)
    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(
        cls.apiclient, cls.volume1)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.start(cls.apiclient)
    # Snapshots of VM5's root disk and volume7, some bypassing secondary
    # storage, to cover both snapshot code paths during migration.
    list_root = list_volumes(cls.apiclient,
                             virtualmachineid=cls.virtual_machine5.id,
                             type="ROOT")
    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)
    #Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(
        cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)
    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                 volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)
    # Delete volume7 so the migration also sees snapshots of a removed volume.
    Volume.delete(cls.volume7, cls.apiclient)
    # Switch the working tree to the global-id commit; tests run against it.
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpCloudStack(cls):
    """Boot CloudStack at the pre-globalid ("uuid") commit, provision StorPool
    storage, offerings, VMs, volumes and snapshots, then switch the working
    tree to the globalid commit so the tests can verify the id migration.

    NOTE(review): statement order matters throughout -- attach/detach cycles
    must happen while the VM is stopped, snapshots are taken before volume7
    is deleted, and volume7 is intentionally NOT added to _cleanup because it
    is deleted at the end of setup.
    """
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build and launch the management server from the "uuid" commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled at once
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the jetty log until the server reports it is up (or the log ends).
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name], shell=False, bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            # tail exited -> the server cannot come up; stop CloudStack too.
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # give the API a moment to settle after jetty reports started
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Reset the migration flag so the globalid migration runs again on restart.
    v = dbclient.execute("select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute("update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]
    # The "ssd" primary storage pool is expected to already exist.
    storage_pool = list_storage_pools(
        cls.apiclient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]
    # Built-in disk offerings (Small/Medium/Large) must be present.
    disk_offering = list_disk_offering(
        cls.apiclient,
        name="Small"
    )
    disk_offering_20 = list_disk_offering(
        cls.apiclient,
        name="Medium"
    )
    disk_offering_100 = list_disk_offering(
        cls.apiclient,
        name="Large"
    )
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    # Reuse the "ssd"/"ssd2" service offerings when present, create otherwise.
    service_offering = list_service_offering(
        cls.apiclient,
        name="ssd"
    )
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None
    service_offering2 = list_service_offering(
        cls.apiclient,
        name="ssd2"
    )
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient,
            serviceOffering2)
    assert cls.service_offering2 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    # Run everything under the built-in admin account.
    account = list_accounts(
        cls.apiclient,
        name="admin"
    )
    cls.account = account[0]
    # Six VMs created under the uuid commit; the tests later exercise them
    # after the switch to globalid naming.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine4)
    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine5)
    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine6)
    # Data volumes 1-6 are registered for cleanup; volume7 is NOT -- it is
    # deleted at the end of setup after its snapshots are taken.
    cls.volume1 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume1)
    cls.volume2 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_2],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume2)
    cls.volume3 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_3],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume3)
    cls.volume4 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_4],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume4)
    cls.volume5 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_5],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume5)
    cls.volume6 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_6],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume6)
    cls.volume7 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_7],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    # Attach/detach cycles below materialise the volumes on the backend
    # under uuid naming -- presumably so the migration has real volumes to
    # rename; TODO confirm against the StorPool driver behaviour.
    cls.virtual_machine.stop(cls.apiclient, forced=True)
    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(cls.apiclient, cls.volume1)
    vol = list_volumes(cls.apiclient, id = cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    # Re-fetch volume3 after the attach/detach cycle to capture its
    # updated (backend-provisioned) state.
    vol = list_volumes(cls.apiclient, id = cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.start(cls.apiclient)
    # Snapshots of vm5's ROOT volume and of volume7, taken both with and
    # without bypassing secondary storage.
    list_root = list_volumes(cls.apiclient, virtualmachineid = cls.virtual_machine5.id, type = "ROOT")
    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)
    #Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)
    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient, volume_id = cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient, volume_id = cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)
    cls.helper.bypass_secondary(True)
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)
    # Delete volume7 while its snapshots remain -- exercises migration of
    # snapshots whose source volume is gone.
    Volume.delete(cls.volume7, cls.apiclient)
    # Switch the working tree to the globalid commit; the tests run against it.
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def test_01_find_hosts_for_migration(self):
    """ Test find suitable and not-suitable list of hosts for migration """
    # Steps:
    # 1. Create a Compute service offering with the tag.
    # 2. Create a Guest VM with that compute service offering.
    # 3. Find hosts to migrate the VM created above.
    # Validations:
    # 1. The offering is created with the tag and listServiceOfferings
    #    shows it.
    # 2. The new VM reports the selected compute offering.
    # 3. findHostsForMigration lists both suitable and not-suitable hosts.
    offerings = list_service_offering(
        self.apiclient,
        id=self.service_offering_with_tag.id)
    self.assertEqual(
        True,
        isinstance(offerings, list),
        "listServiceOfferings returned invalid object in response.")
    self.assertNotEqual(
        0,
        len(offerings),
        "listServiceOfferings returned empty list.")
    self.assertEqual(
        "PREMIUM",
        offerings[0].hosttags,
        "The service offering having tag")

    # Deploy a guest VM with the tagged offering (HA-enabled offering).
    deployed_vm = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_with_tag.id)
    vm_records = VirtualMachine.list(
        self.apiclient,
        id=deployed_vm.id,
        listall=True)
    self.assertEqual(
        True,
        isinstance(vm_records, list),
        "listVirtualMachines returned invalid object in response.")
    self.assertNotEqual(
        0,
        len(vm_records),
        "listVirtualMachines returned empty list.")
    self.debug("Deployed VM on host: %s" % vm_records[0].hostid)

    # The VM must have landed on a host carrying the PREMIUM tag.
    tagged_hosts = list_hosts(self.apiclient, id=deployed_vm.hostid)
    self.assertEqual(
        True,
        isinstance(tagged_hosts, list),
        "listHosts returned invalid object in response.")
    self.assertNotEqual(
        0,
        len(tagged_hosts),
        "listHosts returned empty list.")
    deployment_host = tagged_hosts[0]
    self.assertEqual(
        deployment_host.hosttags,
        "PREMIUM",
        "VM is created on a host having appropriate tag. %s" % deployment_host.uuid)

    try:
        migration_candidates = Host.listForMigration(
            self.apiclient,
            virtualmachineid=deployed_vm.id,
        )
    except Exception as e:
        raise Exception(
            "Exception while getting hosts list suitable for migration: %s" % e)
    self.assertEqual(
        True,
        isinstance(migration_candidates, list),
        "listHosts returned invalid object in response.")
    self.assertNotEqual(
        0,
        len(migration_candidates),
        "listHosts returned empty response.")

    # Partition candidates by their suitability flag.
    suitableHost = {h for h in migration_candidates if h.suitableformigration}
    notSuitableHost = {h for h in migration_candidates if not h.suitableformigration}
    self.assertTrue(notSuitableHost is not None,
                    "notsuitablehost should not be None")
    self.debug("Suitable Hosts: %s" % suitableHost)
    self.debug("Not suitable Hosts: %s" % notSuitableHost)
def setUpClass(cls):
    """One-time setup: resolve zone/template/offering and deploy the test VM.

    Reuses the two StorPool primary storages when they already exist,
    otherwise creates them from the test-data definitions.
    """
    testClient = super(TestDeployVM, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # Setup test data
    cls.testdata = TestData().testdata

    # OS template definition
    template = cls.testdata[TestData.template]

    # StorPool primary storage definitions
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]

    def _existing_or_created_pool(definition):
        # Look the pool up by name; create it from the definition if absent.
        found = list_storage_pools(
            cls.apiclient,
            name=definition.get("name")
        )
        if found is None:
            return StoragePool.create(
                cls.apiclient,
                definition,
                scope=definition[TestData.scope],
                zoneid=cls.zone.id,
                provider=definition[TestData.provider],
                tags=definition[TestData.tags],
                capacityiops=definition[TestData.capacityIops],
                capacitybytes=definition[TestData.capacityBytes],
                hypervisor=definition[TestData.hypervisor]
            )
        return found[0]

    cls.primary_storage = _existing_or_created_pool(primarystorage)
    cls.primary_storage2 = _existing_or_created_pool(primarystorage2)
    cls.debug(primarystorage)
    cls.debug(primarystorage2)

    # Register the OS template only when the system one is unavailable.
    os_template = get_template(
        cls.apiclient,
        cls.zone.id,
        account="system"
    )
    if os_template == FAILED:
        cls.template = Template.register(
            cls.apiclient,
            template,
            cls.zone.id,
            randomize_name=False
        )
    else:
        cls.template = os_template
    cls.debug(template)

    # Set Zones and disk offerings
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["small"]["template"] = cls.template.id
    cls.services["iso1"]["zoneid"] = cls.zone.id

    cls.account = list_accounts(
        cls.apiclient,
        name="admin"
    )[0]
    cls.debug(cls.account.id)

    # Pre-existing offering used for all deployments in this class.
    cls.service_offering = list_service_offering(
        cls.apiclient,
        name="cloud-test-dev-1"
    )[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services["small"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services['mode']
    )
    cls.cleanup = [cls.virtual_machine]
def setUpCloudStack(cls):
    """Boot CloudStack at the pre-globalid ("uuid") commit, provision NFS and
    StorPool offerings plus four VMs and a volume, then switch the working
    tree to the globalid commit.

    NOTE(review): after the commit switch the two primary storages are
    re-fetched so the class holds their post-migration representations.
    """
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build and launch the management server from the "uuid" commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled at once
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the jetty log until the server reports it is up (or the log ends).
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name], shell=False, bufsize=0, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            # tail exited -> the server cannot come up; stop CloudStack too.
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # give the API a moment to settle after jetty reports started
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Reset the migration flag so the globalid migration runs again on restart.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]
    # Both StorPool primary storages are expected to already exist.
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    storage_pool2 = list_storage_pools(cls.apiclient,
                                       name=primarystorage2.get("name"))
    cls.primary_storage = storage_pool[0]
    cls.primary_storage2 = storage_pool2[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    # Reuse the "ssd"/"ssd2" service offerings when present, create otherwise.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, serviceOffering)
    assert cls.service_offering is not None
    service_offering2 = list_service_offering(cls.apiclient, name="ssd2")
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient, serviceOffering2)
    assert cls.service_offering2 is not None
    # Offering pinned to the NFS-tagged pool, used for vm1/vm2.
    nfs_service_offerings = {
        "name": "nfs",
        "displaytext": "NFS service offerings",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "nfs"
    }
    nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')
    nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')
    if nfs_service_offering is None:
        nfs_service_offering = ServiceOffering.create(
            cls.apiclient, nfs_service_offerings)
    else:
        nfs_service_offering = nfs_service_offering[0]
    cls.nfs_service_offering = nfs_service_offering
    cls.nfs_storage_pool = nfs_storage_pool[0]
    # Ensure the NFS pool is usable (it may be in maintenance from a
    # previous run).
    cls.nfs_storage_pool = StoragePool.cancelMaintenance(
        cls.apiclient, cls.nfs_storage_pool.id)
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # vm1/vm2 on NFS, vm3 on ssd, vm4 on ssd2.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.nfs_service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.nfs_service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering2.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)
    # Re-tag the pools so both offerings can land on them after migration.
    cls.primary_storage = StoragePool.update(cls.apiclient,
                                             id=cls.primary_storage.id,
                                             tags=["ssd, nfs, ssd2"])
    cls.primary_storage2 = StoragePool.update(cls.apiclient,
                                              id=cls.primary_storage2.id,
                                              tags=["ssd, ssd2"])
    #change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
    # Refresh the pool objects now that the globalid code is running.
    cls.primary_storage = list_storage_pools(
        cls.apiclient, name=primarystorage.get("name"))[0]
    cls.primary_storage2 = list_storage_pools(
        cls.apiclient, name=primarystorage2.get("name"))[0]
def setUpClass(cls):
    """One-time setup: create (or reuse) the StorPool primary storage, a
    tagged service offering, two data volumes and a test VM.

    Skips everything on unsupported hypervisors (hyperv/lxc) by setting
    ``unsupportedHypervisor`` and returning early.
    """
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Nothing else is set up; individual tests check this flag.
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    # Definition of the zone-local StorPool primary storage.
    storpool_primary_storage = {
        "name": "cloudLocal",
        "zoneid": cls.zone.id,
        "url": "cloudLocal",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "cloudLocal"
    }
    # Compute offering carrying the "test_tags" host/storage tag.
    storpool_service_offerings = {
        "name": "tags",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "test_tags"
    }
    storage_pool = list_storage_pools(cls.apiclient, name='cloudLocal')
    service_offerings = list_service_offering(cls.apiclient, name='tags')
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    cls.debug(pprint.pformat(storage_pool))
    # Reuse existing resources when present, create otherwise.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]
    #The version of CentOS has to be supported
    template = get_template(apiclient=cls.apiclient,
                            zone_id=cls.zone.id,
                            template_filter='self',
                            template_name="centos6",
                            domain_id=cls.domain.id,
                            hypervisor=cls.hypervisor)
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))
    # Two data volumes on the "Small" disk offering.
    cls.volume_1 = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=disk_offerings[0].id,
    )
    cls.volume_2 = Volume.create(
        cls.apiclient,
        {"diskname": "StorPoolDisk-2"},
        zoneid=cls.zone.id,
        diskofferingid=disk_offerings[0].id,
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    # Fixtures for the data-integrity checks performed by the tests.
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    cls._cleanup = []
    cls._cleanup.append(cls.virtual_machine)
    cls._cleanup.append(cls.volume_1)
    cls._cleanup.append(cls.volume_2)
    return
def setUpCloudStack(cls):
    """One-time environment setup for the StorPool QoS tests.

    Resolves (or creates) the "ssd" primary storage and the "ssd"/"qos"
    service and disk offerings -- attaching the ``SP_TEMPLATE=qos``
    resource detail to newly created qos offerings -- then creates the
    test account, two data volumes and a VM with both volumes attached.

    Bug fix: the original indexed ``storage_pool[0]`` *before* checking
    whether the lookup returned ``None``, which raised ``TypeError`` on a
    fresh deployment instead of creating the pool. The lookup result is
    resolved first now, and ``cls.primary_storage`` is assigned after.
    """
    cls._cleanup = []
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Nothing else is set up; individual tests check this flag.
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    # Pick the zone whose internal DNS matches the management server IP.
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    cls.debug("##################### zone %s" % cls.zone)

    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()

    storpool_primary_storage = {
        "name": "ssd",
        TestData.scope: "ZONE",
        "url": "ssd",
        TestData.provider: "StorPool",
        "path": "/dev/storpool",
        TestData.capacityBytes: 2251799813685248,
        TestData.hypervisor: "KVM"
    }
    # Plain offering without QoS limits.
    storpool_without_qos = {
        "name": "ssd",
        "displaytext": "ssd",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    # Offering that will be bound to the StorPool "qos" template below.
    storpool_qos_template = {
        "name": "qos",
        "displaytext": "qos",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    disk_offering_ssd = {
        "name": "ssd",
        "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
        "disksize": 10,
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        TestData.tags: "ssd",
        "storagetype": "shared"
    }
    disk_offering_qos = {
        "name": "qos",
        "displaytext": "SP_DO_1 (5GB Min IOPS = 300; Max IOPS = 500)",
        "disksize": 10,
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        TestData.tags: "ssd",
        "storagetype": "shared"
    }
    cls.template_name = storpool_primary_storage.get("name")
    cls.template_name_2 = "qos"

    # FIX: resolve the lookup result BEFORE dereferencing it; the original
    # did `cls.primary_storage = storage_pool[0]` ahead of the None check
    # and crashed with TypeError when the pool did not exist yet.
    storage_pool = list_storage_pools(cls.apiclient, name="ssd")
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    # Service offerings: reuse when present, create (and tag qos) otherwise.
    service_offerings_ssd = list_service_offering(cls.apiclient, name=cls.template_name)
    service_offerings_qos = list_service_offering(cls.apiclient, name=cls.template_name_2)
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient, storpool_without_qos)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    if service_offerings_qos is None:
        service_offerings_qos = ServiceOffering.create(
            cls.apiclient, storpool_qos_template)
        # A newly created qos offering must point at the StorPool "qos"
        # template via a resource detail.
        ResourceDetails.create(cls.apiclient,
                               resourceid=service_offerings_qos.id,
                               resourcetype="ServiceOffering",
                               details={"SP_TEMPLATE": "qos"},
                               fordisplay=True)
    else:
        service_offerings_qos = service_offerings_qos[0]
    cls.service_offering_ssd = service_offerings_ssd
    cls.service_offering_qos = service_offerings_qos

    # Disk offerings: same reuse-or-create (and qos tagging) pattern.
    cls.disk_offering_ssd = list_disk_offering(cls.apiclient, name=cls.template_name)
    cls.disk_offering_qos = list_disk_offering(cls.apiclient, name=cls.template_name_2)
    if cls.disk_offering_ssd is None:
        cls.disk_offering_ssd = DiskOffering.create(cls.apiclient, disk_offering_ssd)
    else:
        cls.disk_offering_ssd = cls.disk_offering_ssd[0]
    if cls.disk_offering_qos is None:
        cls.disk_offering_qos = DiskOffering.create(cls.apiclient, disk_offering_qos)
        ResourceDetails.create(cls.apiclient,
                               resourceid=cls.disk_offering_qos.id,
                               resourcetype="DiskOffering",
                               details={"SP_TEMPLATE": "qos"},
                               fordisplay=True)
    else:
        cls.disk_offering_qos = cls.disk_offering_qos[0]

    #The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    if template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.services["diskofferingid"] = cls.disk_offering_ssd.id

    # Dedicated account (type 1, role 1) for this class; registered for cleanup.
    cls.account = cls.helper.create_account(cls.apiclient,
                                            cls.services["account"],
                                            accounttype=1,
                                            domainid=cls.domain.id,
                                            roleid=1)
    cls._cleanup.append(cls.account)
    securitygroup = SecurityGroup.list(cls.apiclient,
                                       account=cls.account.name,
                                       domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(cls.apiclient,
                                  account=cls.account.name,
                                  domainid=cls.account.domainid,
                                  id=securitygroup.id)

    # Two data volumes attached to the test VM below.
    cls.volume_1 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.volume_2 = Volume.create(cls.apiclient,
                                 cls.services,
                                 account=cls.account.name,
                                 domainid=cls.account.domainid)
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering_ssd.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume_1)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume_2)
    cls.template = template
    cls.hostid = cls.virtual_machine.hostid
    # Fixtures for the data-integrity checks performed by the tests.
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpClass(cls):
    """One-time suite setup for TestMultipleVPNAccessonVPC.

    Provisions everything the remote-access-VPN-on-VPC tests need:
    raises VPN/account global limits, restarts the management server,
    prepares a VPN-client template and VMs in one account, and a VPC
    with a network tier, remote-access VPN and a guest VM in a second
    account.  Everything created here is queued on ``cls._cleanup``.
    Raises on any provisioning failure (after attempting cleanup).
    """
    cloudstackTestClient = super(
        TestMultipleVPNAccessonVPC,
        cls
    ).getClsTestClient()
    cls.debug("Obtain the Admin's API Client")
    cls.api_client = cloudstackTestClient.getApiClient()
    cls.debug("Get the dictionary information that will be used during CCP tests, from test_data.py present on the Client")
    cls.services = cloudstackTestClient.getParsedTestDataConfig()
    if cls.services is None:
        cls.debug("Services Object is None")
        raise Exception("Services Object is None")
    cls.debug("Procure the CloudStack Setup configuration Information")
    # Raw marvin config JSON is kept for restart_mgmt_server() below.
    with open(cls.services["config_path"], 'rb') as fp:
        cls.pullconfig = json.load(fp)
    cls.debug("Update 'remote.access.vpn.client.iprange','remote.access.vpn.user.limit','max.account.primary.storage','max.account.public.ips','max.account.user.vms','max.account.volumes','max.account.cpus', Global Configuration Parameters")
    # All account limits below are scaled from the configured number of
    # VPN clients so the suite does not hit quota errors mid-run.
    update_vpn_client_iprange = Configurations.update(
        cls.api_client,
        name="remote.access.vpn.client.iprange",
        value="10.1.2.1-10.1.2.120")
    cls.debug("'remote.access.vpn.client.iprange' Global Configuration Parameter Updated Successfully")
    update_vpn_user_limit = Configurations.update(
        cls.api_client,
        name="remote.access.vpn.user.limit",
        value=str(int(cls.services["vpnclient_count"]*2))
    )
    cls.debug("'remote.access.vpn.user.limit' Global Configuration Parameter Updated Successfully")
    update_max_account_primary_stg_limit = Configurations.update(
        cls.api_client,
        name="max.account.primary.storage",
        value=str(int(cls.services["vpnclient_count"]*20 + 100))
    )
    cls.debug("'max.account.primary.storage' Global Configuration Parameter Updated Successfully")
    update_max_account_public_ips_limit = Configurations.update(
        cls.api_client,
        name="max.account.public.ips",
        value=str(int(cls.services["vpnclient_count"]*2 + 10))
    )
    cls.debug("'max.account.public.ips' Global Configuration Parameter Updated Successfully")
    update_max_account_user_vms_limit = Configurations.update(
        cls.api_client,
        name="max.account.user.vms",
        value=str(int(cls.services["vpnclient_count"]*2))
    )
    cls.debug("'max.account.user.vms' Global Configuration Parameter Updated Successfully")
    update_max_account_volumes_limit = Configurations.update(
        cls.api_client,
        name="max.account.volumes",
        value=str(int(cls.services["vpnclient_count"]*2))
    )
    cls.debug("'max.account.volumes' Global Configuration Parameter Updated Successfully")
    update_max_account_cpus_limit = Configurations.update(
        cls.api_client,
        name="max.account.cpus",
        value=str(int(cls.services["vpnclient_count"]*2))
    )
    cls.debug("'max.account.cpus' Global Configuration Parameter Updated Successfully")
    # Global-config changes only take effect after a management-server
    # restart; the fixed 120 s sleep gives it time to come back up.
    cls.debug("Restart the Management Server")
    TestMultipleVPNAccessonVPC.restart_mgmt_server(cls.services["config_path"])
    cls.debug("Completed restarting the Management Server")
    cls.debug("Wait for 120 seconds...")
    time.sleep(120)
    cls.debug("End of 120 seconds wait time....")
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client)
    cls.zone = get_zone(
        cls.api_client,
        zone_name = cls.services["zone_vpn"]["name"])
    cls.debug("Use an Existing 'Tiny Instance' Service Offering on the Setup")
    list_service_offerings = []
    list_service_offerings = list_service_offering(
        cls.api_client,
        keyword="Tiny Instance",
    )
    cls._cleanup = []
    # Reuse a matching offering when present; only offerings created
    # here are appended to _cleanup (pre-existing ones are left alone).
    if list_service_offerings is not None:
        cls.debug("Found an Existing 'Tiny Instance' Service Offering on the Setup")
        cls.service_offering = list_service_offerings[0]
    else:
        cls.debug("Create a service offering which will be used for VM deployments in this test")
        cls.service_offering = ServiceOffering.create(
            cls.api_client,
            cls.services["service_offering"]
        )
        cls.debug("Add the created service offering to the _cleanup queue")
        cls._cleanup.append(cls.service_offering)
    try:
        cls.debug("Create or Use Existing Account to own the VPN Clients, which is used to test Remote VPN Access to VPC")
        # Credentials are redacted in this source; the user must already
        # exist in the ROOT domain.
        cls.api_client_vpn_client_reg_user = cloudstackTestClient.getUserApiClient(
            UserName="******",
            DomainName="ROOT"
        )
        list_vpn_client_regular_user = User.list(
            cls.api_client,
            username="******"
        )
        cls.debug("Procure the Account Name and DomainID Information of the Regular Account")
        cls.vpn_client_reg_acct_name = list_vpn_client_regular_user[0].account
        cls.vpn_client_reg_domain_id = list_vpn_client_regular_user[0].domainid
        list_vpn_client_regular_user_acct = Account.list(
            cls.api_client,
            name = cls.vpn_client_reg_acct_name,
            listall = True
        )
        # Wrap the raw response in an Account object so cleanup can
        # call delete() on it later.
        cls._cleanup.append(Account(list_vpn_client_regular_user_acct[0].__dict__))
        # Register a Template that already has VPN client installed on it. The template registered here
        # has extra scripts to facilitate automated operations to execute Test Cases.
        # Template has pre-configured configuration files required for the VPN Client operations.
        # The following files are present on the registered template. The locations listed are
        # paths on a VM deployed from this template:
        # 1. "/tmp/ipsec.conf"
        # 2. "/tmp/ipsec.secrets"
        # 3. "/tmp/options.xl2tpd.client"
        # 4. "/tmp/xl2tpd.conf"
        # 5. "/tmp/vpnclient_services.sh"
        # 6. "/tmp/firstconn_expectscript.exp"
        # 7. "/tmp/secondconn_expectscript.exp"
        cls.debug("Use an Existing VPN Client Template on the Setup")
        list_vpn_client_templates = list_templates(
            cls.api_client_vpn_client_reg_user,
            keyword="VPNClient",
            templatefilter="featured",
            zoneid = cls.zone.id
        )
        if list_vpn_client_templates is not None:
            cls.debug("Found an Existing VPN Client Template on the Setup")
            cls.template = list_vpn_client_templates[0]
        else:
            cls.debug("Register a Template that already has VPN client installed on it")
            cls.template = Template.register(
                cls.api_client,
                cls.services["vpn_template"],
                zoneid=cls.zone.id,
                hypervisor='XenServer'
            )
            cls._cleanup.append(cls.template)
            # Give the freshly registered template time to download
            # before polling for readiness below.
            cls.debug("Sleep for {0} seconds specified in the dictionary before checking for Template's Availability".format(cls.services["sleep"]))
            time.sleep(cls.services["sleep"])
        cls.debug("Procure Timeout Value from the dictionary")
        timeout = cls.services["timeout"]
        # Poll (5 s interval) until the template listing returns a list
        # or the configured timeout expires.
        while True:
            list_template_response = list_templates(
                cls.api_client_vpn_client_reg_user,
                templatefilter='featured',
                id=cls.template.id,
            )
            if isinstance(list_template_response, list):
                break
            elif timeout == 0:
                raise Exception("List template failed!")
            time.sleep(5)
            timeout = timeout - 1
        cls.debug("Verify template response to check whether template is present")
        if list_template_response is None:
            raise Exception("Check whether the VPN Client Template is available")
        template_response = list_template_response[0]
        if template_response.isready == False:
            raise Exception("Template state is not ready, it is %r" % template_response.isready)
        # Queue that holds all the VPN Client VMs Information
        cls.vpnclientvms = []
        cls.debug("Deploy {0} VPN Clients in the account".format(int(cls.services["vpnclient_count"])))
        # NOTE: xrange — this module targets Python 2.
        for vm in xrange(0,int(cls.services["vpnclient_count"])):
            cls.debug("Deploy a new VM {0} in first account. This VM which will be configured as VPN Client".format(int(vm)))
            new_vpnclient_vm = VirtualMachine.create(
                cls.api_client_vpn_client_reg_user,
                cls.services["virtual_machine"],
                zoneid=cls.zone.id,
                serviceofferingid=cls.service_offering.id,
                templateid=cls.template.id,
            )
            cls.debug("Add new VM {0} to the vpnclientvms Queue".format(int(vm)))
            cls.vpnclientvms.append(new_vpnclient_vm)
            cls.debug("Allow SSH Access to the new VPN Client VM {0}".format(int(vm)))
            new_vpnclient_vm.access_ssh_over_nat(
                cls.api_client_vpn_client_reg_user,
                cls.services,
                new_vpnclient_vm,
                allow_egress=True
            )
            cls.debug("VM for VPNClient Access Got Created with Public IP Address %s" % new_vpnclient_vm.public_ip)
        cls.debug("Create or Use existing Account in which we deploy VPCs and test remote access to them from the First Account's VMs present on isolated Network")
        cls.api_client_vpn_server_reg_user = cloudstackTestClient.getUserApiClient(
            UserName="******",
            DomainName="ROOT"
        )
        list_vpn_server_regular_user = User.list(
            cls.api_client,
            username="******"
        )
        cls.debug("Procure the Account Name and DomainID Information of the Regular Account")
        cls.vpn_server_reg_acct_name = list_vpn_server_regular_user[0].account
        cls.vpn_server_reg_domain_id = list_vpn_server_regular_user[0].domainid
        list_vpn_server_regular_user_acct = Account.list(
            cls.api_client,
            name = cls.vpn_server_reg_acct_name,
            listall = True
        )
        cls._cleanup.append(Account(list_vpn_server_regular_user_acct[0].__dict__))
        cls.debug("Use an Existing 'VPC off-' Service Offering on the Setup")
        list_available_vpc_offerings = list_vpc_offerings(
            cls.api_client,
            keyword="VPC off-",
        )
        if list_available_vpc_offerings is not None:
            cls.debug("Found an Existing 'VPC off-' Service Offering on the Setup")
            cls.vpc_offering = VpcOffering(list_available_vpc_offerings[0].__dict__)
        else:
            cls.debug("Creating a VPC offering..")
            cls.vpc_offering = VpcOffering.create(
                cls.api_client,
                cls.services["vpc_offering"]
            )
            # Add the created VPC Offering to __cleanup queue
            cls._cleanup.append(cls.vpc_offering)
        # Enable to created VPC Offering inorder to deploy VPCs with it
        cls.debug("Enabling the VPC offering created")
        cls.vpc_offering.update(cls.api_client, state='Enabled')
        cls.debug("Enabled the VPC Offering")
        # Create a VPC for the second account
        cls.debug("Creating a VPC in the account: %s" % cls.vpn_server_reg_acct_name)
        cls.firstvpc = VPC.create(
            cls.api_client_vpn_server_reg_user,
            cls.services["vpc_remote_vpn"],
            vpcofferingid=cls.vpc_offering.id,
            zoneid=cls.zone.id
        )
        cls.debug("Use an Existing 'NET_OFF-RemoteAccessVPNTest-' Network Offering on the Setup")
        list_available_network_offerings = list_network_offerings(
            cls.api_client,
            keyword="NET_OFF-RemoteAccessVPNTest-",
        )
        if list_available_network_offerings is not None:
            cls.debug("Found an Existing 'NET_OFF-RemoteAccessVPNTest-' Network Offering on the Setup")
            cls.network_off = NetworkOffering(list_available_network_offerings[0].__dict__)
        else:
            cls.debug('Create NetworkOffering for Networks in VPC')
            # Randomized suffix avoids name collisions across runs.
            cls.services["vpc_network_offering"]["name"] = "NET_OFF-RemoteAccessVPNTest-"+ random_gen()
            cls.network_off = NetworkOffering.create(
                cls.api_client,
                cls.services["vpc_network_offering"],
                conservemode=False
            )
            # Add the created Network Offering to __cleanup queue
            cls._cleanup.append(cls.network_off)
            # Enable Network offering
            cls.network_off.update(cls.api_client, state='Enabled')
            cls.debug('Created and Enabled NetworkOffering')
        cls.services["network"]["name"] = "NETWORK-" + random_gen()
        # Create First Network Tier in the First VPC created for second account using the network offering created above.
        cls.debug('Adding Network=%s' % cls.services["network"])
        cls.firstnetworktier = Network.create(
            cls.api_client_vpn_server_reg_user,
            cls.services["network"],
            networkofferingid=cls.network_off.id,
            zoneid=cls.zone.id,
            gateway=cls.services["firstnetwork_tier"]["gateway"],
            netmask=cls.services["firstnetwork_tier"]["netmask"],
            vpcid=cls.firstvpc.id
        )
        cls.debug("Created network with ID: %s" % cls.firstnetworktier.id)
        # Create Ingress and Egress NetworkACL rules for First Network Tier in the First VPC created for second account.
        cls.debug("Adding NetworkACL rules to make Network accessible for all Protocols and all CIDRs ")
        NetworkACL.create(
            cls.api_client_vpn_server_reg_user,
            cls.services["all_rule"],
            networkid=cls.firstnetworktier.id,
            traffictype='Ingress'
        )
        NetworkACL.create(
            cls.api_client_vpn_server_reg_user,
            cls.services["all_rule"],
            networkid=cls.firstnetworktier.id,
            traffictype='Egress'
        )
        listFirstVPC = VPC.list(
            cls.api_client_vpn_server_reg_user,
            id=cls.firstvpc.id
        )
        cls.debug("Information about the VPC: {0}".format(str(listFirstVPC)))
        cls.debug("Obtain the source nat IP Address of the first VPC.")
        cls.listFirstVPCPublicIpAddress = list_publicIP(
            cls.api_client_vpn_server_reg_user,
            issourcenat="true",
            vpcid=listFirstVPC[0].id,
            listall="true"
        )
        cls.debug("Information about the VPC's Source NAT IP Address: {0}".format(str(cls.listFirstVPCPublicIpAddress)))
        cls.debug("Enable Remote Access VPN on the source nat Public IP Address of the first VPC")
        cls.FirstVPNonFirstVPC = Vpn.create(
            cls.api_client_vpn_server_reg_user,
            cls.listFirstVPCPublicIpAddress[0].id
        )
        cls.debug("Successfully Created First VPN on VPC with preshared key:"+ cls.FirstVPNonFirstVPC.presharedkey)
        cls.listfirstNetworkTier = list_networks(
            cls.api_client_vpn_server_reg_user,
            id=cls.firstnetworktier.id,
            listall=True
        )
        cls.debug("Create a VM using the default template on the First Network Tier in the First VPC of the Second Account")
        cls.vm1 = VirtualMachine.create(
            cls.api_client_vpn_server_reg_user,
            cls.services["virtual_machine"],
            zoneid=cls.zone.id,
            serviceofferingid=cls.service_offering.id,
            templateid=cls.template.id,
            networkids=[str(cls.firstnetworktier.id)]
        )
        cls.debug("First VM deployed in the first Network Tier")
    except Exception as e:
        # Best-effort teardown of whatever was provisioned before the
        # failure, then re-raise with the original exception text.
        cleanup_resources(cls.api_client, cls._cleanup)
        printex = traceback.format_exc()
        cls.debug("Exception Occurred : {0}".format(printex))
        raise Exception("Warning: Exception during Setting Up the Test Suite Configuration : %s" % e)
    return
def setUpClass(cls):
    """One-time suite setup for TestVmSnapshot.

    Resolves zone/domain/template/offerings from the live CloudStack
    deployment, reuses (or creates) the "cloud-test-dev-2" service
    offering, and provisions one data volume plus one VM used by the
    snapshot tests.  VM and volume are registered for teardown via
    ``cls._cleanup``.
    """
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    # Setup test data
    td = TestData()
    cls.testdata = td.testdata
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    # Assumes at least one cluster exists — TODO confirm for new setups.
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    import pprint
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
                with description %s" % cls.services["ostype"]
    cls.template = template
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOfferingOnly = cls.testdata[TestData.serviceOfferingOnly]
    storage_pool = list_storage_pools(cls.apiclient, name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    storage_pool = list_storage_pools(cls.apiclient, name=primarystorage2.get("name"))
    cls.primary_storage2 = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    #===============================================================================
    #
    #         service_offering = list_service_offering(
    #             cls.apiclient,
    #             name="tags"
    #             )
    #         if service_offering is not None:
    #             cls.service_offering = service_offering[0]
    #         else:
    #             cls.service_offering = ServiceOffering.create(
    #                 cls.apiclient,
    #                 serviceOffering)
    #===============================================================================
    # Reuse the named offering when present; create it only if missing.
    service_offering_only = list_service_offering(cls.apiclient, name="cloud-test-dev-2")
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiclient,
            serviceOfferingOnly)
    assert cls.service_offering_only is not None
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Create 1 data volume_1
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    # Resources that are to be destroyed
    # NOTE(review): this rebinds _cleanup rather than appending; fine
    # here because nothing was appended earlier in this method.
    cls._cleanup = [cls.virtual_machine, cls.volume]
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpClass(cls):
    """One-time suite setup for TestStoragePool.

    Connects to the StorPool API, locates (or creates) the "ssd"
    primary storage pool and service offering, ensures the StorPool
    test user/account exists, and provisions three data volumes and
    two VMs under the regular-user API client.  All created resources
    are queued on ``cls._cleanup``.  Skips setup early on unsupported
    hypervisors (Hyper-V, LXC).
    """
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    # Credentials are redacted in this source; user must pre-exist.
    cls.userapiclient = testClient.getUserApiClient(
        UserName="******",
        DomainName="ROOT")
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Bail out; tests check unsupportedHypervisor and skip.
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    # Pick the zone whose internal DNS matches the management server IP.
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    # StorPool-backed primary storage definition (zone-wide scope).
    storpool_primary_storage = {
        "name": "ssd",
        "zoneid": cls.zone.id,
        "url": "ssd",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "ssd"
    }
    storpool_service_offerings = {
        "name": "ssd",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }
    storage_pool = list_storage_pools(cls.apiclient, name='ssd')
    service_offerings = list_service_offering(cls.apiclient, name='ssd')
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    cls.disk_offerings = disk_offerings[0]
    # Reuse existing pool/offering when present, create otherwise.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient,
            storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    if template == FAILED:
        assert False, "get_template() failed to return template\
                with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.service_offering = service_offerings
    # Ensure the StorPoolUser account exists; create it via the raw
    # createAccount API command if the lookup comes back empty.
    user = list_users(cls.apiclient, account='StorPoolUser', domainid=cls.domain.id)
    account = list_accounts(cls.apiclient, id=user[0].accountid)
    if account is None:
        role = Role.list(cls.apiclient, name='User')
        cmd = createAccount.createAccountCmd()
        cmd.email = '*****@*****.**'
        cmd.firstname = 'StorPoolUser'
        cmd.lastname = 'StorPoolUser'
        cmd.password = '******'
        cmd.username = '******'
        cmd.roleid = role[0].id
        account = cls.apiclient.createAccount(cmd)
    else:
        account = account[0]
    cls.account = account
    #        cls.tmp_files = []
    #        cls.keypair = SSHKeyPair.create(
    #                                    cls.apiclient,
    #                                    name=random_gen() + ".pem",
    #                                    account=cls.account.name,
    #                                    domainid=cls.account.domainid)
    #
    #        keyPairFilePath = tempfile.gettempdir() + os.sep + cls.keypair.name
    #        # Clenaup at end of execution
    #        cls.tmp_files.append(keyPairFilePath)
    #
    #        cls.debug("File path: %s" % keyPairFilePath)
    #
    #        f = open(keyPairFilePath, "w+")
    #        f.write(cls.keypair.privatekey)
    #        f.close()
    #
    #        os.system("chmod 400 " + keyPairFilePath)
    #
    #        cls.keyPairFilePath = keyPairFilePath
    # Three data volumes on the ssd disk offering, owned by the user.
    cls.volume_1 = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume_2 = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-2"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.userapiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10,
    )
    cls.virtual_machine2 = VirtualMachine.create(
        cls.userapiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10,
    )
    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    # Teardown order: VMs first, then volumes.
    cls._cleanup = []
    cls._cleanup.append(cls.virtual_machine)
    cls._cleanup.append(cls.virtual_machine2)
    cls._cleanup.append(cls.volume_1)
    cls._cleanup.append(cls.volume_2)
    cls._cleanup.append(cls.volume)
    return
def test_01_vm_deployment_with_compute_offering_with_ha_enabled(self):
    """ Test VM deployments (Create HA enabled Compute Service Offering and VM) """

    # Steps,
    #1. Create a Compute service offering with the 'Offer HA' option selected.
    #2. Create a Guest VM with the compute service offering created above.
    # Validations,
    #1. Ensure that the offering is created and that in the UI the 'Offer HA' field is enabled (Yes)
    #The listServiceOffering API should list 'offerha' as true.
    #2. Select the newly created VM and ensure that the Compute offering field value lists the compute service offering that was selected.
    # Also, check that the HA Enabled field is enabled 'Yes'.

    # Fetch the HA-enabled offering prepared in setup and verify the
    # API reports it with offerha == true.
    ha_offerings = list_service_offering(
        self.apiclient,
        id=self.service_offering_with_ha.id
    )
    self.assertEqual(
        isinstance(ha_offerings, list),
        True,
        "listServiceOfferings returned invalid object in response."
    )
    self.assertNotEqual(
        len(ha_offerings),
        0,
        "listServiceOfferings returned empty list."
    )
    self.assertEqual(
        ha_offerings[0].offerha,
        True,
        "The service offering is not HA enabled"
    )

    # Deploy a guest VM with that offering and confirm the deployed VM
    # carries the HA-enabled flag.
    deployed_vm = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_with_ha.id
    )
    vm_list = VirtualMachine.list(
        self.apiclient,
        id=deployed_vm.id,
        listall=True
    )
    self.assertEqual(
        isinstance(vm_list, list),
        True,
        "listVirtualMachines returned invalid object in response."
    )
    self.assertNotEqual(
        len(vm_list),
        0,
        "listVirtualMachines returned empty list."
    )
    self.debug("Deployed VM on host: %s" % vm_list[0].hostid)
    self.assertEqual(
        vm_list[0].haenable,
        True,
        "VM not created with HA enable tag"
    )
def setUpCloudStack(cls):
    """Build and boot CloudStack at the pre-globalId commit, then
    provision the live-migration fixtures.

    Flow: check out/build the 'uuid' commit, start CloudStack via
    ``mvn jetty:run``, tail the log until Jetty reports started, reset
    the sp.migration.to.global.ids.completed DB flag, resolve zone/
    template/offerings, create one VM + volume on the local cluster
    and one VM + volume on the remote cluster, then switch the working
    tree to the globalId commit for the actual tests.
    """
    super(MigrationUuidToGlobalIdLiveMigration, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build the management server at the older (uuid-based) commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    # Own process group so the whole mvn tree can be signalled at once.
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the build log until Jetty announces it is up; an EOF on
    # the tail means CloudStack died, so kill the process group.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Force the uuid->globalId migration to re-run by resetting its
    # completion flag in the configuration table.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
                with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    storage_pool = list_storage_pools(cls.apiclient, name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    # Reuse the "ssd" offering when present; create it otherwise.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None
    cls.disk_offering = disk_offering[0]
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    cls.disk_offering_20 = disk_offering_20[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Live migration needs two hosts in both the local and the remote
    # cluster.  NOTE(review): the assert messages say "less than 1"
    # but the condition actually requires at least 2 hosts.
    cls.local_cluster = cls.helper.get_local_cluster()
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.local_cluster.id)
    assert len(cls.host) > 1, "Hosts list is less than 1"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]
    cls.remote_cluster = cls.helper.get_remote_cluster()
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Hosts list is less than 1"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]
    # VM + data volume pinned to the local cluster.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)
    #vm and volume on remote
    cls.virtual_machine_remote = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine_remote)
    cls.volume_remote = Volume.create(cls.apiclient,
                                      cls.testdata[TestData.volume_1],
                                      account=cls.account.name,
                                      domainid=cls.domain.id,
                                      zoneid=cls.zone.id,
                                      diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume_remote)
    #change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpCloudStack(cls):
    """Build and boot CloudStack at the pre-globalId commit, then
    provision migration fixtures for TestMigrationFromUuidToGlobalId.

    Flow: build/start CloudStack at the 'uuid' commit, reset the
    sp.migration.to.global.ids.completed DB flag, resolve zone/
    template/offerings, create three VMs plus a data volume, write
    random data and take VM snapshots (two to revert, one to delete),
    take two volume snapshots (secondary-storage and bypassed) with a
    template from each, then switch to the globalId commit.
    """
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Build the management server at the older (uuid-based) commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    # Own process group so the whole mvn tree can be signalled at once.
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the build log until Jetty reports started; EOF on the
    # tail means CloudStack died, so kill the process group.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Force the uuid->globalId migration to re-run by resetting its
    # completion flag in the configuration table.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    #The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
                with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    storage_pool = list_storage_pools(cls.apiclient, name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    # Reuse the "ssd" offering when present; create it otherwise.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    #check that ROOT disk is created with uuid
    # On the pre-migration commit the StorPool volume is named by the
    # CloudStack UUID; a lookup failure means the precondition fails.
    root_volume = list_volumes(cls.apiclient,
                               virtualmachineid=cls.virtual_machine3.id,
                               type="ROOT")
    try:
        spvolume = cls.spapi.volumeList(volumeName=root_volume[0].id)
    except spapi.ApiError as err:
        cfg.logger.info("Root volume is not created with UUID")
        raise Exception(err)
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)
    cls.random_data_vm_snapshot1 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    volume_attached = cls.virtual_machine.attach_volume(
        cls.apiclient,
        cls.volume)
    # Snapshot 1: write marker data, snapshot (disk-only), then delete
    # the data so a later revert can prove the snapshot restored it.
    cls.helper.write_on_disks(cls.random_data_vm_snapshot1,
                              cls.virtual_machine,
                              cls.test_dir,
                              cls.random_data)
    MemorySnapshot = False
    cls.vm_snapshot1 = cls.helper.create_vm_snapshot(
        MemorySnapshot,
        cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot1,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)
    # Snapshot 2: same pattern with fresh marker data.
    cls.random_data_vm_snapshot2 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot2,
                              cls.virtual_machine,
                              cls.test_dir,
                              cls.random_data)
    cls.vm_snapshot2 = cls.helper.create_vm_snapshot(
        MemorySnapshot,
        cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot2,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)
    #vm snapshot to be deleted without revert
    cls.random_data_vm_snapshot3 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot3,
                              cls.virtual_machine,
                              cls.test_dir,
                              cls.random_data)
    cls.vm_snapshot_for_delete = cls.helper.create_vm_snapshot(
        MemorySnapshot,
        cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(
        cls.vm_snapshot_for_delete,
        cls.virtual_machine,
        cls.test_dir,
        cls.random_data)
    # Volume snapshot kept on secondary storage (bypass=False) and a
    # template built from it.
    cls.snapshot_on_secondary = cls.helper.create_snapshot(
        False,
        cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_on_secondary)
    cls.template_on_secondary = cls.helper.create_template_from_snapshot(
        cls.services,
        snapshotid=cls.snapshot_on_secondary.id)
    cls._cleanup.append(cls.template_on_secondary)
    # Volume snapshot that bypasses secondary storage (bypass=True)
    # plus its template.
    cls.snapshot_bypassed = cls.helper.create_snapshot(
        True,
        cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_bypassed)
    cls.template_bypased = cls.helper.create_template_from_snapshot(
        cls.services,
        snapshotid=cls.snapshot_bypassed.id)
    cls._cleanup.append(cls.template_bypased)
    #change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpClass(cls):
    """One-time setup for the TestVolumes class.

    Discovers existing cloud resources (domain, zone, cluster, template,
    two StorPool primary storage pools, disk/service offerings, admin
    account) and creates the shared data volume plus one virtual machine
    used across the tests.  Created resources are queued in
    ``cls._cleanup`` for teardown.
    """
    # Set up API client
    testclient = super(TestVolumes, cls).getClsTestClient()
    cls.apiClient = testclient.getApiClient()
    cls.dbConnection = testclient.getDbConnection()
    cls.services = testclient.getParsedTestDataConfig()

    # Setup test data
    td = TestData()
    cls.testdata = td.testdata

    # Get Resources from Cloud Infrastructure
    cls.domain = get_domain(cls.apiClient)
    cls.zone = get_zone(cls.apiClient, testclient.getZoneForTests())
    # First cluster returned is used; assumes at least one cluster exists.
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiClient)
    # Built-in "system" template serves as the VM root image.
    cls.template = get_template(
        cls.apiClient,
        cls.zone.id,
        account = "system"
    )

    # Test-data descriptors for the two primary storages and offerings.
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOfferingOnly = cls.testdata[TestData.serviceOfferingOnly]

    # Both StorPool primary storage pools must pre-exist in the zone.
    storage_pool = list_storage_pools(
        cls.apiClient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]

    storage_pool = list_storage_pools(
        cls.apiClient,
        name = primarystorage2.get("name")
    )
    cls.primary_storage2 = storage_pool[0]

    disk_offering = list_disk_offering(
        cls.apiClient,
        name="Small"
    )

    assert disk_offering is not None

    # Reuse the service offering when it already exists, else create it.
    service_offering = list_service_offering(
        cls.apiClient,
        name="cloud-test-dev-1"
    )
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiClient,
            serviceOffering)

    # Same reuse-or-create pattern for the second offering.
    service_offering_only = list_service_offering(
        cls.apiClient,
        name="cloud-test-dev-2"
    )
    if service_offering_only is not None:
        cls.service_offering_only = service_offering_only[0]
    else:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiClient,
            serviceOfferingOnly)
    assert cls.service_offering_only is not None

    cls.disk_offering = disk_offering[0]

    # Tests run under the pre-existing "admin" account.
    account = list_accounts(
        cls.apiClient,
        name="admin"
    )
    cls.account = account[0]

    # Create 1 data volume_1
    cls.volume = Volume.create(
        cls.apiClient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )

    # NOTE(review): random suffix in [0, 100] may collide across runs —
    # confirm name uniqueness is not required.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiClient,
        {"name":"StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )

    # Resources that are to be destroyed
    cls._cleanup = [
        cls.virtual_machine,
        cls.volume
    ]
def test_01_find_hosts_for_migration(self):
    """ Test find suitable and not-suitable list of hosts for migration """
    # Plan:
    #   1. Verify the tagged compute offering is visible and carries the
    #      "PREMIUM" host tag.
    #   2. Deploy a guest VM with that offering and confirm it landed on
    #      a host with the matching tag.
    #   3. Ask findHostsForMigration for candidates and check that both
    #      suitable and not-suitable hosts can be distinguished.

    offerings = list_service_offering(
        self.apiclient, id=self.service_offering_with_tag.id)
    self.assertEqual(
        isinstance(offerings, list),
        True,
        "listServiceOfferings returned invalid object in response."
    )
    self.assertNotEqual(
        len(offerings), 0,
        "listServiceOfferings returned empty list.")
    self.assertEqual(
        offerings[0].hosttags, "PREMIUM",
        "The service offering having tag")

    # Deploy a guest VM using the tagged (HA-enabled) offering.
    vm = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_with_tag.id,
    )

    deployed = VirtualMachine.list(self.apiclient, id=vm.id, listall=True)
    self.assertEqual(
        isinstance(deployed, list), True,
        "listVirtualMachines returned invalid object in response.")
    self.assertNotEqual(
        len(deployed), 0,
        "listVirtualMachines returned empty list.")
    self.debug("Deployed VM on host: %s" % deployed[0].hostid)

    # The scheduler must have placed the VM on a PREMIUM-tagged host.
    placement = list_hosts(self.apiclient, id=vm.hostid)
    self.assertEqual(
        isinstance(placement, list), True,
        "listHosts returned invalid object in response.")
    self.assertNotEqual(
        len(placement), 0,
        "listHosts returned empty list.")
    tagged_host = placement[0]
    self.assertEqual(
        tagged_host.hosttags, "PREMIUM",
        "VM is created on a host having appropriate tag. %s" % tagged_host.uuid)

    try:
        candidates = Host.listForMigration(
            self.apiclient, virtualmachineid=vm.id)
    except Exception as e:
        raise Exception(
            "Exception while getting hosts list suitable for migration: %s" % e)

    self.assertEqual(
        isinstance(candidates, list), True,
        "listHosts returned invalid object in response.")
    self.assertNotEqual(
        len(candidates), 0,
        "listHosts returned empty response.")

    # Partition migration candidates by their suitableformigration flag.
    suitable = {h for h in candidates if h.suitableformigration}
    unsuitable = {h for h in candidates if not h.suitableformigration}
    self.assertTrue(unsuitable is not None, "notsuitablehost should not be None")
    self.debug("Suitable Hosts: %s" % suitable)
    self.debug("Not suitable Hosts: %s" % unsuitable)
def setUpCloudStack(cls):
    """One-time setup for the resize-volume tests.

    Skips unsupported hypervisors, locates the zone whose internal DNS
    matches the management server, creates-or-reuses the StorPool primary
    storage, a custom-IOPS disk offering and an IOPS service offering,
    creates the test account, three custom disks and four guest VMs
    (two of them pinned to specific hosts for live-migration tests).

    Fixes vs. previous revision:
    - the two host-count assertion messages claimed "less than 1" while
      the condition actually requires at least 2 hosts per cluster;
    - fails fast with a clear message when no zone matches the
      management server IP (previously a later ``cls.zone.id`` raised an
      opaque AttributeError).
    """
    cls._cleanup = []
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestResizeVolumes, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    # These hypervisors are not supported by this suite; tests skip.
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    # Pick the zone whose internal DNS equals the management server IP.
    cls.zone = None
    for z in list_zones(cls.apiclient):
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z
    assert cls.zone is not None, \
        "No zone found whose internaldns1 matches the management server IP"

    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()

    storpool_primary_storage = cls.testdata[TestData.primaryStorage]
    cls.template_name = storpool_primary_storage.get("name")
    storpool_service_offerings = cls.testdata[TestData.serviceOfferingsIops]
    # Custom-IOPS disk offering tagged with the StorPool template name.
    storpool_disk_offerings = {
        "name": "iops",
        "displaytext": "Testing IOPS on StorPool",
        "customizediops": True,
        "storagetype": "shared",
        "tags" : cls.template_name,
    }

    # Reuse pre-existing resources when present, otherwise create them.
    storage_pool = list_storage_pools(
        cls.apiclient,
        name = cls.template_name
        )
    service_offerings = list_service_offering(
        cls.apiclient,
        name='iops'
        )
    disk_offerings = list_disk_offering(
        cls.apiclient,
        name="iops"
        )

    if disk_offerings is None:
        disk_offerings = DiskOffering.create(
            cls.apiclient, storpool_disk_offerings, custom = True)
    else:
        disk_offerings = disk_offerings[0]
    cls.disk_offerings = disk_offerings

    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.account = cls.helper.create_account(
        cls.apiclient,
        cls.services["account"],
        accounttype = 1,
        domainid=cls.domain.id,
        )
    cls._cleanup.append(cls.account)

    securitygroup = SecurityGroup.list(
        cls.apiclient, account = cls.account.name,
        domainid= cls.account.domainid)[0]
    cls.helper.set_securityGroups(
        cls.apiclient, account = cls.account.name,
        domainid= cls.account.domainid, id = securitygroup.id)

    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    # Live-migration tests need at least two hosts in each cluster.
    cls.local_cluster = cls.helper.get_local_cluster(cls.apiclient, zoneid = cls.zone.id)
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.apiclient, cls.local_cluster.id)
    assert len(cls.host) > 1, "Local cluster has fewer than 2 hosts"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]

    cls.remote_cluster = cls.helper.get_remote_cluster(cls.apiclient, zoneid = cls.zone.id)
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(cls.apiclient, cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Remote cluster has fewer than 2 hosts"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]

    def make_disk():
        # All three data disks share the same custom size/IOPS settings.
        return cls.helper.create_custom_disk(
            cls.apiclient,
            {"diskname":"StorPoolDisk" },
            zoneid=cls.zone.id,
            size = 5,
            miniops = 2000,
            maxiops = 5000,
            account=cls.account.name,
            domainid=cls.account.domainid,
            diskofferingid=cls.disk_offerings.id,
            )

    cls.volume_1 = make_disk()
    cls.volume_2 = make_disk()
    cls.volume = make_disk()

    def make_vm(hostid=None):
        # Custom-IOPS VM; an explicit hostid pins placement for the
        # live-migration tests.
        kwargs = dict(
            zoneid=cls.zone.id,
            templateid=template.id,
            account=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=cls.service_offering.id,
            hypervisor=cls.hypervisor,
            minIops = 1000,
            maxIops = 5000,
            rootdisksize = 10,
            )
        if hostid is not None:
            kwargs["hostid"] = hostid
        return cls.helper.create_vm_custom(
            cls.apiclient,
            {"name":"StorPool-%s" % uuid.uuid4() },
            **kwargs)

    cls.virtual_machine = make_vm()
    cls.virtual_machine2 = make_vm()
    cls.virtual_machine_live_migration_1 = make_vm(hostid = cls.host_on_local_1.id)
    cls.virtual_machine_live_migration_2 = make_vm(hostid = cls.host_on_remote1.id)

    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """One-time setup for the TestStoragePool suite.

    Skips unsupported hypervisors, locates the zone matching the
    management server IP, creates the test account, creates-or-reuses
    the StorPool primary storage and service offering, then creates two
    data volumes and three guest VMs pinned to the first local host.
    """
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    # These hypervisors are not supported by this suite; tests skip.
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = None
    cls._cleanup = []
    # Pick the zone whose internal DNS equals the management server IP.
    # NOTE(review): cls.zone stays None if nothing matches — later
    # cls.zone.id would raise AttributeError; confirm environments
    # always provide a matching zone.
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    td = TestData()
    cls.testdata = td.testdata
    cls.helper = StorPoolHelper()

    cls.account = cls.helper.create_account(
        cls.apiclient,
        cls.services["account"],
        accounttype=1,
        domainid=cls.domain.id,
        roleid=1)
    cls._cleanup.append(cls.account)

    securitygroup = SecurityGroup.list(
        cls.apiclient, account=cls.account.name,
        domainid=cls.account.domainid)[0]
    cls.helper.set_securityGroups(
        cls.apiclient, account=cls.account.name,
        domainid=cls.account.domainid, id=securitygroup.id)

    storpool_primary_storage = cls.testdata[TestData.primaryStorage]
    storpool_service_offerings = cls.testdata[TestData.serviceOffering]
    cls.template_name = storpool_primary_storage.get("name")

    storage_pool = list_storage_pools(cls.apiclient, name=cls.template_name)
    service_offerings = list_service_offering(cls.apiclient, name=cls.template_name)
    # NOTE(review): the three disk-offering lookups are indexed with [0]
    # without a None check — assumes "Small"/"Medium"/"Large" built-in
    # offerings always exist.
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")

    cls.disk_offerings = disk_offerings[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    cls.debug(pprint.pformat(storage_pool))

    # Reuse the primary storage when it exists, otherwise create it.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    # Same reuse-or-create pattern for the service offering.
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient,
            storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    #The version of CentOS has to be supported
    template = get_template(cls.apiclient, cls.zone.id, account="system")
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]

    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    cls.local_cluster = cls.helper.get_local_cluster(cls.apiclient, zoneid=cls.zone.id)
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.apiclient, cls.local_cluster.id)
    cls.remote_cluster = cls.helper.get_remote_cluster(cls.apiclient, zoneid=cls.zone.id)
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.apiclient, cls.remote_cluster.id)

    # Shared test-data settings used by the Volume/VM creations below.
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    cls.services["diskofferingid"] = cls.disk_offerings.id

    cls.volume_1 = Volume.create(
        cls.apiclient,
        cls.services,
        account=cls.account.name,
        domainid=cls.account.domainid)
    cls.volume = Volume.create(
        cls.apiclient,
        cls.services,
        account=cls.account.name,
        domainid=cls.account.domainid)

    # All three VMs are pinned to the first host of the local cluster.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host[0].id,
        rootdisksize=10)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host[0].id,
        rootdisksize=10)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host[0].id,
        rootdisksize=10)

    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """One-time setup for the rename-objects-with-UUIDs suite.

    Skips unsupported hypervisors, locates the zone matching the
    management server IP, creates-or-reuses an "ssd" StorPool primary
    storage and service offering, and creates three data volumes plus
    two guest VMs — one on the local cluster, one on the remote one.
    """
    super(TestRenameObjectsWithUuids, cls).setUpClass()
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    cls.helper = HelperUtil(cls)
    cls._cleanup = []
    testClient = super(TestRenameObjectsWithUuids, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    # These hypervisors are not supported by this suite; tests skip.
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        cls.unsupportedHypervisor = True
        return
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    # Pick the zone whose internal DNS equals the management server IP.
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    # Inline descriptor for the "ssd" StorPool primary storage.
    # NOTE(review): capacityiops is a 18-digit value — presumably a
    # deliberate "effectively unlimited" sentinel; confirm.
    storpool_primary_storage = {
        "name" : "ssd",
        "zoneid": cls.zone.id,
        "url": "ssd",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "ssd"
    }
    storpool_service_offerings = {
        "name": "ssd",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }

    storage_pool = list_storage_pools(
        cls.apiclient,
        name='ssd'
        )
    service_offerings = list_service_offering(
        cls.apiclient,
        name='ssd'
        )
    # NOTE(review): the three disk-offering lookups are indexed with [0]
    # without a None check — assumes the built-in offerings exist.
    disk_offerings = list_disk_offering(
        cls.apiclient,
        name="Small"
        )
    disk_offering_20 = list_disk_offering(
        cls.apiclient,
        name="Medium"
        )
    disk_offering_100 = list_disk_offering(
        cls.apiclient,
        name="Large"
        )

    cls.disk_offering = disk_offerings[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    cls.debug(pprint.pformat(storage_pool))

    # Reuse the primary storage when it exists, otherwise create it.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))

    # Same reuse-or-create pattern for the service offering.
    if service_offerings is None:
        service_offerings = ServiceOffering.create(cls.apiclient, storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    #The version of CentOS has to be supported
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )
    cls.pools = StoragePool.list(cls.apiclient, zoneid=cls.zone.id)
    cls.debug(pprint.pformat(template))
    cls.debug(pprint.pformat(cls.hypervisor))
    if template == FAILED:
        assert False, "get_template() failed to return template\
            with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.service_offering = service_offerings
    cls.debug(pprint.pformat(cls.service_offering))

    # Two hosts per cluster are required (first two of each are used).
    # NOTE(review): the assert message says "less than 1" but the
    # condition requires at least 2 hosts — message is misleading.
    cls.local_cluster = cls.get_local_cluster(zoneid = cls.zone.id)
    cls.host = cls.list_hosts_by_cluster_id(cls.local_cluster.id)
    assert len(cls.host) > 1, "Hosts list is less than 1"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]

    cls.remote_cluster = cls.get_remote_cluster(zoneid = cls.zone.id)
    cls.host_remote = cls.list_hosts_by_cluster_id(cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Hosts list is less than 1"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]

    cls.volume_1 = Volume.create(
        cls.apiclient,
        {"diskname":"StorPoolDisk-1" },
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume_1)

    # NOTE(review): this volume and cls.volume_on_remote below both use
    # the diskname "StorPoolDisk-3" — confirm the duplication is
    # intentional.
    cls.volume = Volume.create(
        cls.apiclient,
        {"diskname":"StorPoolDisk-3" },
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume)

    # VM pinned to the first local-cluster host.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid = cls.host_on_local_1.id,
        rootdisksize=10
    )

    cls.volume_on_remote = Volume.create(
        cls.apiclient,
        {"diskname":"StorPoolDisk-3" },
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id,
    )
    cls._cleanup.append(cls.volume_on_remote)

    # VM pinned to the first remote-cluster host.
    cls.virtual_machine_on_remote = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid = cls.host_on_remote1.id,
        rootdisksize=10
    )

    cls.template = template
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return