def test_02_accountSnapshotClean(self):
    """Verify that deleting an account garbage-collects its snapshots.

    Steps:
    1. listAccounts must return the account under test.
    2. listSnapshots must return the snapshot created for it, and the
       snapshot image must physically exist on the secondary-storage NFS
       share under /secondary/snapshots/$accountid/$volumeid/$snapshot_id.
    3. After deleting the account and waiting out the account cleanup
       interval, listAccounts must no longer find the account and the
       snapshot image must be gone from the NFS share.
    """
    try:
        found_accounts = list_accounts(self.apiclient, id=self.account.id)
        self.assertEqual(
            isinstance(found_accounts, list),
            True,
            "Check list response returns a valid list")
        self.assertNotEqual(
            len(found_accounts),
            0,
            "Check list Accounts response")

        # The snapshot must be visible through the API ...
        found_snapshots = list_snapshots(self.apiclient, id=self.snapshot.id)
        self.assertEqual(
            isinstance(found_snapshots, list),
            True,
            "Check list response returns a valid list")
        self.assertNotEqual(
            found_snapshots,
            None,
            "No such snapshot %s found" % self.snapshot.id)
        self.assertEqual(
            found_snapshots[0].id,
            self.snapshot.id,
            "Check snapshot id in list resources call")

        # ... and physically present on secondary storage.
        self.assertTrue(
            is_snapshot_on_nfs(
                self.apiclient,
                self.dbclient,
                self.config,
                self.zone.id,
                self.snapshot.id),
            "Snapshot was not found on NFS")
    except Exception as exc:
        # Hand the account to the cleanup list so it is still removed
        # even though the test failed.
        self._cleanup.append(self.account)
        self.fail("Exception occured: %s" % exc)

    self.debug("Deleting account: %s" % self.account.name)
    self.account.delete(self.apiclient)

    # Let the background account-cleanup thread run.
    wait_for_cleanup(self.apiclient, configs=["account.cleanup.interval"])

    # Looking up the deleted account must now fail ...
    with self.assertRaises(Exception):
        list_accounts(self.apiclient, id=self.account.id)

    # ... and the snapshot image must have been purged from NFS.
    self.assertFalse(
        is_snapshot_on_nfs(
            self.apiclient,
            self.dbclient,
            self.config,
            self.zone.id,
            self.snapshot.id),
        "Snapshot was still found on NFS after account gc")
    return
def test_02_accountSnapshotClean(self):
    """Snapshot cleanup after account deletion.

    Validates:
    1. listAccounts returns the newly created account.
    2. listSnapshots returns the snapshot that was created, and the
       secondary storage NFS share contains the image under
       /secondary/snapshots/$accountid/$volumeid/$snapshot_id.
    3. After the account is deleted and the cleanup interval has passed,
       listAccounts no longer lists the account and the snapshot image is
       removed from /secondary/snapshots/$accountid/$volumeid/.
    """
    try:
        acct_list = list_accounts(self.apiclient, id=self.account.id)
        self.assertEqual(isinstance(acct_list, list), True,
                         "Check list response returns a valid list")
        self.assertNotEqual(len(acct_list), 0,
                            "Check list Accounts response")

        # Verify the snapshot was created or not.
        snap_list = list_snapshots(self.apiclient, id=self.snapshot.id)
        self.assertEqual(isinstance(snap_list, list), True,
                         "Check list response returns a valid list")
        self.assertNotEqual(snap_list, None,
                            "No such snapshot %s found" % self.snapshot.id)
        self.assertEqual(snap_list[0].id, self.snapshot.id,
                         "Check snapshot id in list resources call")

        # The snapshot image must also be present on the NFS share.
        self.assertTrue(
            is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config,
                               self.zone.id, self.snapshot.id),
            "Snapshot was not found on NFS")
    except Exception as err:
        # Ensure the account is still cleaned up on failure.
        self._cleanup.append(self.account)
        self.fail("Exception occured: %s" % err)

    self.debug("Deleting account: %s" % self.account.name)

    # Delete account, then wait for the account cleanup interval.
    self.account.delete(self.apiclient)
    wait_for_cleanup(self.apiclient, configs=["account.cleanup.interval"])

    with self.assertRaises(Exception):
        acct_list = list_accounts(self.apiclient, id=self.account.id)

    self.assertFalse(
        is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config,
                           self.zone.id, self.snapshot.id),
        "Snapshot was still found on NFS after account gc")
    return
def setUpCloudStack(cls):
    """One-time bootstrap for the uuid -> globalId volume-migration tests.

    Builds and starts CloudStack from the pre-migration (uuid) commit,
    re-arms the 'sp.migration.to.global.ids.completed' DB flag, creates
    the StorPool primary storages, service offerings, VMs, volumes and
    snapshots the tests operate on, and finally switches the deployment
    to the globalId commit so the migration runs against this data.
    """
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Read the zone name out of the marvin configuration file.
    with open(cls.ARGS.cfg) as json_text:
        cfg.logger.info(cls.ARGS.cfg)
        cfg.logger.info(json_text)
        conf = json.load(json_text)
        cfg.logger.info(conf)
        zone = conf['mgtSvr'][0].get('zone')
    # Check out and build the management server at the pre-migration commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the maven log until Jetty reports it is up (or the log ends,
    # in which case CloudStack is stopped and the wait is abandoned).
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # settle time after Jetty reports it is started
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Re-arm the migration flag so the globalId migration runs again
    # when the globalId commit is started later.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, zone_name=zone)
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    cls.host = list_hosts(cls.apiclient, zoneid=cls.zone.id)
    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    # First StorPool template: primary storage + matching service offering.
    cls.sp_template_1 = "-".join(["test-ssd-b", random_gen()])
    cfg.logger.info(
        pprint.pformat("############################ %s" % cls.zone))
    storpool_primary_storage = {
        "name": cls.sp_template_1,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_1,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 155466,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_1
    }
    cls.storpool_primary_storage = storpool_primary_storage
    host, port, auth = cls.getCfgFromUrl(
        url=storpool_primary_storage["url"])
    cls.spapi = spapi.Api(host=host, port=port, auth=auth)
    storage_pool = list_storage_pools(
        cls.apiclient, name=storpool_primary_storage["name"])
    if storage_pool is None:
        # Template does not exist yet: create it on StorPool first,
        # then register the CloudStack primary storage on top of it.
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool
    storpool_service_offerings_ssd = {
        "name": cls.sp_template_1,
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": cls.sp_template_1
    }
    service_offerings_ssd = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd["name"])
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    cls.service_offering = service_offerings_ssd
    cls._cleanup.append(cls.service_offering)
    cfg.logger.info(pprint.pformat(cls.service_offering))
    # Second StorPool template: a second primary storage + offering.
    cls.sp_template_2 = "-".join(["test-ssd2-b", random_gen()])
    storpool_primary_storage2 = {
        "name": cls.sp_template_2,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_2,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 1554,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_2
    }
    cls.storpool_primary_storage2 = storpool_primary_storage2
    storage_pool = list_storage_pools(
        cls.apiclient, name=storpool_primary_storage2["name"])
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage2["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage2)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage2 = storage_pool
    storpool_service_offerings_ssd2 = {
        "name": cls.sp_template_2,
        "displaytext": "SP_CO_2",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "tags": cls.sp_template_2
    }
    service_offerings_ssd2 = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd2["name"])
    if service_offerings_ssd2 is None:
        service_offerings_ssd2 = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd2)
    else:
        service_offerings_ssd2 = service_offerings_ssd2[0]
    cls.service_offering2 = service_offerings_ssd2
    cls._cleanup.append(cls.service_offering2)
    # Restart CloudStack so it picks up the newly registered storages.
    os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
    time.sleep(30)
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Same wait-for-Jetty dance as above.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)
    # Built-in disk offerings used for the data volumes below.
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # Six VMs on the first StorPool service offering.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)
    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine5)
    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine6)
    # Data volumes; volume7/volume8 are intentionally left out of
    # _cleanup (volume7 is deleted below; NOTE(review): volume8 is never
    # cleaned up and reuses the volume_7 test data - confirm intended).
    cls.volume1 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_1],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume1)
    cls.volume2 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_2],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume2)
    cls.volume3 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_3],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume3)
    cls.volume4 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_4],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume4)
    cls.volume5 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_5],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume5)
    cls.volume6 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_6],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume6)
    cls.volume7 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls.volume8 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls.virtual_machine.stop(cls.apiclient, forced=True)
    # Attach (and mostly detach again) the volumes so they are actually
    # created on the primary storage before the migration commit runs.
    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(
        cls.apiclient, cls.volume1)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    # Re-read volume3 after the attach/detach round-trip.
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.start(cls.apiclient)
    # Snapshots of VM5's ROOT volume and of volume7.
    list_root = list_volumes(cls.apiclient,
                             virtualmachineid=cls.virtual_machine5.id,
                             type="ROOT")
    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient,
                                         volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)
    # Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(
        cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)
    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient,
                                         volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)
    # NOTE(review): named "bypassed" but created after
    # bypass_secondary(False) - confirm the intended bypass state.
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                 volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)
    # volume7 is deleted while its snapshots remain.
    Volume.delete(cls.volume7, cls.apiclient)
    # Change to the latest commit with the globalId implementation.
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def test_01_create_account(self):
    """Create an account plus an associated user and verify both.

    Steps:
    1. Create an Account; confirm listAccounts returns it with the
       expected account type and name.
    2. Create a User under that account; confirm listUsers returns it
       with the expected username and state.
    """
    created = Account.create(
        self.apiclient,
        self.services["account"]
    )
    self.debug("Created account: %s" % created.name)
    self._cleanup.append(created)

    found_accounts = list_accounts(self.apiclient, id=created.id)
    self.assertEqual(
        isinstance(found_accounts, list),
        True,
        "Check list accounts for valid data")
    self.assertNotEqual(
        len(found_accounts),
        0,
        "Check List Account response")

    listed = found_accounts[0]
    self.assertEqual(
        created.accounttype,
        listed.accounttype,
        "Check Account Type of Created account")
    self.assertEqual(
        created.name,
        listed.name,
        "Check Account Name of Created account")

    # Now attach a user to the freshly created account and verify it.
    new_user = User.create(
        self.apiclient,
        self.services["user"],
        account=created.name,
        domainid=created.domainid
    )
    self.debug("Created user: %s" % new_user.id)

    found_users = list_users(self.apiclient, id=new_user.id)
    self.assertEqual(
        isinstance(found_users, list),
        True,
        "Check list users for valid data")
    self.assertNotEqual(
        len(found_users),
        0,
        "Check List User response")

    listed_user = found_users[0]
    self.assertEqual(
        new_user.username,
        listed_user.username,
        "Check username of Created user")
    self.assertEqual(
        new_user.state,
        listed_user.state,
        "Check state of created user")
    return
def setUpClass(cls):
    """Suite-level setup: API/DB clients, infrastructure lookups,
    offerings, one data volume and one virtual machine."""
    # Set up API client.
    client = super(TestVolumes, cls).getClsTestClient()
    cls.apiClient = client.getApiClient()
    cls.dbConnection = client.getDbConnection()
    cls.services = client.getParsedTestDataConfig()

    # Static test data.
    cls.testdata = TestData().testdata

    # Get resources from the cloud infrastructure.
    cls.domain = get_domain(cls.apiClient)
    cls.zone = get_zone(cls.apiClient, client.getZoneForTests())
    cls.cluster = list_clusters(cls.apiClient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiClient)
    cls.template = get_template(
        cls.apiClient,
        cls.zone.id,
        account="system"
    )

    primary_cfg = cls.testdata[TestData.primaryStorage]
    primary_cfg2 = cls.testdata[TestData.primaryStorage2]
    offering_cfg = cls.testdata[TestData.serviceOffering]
    offering_only_cfg = cls.testdata[TestData.serviceOfferingOnly]

    # Both primary storages must already be registered.
    cls.primary_storage = list_storage_pools(
        cls.apiClient, name=primary_cfg.get("name"))[0]
    cls.primary_storage2 = list_storage_pools(
        cls.apiClient, name=primary_cfg2.get("name"))[0]

    disk_offerings = list_disk_offering(cls.apiClient, name="Small")
    assert disk_offerings is not None

    # Reuse the offerings when present, otherwise create them.
    existing = list_service_offering(cls.apiClient, name="cloud-test-dev-1")
    if existing is None:
        cls.service_offering = ServiceOffering.create(
            cls.apiClient, offering_cfg)
    else:
        cls.service_offering = existing[0]

    existing_only = list_service_offering(
        cls.apiClient, name="cloud-test-dev-2")
    if existing_only is None:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiClient, offering_only_cfg)
    else:
        cls.service_offering_only = existing_only[0]
    assert cls.service_offering_only is not None

    cls.disk_offering = disk_offerings[0]
    cls.account = list_accounts(cls.apiClient, name="admin")[0]

    # Create 1 data volume.
    cls.volume = Volume.create(
        cls.apiClient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )

    cls.virtual_machine = VirtualMachine.create(
        cls.apiClient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )

    # Resources that are to be destroyed.
    cls._cleanup = [
        cls.virtual_machine,
        cls.volume
    ]
def setUpCloudStack(cls):
    """One-time bootstrap for the uuid -> globalId migration tests.

    Builds and starts CloudStack from the pre-migration (uuid) commit,
    re-arms the 'sp.migration.to.global.ids.completed' DB flag, creates
    the VMs and a data volume on the NFS and StorPool storages, re-tags
    the StorPool primary storages, and finally switches the deployment
    to the globalId commit and refreshes the storage-pool references.
    """
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    # Check out and build the management server at the pre-migration commit.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    # Follow the maven log until Jetty reports it is up (or the log ends,
    # in which case CloudStack is stopped and the wait is abandoned).
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # settle time after Jetty reports it is started
    cfg.logger.info("Processing with the setup")
    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    # Re-arm the migration flag so the globalId migration runs again
    # when the globalId commit is started later.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]
    # Both StorPool primary storages must already be registered.
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    storage_pool2 = list_storage_pools(cls.apiclient,
                                       name=primarystorage2.get("name"))
    cls.primary_storage = storage_pool[0]
    cls.primary_storage2 = storage_pool2[0]
    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None
    # Reuse the "ssd"/"ssd2" offerings when present, otherwise create them.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient, serviceOffering)
    assert cls.service_offering is not None
    service_offering2 = list_service_offering(cls.apiclient, name="ssd2")
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient, serviceOffering2)
    assert cls.service_offering2 is not None
    # NFS-backed offering/pool used for the first two VMs.
    nfs_service_offerings = {
        "name": "nfs",
        "displaytext": "NFS service offerings",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "nfs"
    }
    nfs_storage_pool = list_storage_pools(cls.apiclient, name='primary')
    nfs_service_offering = list_service_offering(cls.apiclient, name='nfs')
    if nfs_service_offering is None:
        nfs_service_offering = ServiceOffering.create(
            cls.apiclient, nfs_service_offerings)
    else:
        nfs_service_offering = nfs_service_offering[0]
    cls.nfs_service_offering = nfs_service_offering
    cls.nfs_storage_pool = nfs_storage_pool[0]
    # The NFS pool may be in maintenance from a previous run.
    cls.nfs_storage_pool = StoragePool.cancelMaintenance(
        cls.apiclient, cls.nfs_storage_pool.id)
    cls.disk_offering = disk_offering[0]
    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]
    # VMs 1-2 on NFS, VM 3 on "ssd", VM 4 on "ssd2".
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.nfs_service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)
    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.nfs_service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)
    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)
    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering2.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)
    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)
    # NOTE(review): each tags value is a single comma-separated string
    # inside a one-element list - confirm this is the intended format.
    cls.primary_storage = StoragePool.update(cls.apiclient,
                                             id=cls.primary_storage.id,
                                             tags=["ssd, nfs, ssd2"])
    cls.primary_storage2 = StoragePool.update(cls.apiclient,
                                              id=cls.primary_storage2.id,
                                              tags=["ssd, ssd2"])
    # change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
    # Re-read the pools after the restart so the references are fresh.
    cls.primary_storage = list_storage_pools(
        cls.apiclient, name=primarystorage.get("name"))[0]
    cls.primary_storage2 = list_storage_pools(
        cls.apiclient, name=primarystorage2.get("name"))[0]
def setUpClass(cls):
    """Suite-level setup for the VM-snapshot tests: clients, infra
    lookups, offerings, one data volume and one virtual machine."""
    client = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = client.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False

    # Static test data.
    cls.testdata = TestData().testdata
    cls.services = client.getParsedTestDataConfig()

    # Get Zone, Domain and templates.
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, client.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # The version of CentOS has to be supported.
    tmpl = get_template(cls.apiclient, cls.zone.id, account="system")
    import pprint
    cls.debug(pprint.pformat(tmpl))
    cls.debug(pprint.pformat(cls.hypervisor))
    if tmpl == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]
    cls.template = tmpl

    primary_cfg = cls.testdata[TestData.primaryStorage]
    primary_cfg2 = cls.testdata[TestData.primaryStorage2]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    offering_only_cfg = cls.testdata[TestData.serviceOfferingOnly]

    # Both primary storages must already be registered.
    cls.primary_storage = list_storage_pools(
        cls.apiclient, name=primary_cfg.get("name"))[0]
    cls.primary_storage2 = list_storage_pools(
        cls.apiclient, name=primary_cfg2.get("name"))[0]

    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offerings is not None

    # Only the "cloud-test-dev-2" offering is used here; reuse it when
    # present, otherwise create it from the test data.
    existing_only = list_service_offering(cls.apiclient,
                                          name="cloud-test-dev-2")
    if existing_only is None:
        cls.service_offering_only = ServiceOffering.create(
            cls.apiclient, offering_only_cfg)
    else:
        cls.service_offering_only = existing_only[0]
    assert cls.service_offering_only is not None

    cls.disk_offering = disk_offerings[0]
    cls.account = list_accounts(cls.apiclient, name="admin")[0]

    # Create 1 data volume.
    cls.volume = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id)

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%d" % random.randint(0, 100)},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering_only.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)

    # Resources that are to be destroyed.
    cls._cleanup = [cls.virtual_machine, cls.volume]

    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def setUpCloudStack(cls):
    """Build and boot CloudStack from the pre-globalid commit, reset the
    globalid-migration DB flag, then create the local/remote VMs and
    volumes used by the live-migration tests.

    Ends by switching the working tree to the globalid commit so the
    tests run against the migrated code.
    """
    super(MigrationUuidToGlobalIdLiveMigration, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)

    # Build the management server from the "uuid" commit and launch it.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")

    # Tail the server log until Jetty reports it is up; if the log ends
    # first, assume startup failed and kill the whole process group.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # grace period for the API to become responsive
    cfg.logger.info("Processing with the setup")

    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()

    # Reset the migration flag so the globalid migration re-runs after
    # the commit switch at the end of this setup.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)

    cls.spapi = spapi.Api.fromConfig(multiCluster=True)

    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]

    # Primary storage must pre-exist for this suite.
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None

    # Reuse the "ssd" service offering when present, create it otherwise.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None

    cls.disk_offering = disk_offering[0]
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    cls.disk_offering_20 = disk_offering_20[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    # Live migration needs at least two hosts in each cluster.
    # NOTE(review): the assertions require len(...) > 1 (i.e. >= 2 hosts)
    # but the message says "less than 1" -- message is misleading.
    cls.local_cluster = cls.helper.get_local_cluster()
    cls.host = cls.helper.list_hosts_by_cluster_id(cls.local_cluster.id)
    assert len(cls.host) > 1, "Hosts list is less than 1"
    cls.host_on_local_1 = cls.host[0]
    cls.host_on_local_2 = cls.host[1]

    cls.remote_cluster = cls.helper.get_remote_cluster()
    cls.host_remote = cls.helper.list_hosts_by_cluster_id(
        cls.remote_cluster.id)
    assert len(cls.host_remote) > 1, "Hosts list is less than 1"
    cls.host_on_remote1 = cls.host_remote[0]
    cls.host_on_remote2 = cls.host_remote[1]

    # VM and data volume pinned to the local cluster's first host.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_local_1.id,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)

    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)

    # vm and volume on remote
    cls.virtual_machine_remote = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        hostid=cls.host_on_remote1.id,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine_remote)

    cls.volume_remote = Volume.create(cls.apiclient,
                                      cls.testdata[TestData.volume_1],
                                      account=cls.account.name,
                                      domainid=cls.domain.id,
                                      zoneid=cls.zone.id,
                                      diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume_remote)

    # change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpCloudStack(cls):
    """Boot CloudStack from the pre-globalid commit and build the volume
    fixture matrix (6 VMs, 7 volumes, several snapshots) whose attach /
    detach history the uuid-to-globalid volume-migration tests exercise.

    The exact attach/detach ordering below IS the fixture: each volume
    ends up in a distinct uuid-era state before the commit switch.
    """
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)

    # Build the management server from the "uuid" commit and launch it.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")

    # Tail the server log until Jetty reports it is up; if the log ends
    # first, assume startup failed and signal the process group.
    # NOTE(review): this class sends SIGINT here while the sibling setups
    # use SIGTERM -- confirm the difference is intentional.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # grace period for the API to become responsive
    cfg.logger.info("Processing with the setup")

    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()

    # Reset the migration flag so the globalid migration re-runs after
    # the commit switch at the end of this setup.
    v = dbclient.execute("select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute("update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)

    cls.spapi = spapi.Api.fromConfig(multiCluster=True)

    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # The version of CentOS has to be supported
    cls.template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]

    # Primary storage must pre-exist for this suite.
    storage_pool = list_storage_pools(
        cls.apiclient,
        name = primarystorage.get("name")
    )
    cls.primary_storage = storage_pool[0]

    disk_offering = list_disk_offering(
        cls.apiclient,
        name="Small"
    )
    disk_offering_20 = list_disk_offering(
        cls.apiclient,
        name="Medium"
    )
    disk_offering_100 = list_disk_offering(
        cls.apiclient,
        name="Large"
    )
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None

    # Reuse the "ssd"/"ssd2" service offerings when present.
    service_offering = list_service_offering(
        cls.apiclient,
        name="ssd"
    )
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None

    service_offering2 = list_service_offering(
        cls.apiclient,
        name="ssd2"
    )
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(
            cls.apiclient,
            serviceOffering2)
    assert cls.service_offering2 is not None

    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]

    account = list_accounts(
        cls.apiclient,
        name="admin"
    )
    cls.account = account[0]

    # Six identical VMs; each gets a uuid-suffixed name to avoid clashes.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine3)

    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine4)

    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine5)

    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name":"StorPool-%s" % uuid.uuid4() },
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine6)

    # Seven data volumes; volume7 is NOT added to _cleanup because it is
    # explicitly deleted at the end of this setup.
    cls.volume1 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume1)

    cls.volume2 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_2],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume2)

    cls.volume3 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_3],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume3)

    cls.volume4 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_4],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume4)

    cls.volume5 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_5],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume5)

    cls.volume6 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_6],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume6)

    cls.volume7 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_7],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )

    # Build the attach/detach history while the VM is stopped.
    cls.virtual_machine.stop(cls.apiclient, forced=True)

    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(cls.apiclient, cls.volume1)

    vol = list_volumes(cls.apiclient, id = cls.volume3.id)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)

    # Re-list after attach/detach to capture the volume's updated state.
    vol = list_volumes(cls.apiclient, id = cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)

    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)

    cls.virtual_machine.start(cls.apiclient)

    # Snapshots of VM5's ROOT volume and of volume7, with secondary
    # storage alternately bypassed and not.
    list_root = list_volumes(cls.apiclient, virtualmachineid = cls.virtual_machine5.id, type = "ROOT")

    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)

    #Snapshot on secondary
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)

    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient, volume_id = cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient, volume_id = cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)

    cls.helper.bypass_secondary(True)
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient, volume_id = list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)

    # volume7 is deleted here on purpose: its snapshots must outlive it.
    Volume.delete(cls.volume7, cls.apiclient)

    # change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpClass(cls):
    """Class-wide fixture: look up zone/template, create the small and
    medium service offerings, and deploy the three VMs the lifecycle
    tests operate on."""
    test_client = super(TestVMLifeCycle, cls).getClsTestClient()
    cls.apiclient = test_client.getApiClient()
    cls.services = test_client.getParsedTestDataConfig()
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # Zone / domain lookups
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype

    os_template = get_template(
        apiclient=cls.apiclient,
        zone_id=cls.zone.id,
        account = "system"
    )
    if os_template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]
    cls.template = os_template

    # Set Zones and disk offerings
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["small"]["template"] = os_template.id
    cls.services["iso1"]["zoneid"] = cls.zone.id

    # Run everything under the pre-existing admin account.
    cls.account = list_accounts(
        cls.apiclient,
        name="admin"
    )[0]

    cls.small_offering = ServiceOffering.create(
        cls.apiclient,
        cls.services["service_offerings"]["small"],
        tags="cloud-test-dev-1"
    )
    cls.medium_offering = ServiceOffering.create(
        cls.apiclient,
        cls.services["service_offerings"]["medium"],
        tags="cloud-test-dev-1"
    )

    def _deploy(offering_id):
        # All three VMs share the "small" template definition; only the
        # service offering differs.
        return VirtualMachine.create(
            cls.apiclient,
            cls.services["small"],
            accountid=cls.account.name,
            domainid=cls.account.domainid,
            serviceofferingid=offering_id,
        )

    # create small and large virtual machines
    cls.small_virtual_machine = _deploy(cls.small_offering.id)
    cls.medium_virtual_machine = _deploy(cls.medium_offering.id)
    cls.virtual_machine = _deploy(cls.small_offering.id)

    # NOTE(review): small_virtual_machine is not in _cleanup -- confirm a
    # test case expunges it, otherwise it leaks after the run.
    cls._cleanup = [
        cls.small_offering,
        cls.medium_offering,
        cls.medium_virtual_machine,
        cls.virtual_machine
    ]
def setUpClass(cls):
    """Class-wide fixture: ensure both StorPool primary storage pools
    exist, pick (or register) the OS template, and deploy the single VM
    used by the deployment tests."""
    test_client = super(TestDeployVM, cls).getClsTestClient()
    cls.apiclient = test_client.getApiClient()
    cls.services = test_client.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, test_client.getZoneForTests())
    cls.services['mode'] = cls.zone.networktype
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # Setup test data
    cls.testdata = TestData().testdata

    # OS template definition (used only if registration is needed below)
    template = cls.testdata[TestData.template]

    def _ensure_pool(pool_cfg):
        # Reuse the StorPool primary storage when present; create it
        # from the test-data definition otherwise.
        existing = list_storage_pools(
            cls.apiclient,
            name = pool_cfg.get("name")
        )
        if existing is None:
            return StoragePool.create(
                cls.apiclient,
                pool_cfg,
                scope=pool_cfg[TestData.scope],
                zoneid=cls.zone.id,
                provider=pool_cfg[TestData.provider],
                tags=pool_cfg[TestData.tags],
                capacityiops=pool_cfg[TestData.capacityIops],
                capacitybytes=pool_cfg[TestData.capacityBytes],
                hypervisor=pool_cfg[TestData.hypervisor]
            )
        return existing[0]

    # StorPool primary storages
    primarystorage = cls.testdata[TestData.primaryStorage]
    primarystorage2 = cls.testdata[TestData.primaryStorage2]
    cls.primary_storage = _ensure_pool(primarystorage)
    cls.primary_storage2 = _ensure_pool(primarystorage2)
    cls.debug(primarystorage)
    cls.debug(primarystorage2)

    # Prefer the system template; register our own only if it is missing.
    os_template = get_template(
        cls.apiclient,
        cls.zone.id,
        account = "system"
    )
    if os_template == FAILED:
        cls.template = Template.register(
            cls.apiclient,
            template,
            cls.zone.id,
            randomize_name = False
        )
    else:
        cls.template = os_template
    cls.debug(template)

    # Set Zones and disk offerings
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["small"]["template"] = cls.template.id
    cls.services["iso1"]["zoneid"] = cls.zone.id

    cls.account = list_accounts(
        cls.apiclient,
        name="admin"
    )[0]
    cls.debug(cls.account.id)

    cls.service_offering = list_service_offering(
        cls.apiclient,
        name="cloud-test-dev-1"
    )[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services["small"],
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.services['mode']
    )
    # NOTE(review): this class uses `cls.cleanup` while sibling classes
    # use `cls._cleanup` -- confirm tearDownClass reads this name.
    cls.cleanup = [
        cls.virtual_machine
    ]
def setUpCloudStack(cls):
    """Boot CloudStack from the pre-globalid commit and build the
    VM-snapshot / volume-snapshot / template fixtures whose uuid-era
    state the globalid migration tests verify after the commit switch.
    """
    super(TestMigrationFromUuidToGlobalId, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)

    # Build the management server from the "uuid" commit and launch it.
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)
    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,  # own process group so the whole tree can be signalled
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d",
                    cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")

    # Tail the server log until Jetty reports it is up; if the log ends
    # first, assume startup failed and kill the whole process group.
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)  # grace period for the API to become responsive
    cfg.logger.info("Processing with the setup")

    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()

    # Reset the migration flag so the globalid migration re-runs after
    # the commit switch at the end of this setup.
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)

    cls.spapi = spapi.Api.fromConfig(multiCluster=True)

    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]

    # Primary storage must pre-exist for this suite.
    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    assert disk_offering is not None

    # Reuse the "ssd" service offering when present, create it otherwise.
    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(
            cls.apiclient,
            serviceOffering)
    assert cls.service_offering is not None

    cls.disk_offering = disk_offering[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    # Three identical VMs with uuid-suffixed names.
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)

    # check that ROOT disk is created with uuid
    root_volume = list_volumes(cls.apiclient,
                               virtualmachineid=cls.virtual_machine3.id,
                               type="ROOT")
    try:
        # StorPool lookup by the CloudStack uuid must succeed pre-migration.
        spvolume = cls.spapi.volumeList(volumeName=root_volume[0].id)
    except spapi.ApiError as err:
        cfg.logger.info("Root volume is not created with UUID")
        raise Exception(err)

    cls.volume = Volume.create(cls.apiclient,
                               cls.testdata[TestData.volume_1],
                               account=cls.account.name,
                               domainid=cls.domain.id,
                               zoneid=cls.zone.id,
                               diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume)

    # Three VM snapshots, each taken after writing known random data so
    # the tests can verify revert behaviour post-migration.
    cls.random_data_vm_snapshot1 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"

    volume_attached = cls.virtual_machine.attach_volume(
        cls.apiclient,
        cls.volume)

    cls.helper.write_on_disks(cls.random_data_vm_snapshot1,
                              cls.virtual_machine,
                              cls.test_dir,
                              cls.random_data)

    # Disk-only VM snapshots (no memory state).
    MemorySnapshot = False
    cls.vm_snapshot1 = cls.helper.create_vm_snapshot(
        MemorySnapshot,
        cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot1,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)

    cls.random_data_vm_snapshot2 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot2,
                              cls.virtual_machine,
                              cls.test_dir,
                              cls.random_data)

    cls.vm_snapshot2 = cls.helper.create_vm_snapshot(
        MemorySnapshot,
        cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(cls.vm_snapshot2,
                                                   cls.virtual_machine,
                                                   cls.test_dir,
                                                   cls.random_data)

    # vm snapshot to be deleted without revert
    cls.random_data_vm_snapshot3 = random_gen(size=100)
    cls.helper.write_on_disks(cls.random_data_vm_snapshot3,
                              cls.virtual_machine,
                              cls.test_dir,
                              cls.random_data)

    cls.vm_snapshot_for_delete = cls.helper.create_vm_snapshot(
        MemorySnapshot,
        cls.virtual_machine)
    cls.helper.delete_random_data_after_vmsnpashot(
        cls.vm_snapshot_for_delete,
        cls.virtual_machine,
        cls.test_dir,
        cls.random_data)

    # Volume snapshots and templates, one pair with secondary storage
    # involved and one with it bypassed.
    cls.snapshot_on_secondary = cls.helper.create_snapshot(
        False,
        cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_on_secondary)
    cls.template_on_secondary = cls.helper.create_template_from_snapshot(
        cls.services,
        snapshotid=cls.snapshot_on_secondary.id)
    cls._cleanup.append(cls.template_on_secondary)

    cls.snapshot_bypassed = cls.helper.create_snapshot(
        True,
        cls.virtual_machine2)
    cls._cleanup.append(cls.snapshot_bypassed)
    cls.template_bypased = cls.helper.create_template_from_snapshot(
        cls.services,
        snapshotid=cls.snapshot_bypassed.id)
    cls._cleanup.append(cls.template_bypased)

    # change to latest commit with globalId implementation
    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def setUpClass(cls):
    """Class-wide fixture: ensure the StorPool pool/offering exist, make
    sure the StorPoolUser account exists, and create the volumes and VMs
    the storage-pool tests use (all owned by the unprivileged user).

    Skips setup entirely on unsupported hypervisors (HyperV, LXC).
    """
    cls.spapi = spapi.Api.fromConfig(multiCluster=True)
    testClient = super(TestStoragePool, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    # Unprivileged client: volumes/VMs below are created as this user.
    cls.userapiclient = testClient.getUserApiClient(
        UserName="******",
        DomainName="ROOT")

    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    if cls.hypervisor.lower() in ("hyperv", "lxc"):
        # Tests are skipped in setUp for unsupported hypervisors.
        cls.unsupportedHypervisor = True
        return

    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    # Pick the zone whose internal DNS matches the management server IP.
    # NOTE(review): cls.zone stays None if no zone matches -- later
    # cls.zone.id access would fail; confirm the config guarantees a match.
    cls.zone = None
    zones = list_zones(cls.apiclient)
    for z in zones:
        if z.internaldns1 == cls.getClsConfig().mgtSvr[0].mgtSvrIp:
            cls.zone = z

    # Inline definitions used only if the pool/offering must be created.
    storpool_primary_storage = {
        "name": "ssd",
        "zoneid": cls.zone.id,
        "url": "ssd",
        "scope": "zone",
        "capacitybytes": 4500000,
        "capacityiops": 155466464221111121,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": "ssd"
    }

    storpool_service_offerings = {
        "name": "ssd",
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": "ssd"
    }

    storage_pool = list_storage_pools(cls.apiclient, name='ssd')
    service_offerings = list_service_offering(cls.apiclient, name='ssd')
    disk_offerings = list_disk_offering(cls.apiclient, name="Small")
    cls.disk_offerings = disk_offerings[0]

    # Reuse the "ssd" pool/offering when present, create them otherwise.
    if storage_pool is None:
        storage_pool = StoragePool.create(cls.apiclient,
                                          storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.storage_pool = storage_pool
    cls.debug(pprint.pformat(storage_pool))
    if service_offerings is None:
        service_offerings = ServiceOffering.create(
            cls.apiclient,
            storpool_service_offerings)
    else:
        service_offerings = service_offerings[0]

    template = get_template(cls.apiclient, cls.zone.id, account="system")
    if template == FAILED:
        assert False, "get_template() failed to return template\
 with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.service_offering = service_offerings

    # Ensure the StorPoolUser account exists (credentials sanitized).
    user = list_users(cls.apiclient,
                      account='StorPoolUser',
                      domainid=cls.domain.id)
    account = list_accounts(cls.apiclient, id=user[0].accountid)
    if account is None:
        role = Role.list(cls.apiclient, name='User')
        cmd = createAccount.createAccountCmd()
        cmd.email = '*****@*****.**'
        cmd.firstname = 'StorPoolUser'
        cmd.lastname = 'StorPoolUser'
        cmd.password = '******'
        cmd.username = '******'
        cmd.roleid = role[0].id
        account = cls.apiclient.createAccount(cmd)
    else:
        account = account[0]
    cls.account = account

    # Three data volumes owned by the unprivileged user.
    cls.volume_1 = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume_2 = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-2"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )
    cls.volume = Volume.create(
        cls.userapiclient,
        {"diskname": "StorPoolDisk-3"},
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offerings.id,
    )

    cls.virtual_machine = VirtualMachine.create(
        cls.userapiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10,
    )
    cls.virtual_machine2 = VirtualMachine.create(
        cls.userapiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10,
    )
    cls.template = template

    # Payload used by tests to write/verify data inside the guest.
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"

    cls._cleanup = []
    cls._cleanup.append(cls.virtual_machine)
    cls._cleanup.append(cls.virtual_machine2)
    cls._cleanup.append(cls.volume_1)
    cls._cleanup.append(cls.volume_2)
    cls._cleanup.append(cls.volume)
    return