def test_10_attach_detach_instances_with_glId(self):
    volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    vm = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=self.template.id,
        serviceofferingid=self.service_offering.id,
        hypervisor=self.hypervisor,
        rootdisksize=10
    )
    vm.attach_volume(self.apiclient, volume)

    # Avoid shadowing the built-in `list`
    volumes = list_volumes(self.apiclient, virtualmachineid=vm.id, id=volume.id)
    root_volumes = list_volumes(self.apiclient, virtualmachineid=vm.id, type="ROOT")

    self.assertIsNotNone(volumes, "Volume was not attached")
    self.assertIsNotNone(root_volumes, "ROOT volume is missing")

    self.helper.storpool_volume_globalid(volumes[0])
    self.helper.storpool_volume_globalid(root_volumes[0])

    vm.stop(self.apiclient, forced=True)
    detached = vm.detach_volume(self.apiclient, volumes[0])
    self.assertIsNone(detached.virtualmachineid, "Volume was not detached from vm")

    Volume.delete(volume, self.apiclient)
    vm.delete(self.apiclient, expunge=True)
def test_11_volume_from_snapshot_with_uuid(self):
    # Verify snapshot_uuid1 is still addressable on StorPool by its UUID
    self.helper.storpool_snapshot_uuid(self.snapshot_uuid1)

    volume = self.helper.create_volume(zoneid=self.zone.id,
                                       snapshotid=self.snapshot_uuid1.id)
    self.assertIsNotNone(volume, "Could not create volume from snapshot")
    self.helper.storpool_volume_globalid(volume)
    Volume.delete(volume, self.apiclient)
def test_06_resize_detach_vol_globalid(self):
    volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    self.virtual_machine2.attach_volume(self.apiclient, volume)
    self.virtual_machine2.detach_volume(self.apiclient, volume)

    listvol = Volume.list(self.apiclient, id=volume.id)
    self.helper.resizing_volume(listvol[0], globalid=True)
    Volume.delete(volume, self.apiclient)
def test_09_attach_detach_vol_glId(self):
    volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    self.virtual_machine3.attach_volume(self.apiclient, volume)

    # Avoid shadowing the built-in `list`
    volumes = list_volumes(self.apiclient,
                           virtualmachineid=self.virtual_machine3.id,
                           id=volume.id)
    self.assertIsNotNone(volumes, "Volume was not attached")
    self.helper.storpool_volume_globalid(volumes[0])

    self.virtual_machine3.stop(self.apiclient, forced=True)
    detached = self.virtual_machine3.detach_volume(self.apiclient, volumes[0])
    self.assertIsNone(detached.virtualmachineid, "Volume was not detached from vm")
    Volume.delete(volume, self.apiclient)
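# The attach/detach tests above delegate the StorPool-side check to the
# HelperUtil assertions. A minimal sketch of what a helper such as
# storpool_volume_globalid might do, assuming (as test_02 and test_08 below
# demonstrate) that the StorPool name is the last component of the CloudStack
# volume path -- this is an illustrative, hypothetical implementation, not the
# actual helper:
def storpool_volume_globalid_sketch(self, volume):
    # The StorPool global id is the last component of the volume path
    globalid = volume.path.split("/")[3]
    try:
        # "~<name>" addresses a StorPool volume by its global id
        self.spapi.volumeList(volumeName="~" + globalid)
    except spapi.ApiError as err:
        raise Exception("Volume %s not found on StorPool: %s" % (globalid, err))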
def test_03_resize_attached_volume_globalid(self):
    self.assertEqual(VirtualMachine.RUNNING, self.virtual_machine2.state,
                     "Virtual machine is not in the Running state")

    volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-GlId-%d" % random.randint(0, 100)},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offering.id,
    )
    self.virtual_machine2.attach_volume(self.apiclient, volume)

    listvol = Volume.list(self.apiclient,
                          virtualmachineid=self.virtual_machine2.id,
                          id=volume.id)
    self.helper.resizing_volume(listvol[0], globalid=True)

    self.virtual_machine2.detach_volume(self.apiclient, volume)
    Volume.delete(volume, self.apiclient)
def test_02_create_volume_on_new_primary_storage(self):
    '''Test creating a virtual machine on StorPool's new primary storage'''
    volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-1"},
        zoneid=self.zone.id,
        diskofferingid=self.sp_disk_offering.id,
    )
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=self.template.id,
        serviceofferingid=self.serviceOfferings.id,
        hypervisor=self.hypervisor,
        rootdisksize=10)
    virtual_machine.attach_volume(self.apiclient, volume)

    volumes = list_volumes(
        self.apiclient,
        virtualmachineid=virtual_machine.id,
    )
    for vol in volumes:
        # The StorPool volume name is the last component of the volume path
        name = vol.path.split("/")[3]
        try:
            spvolume = self.spapi.volumeList(volumeName="~" + name)
            if spvolume[0].templateName != self.template_name:
                raise Exception(
                    "StorPool volume's template %s does not match the expected template %s"
                    % (spvolume[0].templateName, self.template_name))
        except spapi.ApiError as err:
            raise Exception(err)

    virtual_machine.stop(self.apiclient, forced=True)
    virtual_machine.detach_volume(self.apiclient, volume)
    Volume.delete(volume, self.apiclient)
    virtual_machine.delete(self.apiclient, expunge=True)
def test_06_create_volume_from_non_native_snapshot(self):
    old_supports_resign = self.supports_resign
    self._set_supports_resign(False)

    primary_storage_db_id = self._get_cs_storage_pool_db_id(self.primary_storage)

    virtual_machine = VirtualMachine.create(
        self.apiClient,
        self.testdata[TestData.virtualMachine],
        accountid=self.account.name,
        zoneid=self.zone.id,
        serviceofferingid=self.compute_offering.id,
        templateid=self.template.id,
        domainid=self.domain.id,
        startvm=True)
    self.assertEqual(virtual_machine.state.lower(), "running",
                     TestSnapshots._vm_not_in_running_state_err_msg)

    list_volumes_response = list_volumes(
        self.apiClient,
        virtualmachineid=virtual_machine.id,
        listall=True)
    self._check_list(list_volumes_response, 1,
                     TestSnapshots._should_only_be_one_volume_in_list_err_msg)

    vm_1_root_volume = list_volumes_response[0]

    dt_volume_1 = self._get_dt_volume_for_cs_volume(vm_1_root_volume)
    self.assertNotEqual(dt_volume_1, None,
                        TestSnapshots._should_be_a_valid_volume_err)

    vol_snap_a = self._create_and_test_non_native_snapshot(
        vm_1_root_volume.id, primary_storage_db_id, 1,
        TestSnapshots._should_only_be_one_snapshot_in_list_err_msg)

    services = {
        "diskname": "Vol-1",
        "zoneid": self.testdata[TestData.zoneId],
        "size": 100,
        "ispublic": True
    }
    volume_created_from_snapshot = Volume.create_from_snapshot(
        self.apiClient,
        vol_snap_a.id,
        services,
        account=self.account.name,
        domainid=self.domain.id)

    dt_snapshot_volume = self._get_dt_volume_for_cs_volume(
        volume_created_from_snapshot)
    self.assertNotEqual(dt_snapshot_volume, None,
                        TestSnapshots._should_be_a_valid_volume_err)

    volume_created_from_snapshot = virtual_machine.attach_volume(
        self.apiClient, volume_created_from_snapshot)

    self._delete_and_test_non_native_snapshot(vol_snap_a)

    virtual_machine.delete(self.apiClient, True)

    list_volumes_response = list_volumes(self.apiClient, listall=True)
    self._check_list(list_volumes_response, 1,
                     TestSnapshots._should_only_be_one_volume_in_list_err_msg)

    data_volume = list_volumes_response[0]
    data_volume_2 = Volume(data_volume.__dict__)
    data_volume_2.delete(self.apiClient)

    self._get_dt_volume_for_cs_volume(data_volume, should_exist=False)

    self._set_supports_resign(old_supports_resign)
def setUpCloudStack(cls):
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)

    with open(cls.ARGS.cfg) as json_text:
        cfg.logger.info(cls.ARGS.cfg)
        cfg.logger.info(json_text)
        conf = json.load(json_text)
        cfg.logger.info(conf)
        zone = conf['mgtSvr'][0].get('zone')

    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)

    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")

    # Follow the log until Jetty reports that it has started
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)

    cfg.logger.info("Proceeding with the setup")

    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'"
    )
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'"
        )
        cfg.logger.info("DB configuration table was updated %s", update)

    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, zone_name=zone)
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)
    cls.host = list_hosts(cls.apiclient, zoneid=cls.zone.id)

    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    cls.sp_template_1 = "-".join(["test-ssd-b", random_gen()])
    cfg.logger.info(pprint.pformat("############################ %s" % cls.zone))
    storpool_primary_storage = {
        "name": cls.sp_template_1,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_1,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 155466,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_1
    }
    cls.storpool_primary_storage = storpool_primary_storage

    host, port, auth = cls.getCfgFromUrl(url=storpool_primary_storage["url"])
    cls.spapi = spapi.Api(host=host, port=port, auth=auth)

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=storpool_primary_storage["name"])
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage = storage_pool

    storpool_service_offerings_ssd = {
        "name": cls.sp_template_1,
        "displaytext": "SP_CO_2 (Min IOPS = 10,000; Max IOPS = 15,000)",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "hypervisorsnapshotreserve": 200,
        "tags": cls.sp_template_1
    }
    service_offerings_ssd = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd["name"])
    if service_offerings_ssd is None:
        service_offerings_ssd = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd)
    else:
        service_offerings_ssd = service_offerings_ssd[0]
    cls.service_offering = service_offerings_ssd
    cls._cleanup.append(cls.service_offering)
    cfg.logger.info(pprint.pformat(cls.service_offering))

    cls.sp_template_2 = "-".join(["test-ssd2-b", random_gen()])
    storpool_primary_storage2 = {
        "name": cls.sp_template_2,
        "zoneid": cls.zone.id,
        "url": "SP_API_HTTP=10.2.87.30:81;SP_AUTH_TOKEN=1234567890;SP_TEMPLATE=%s" % cls.sp_template_2,
        "scope": "zone",
        "capacitybytes": 564325555333,
        "capacityiops": 1554,
        "hypervisor": "kvm",
        "provider": "StorPool",
        "tags": cls.sp_template_2
    }
    cls.storpool_primary_storage2 = storpool_primary_storage2

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=storpool_primary_storage2["name"])
    if storage_pool is None:
        newTemplate = sptypes.VolumeTemplateCreateDesc(
            name=storpool_primary_storage2["name"],
            placeAll="ssd",
            placeTail="ssd",
            placeHead="ssd",
            replication=1)
        template_on_local = cls.spapi.volumeTemplateCreate(newTemplate)
        storage_pool = StoragePool.create(cls.apiclient, storpool_primary_storage2)
    else:
        storage_pool = storage_pool[0]
    cls.primary_storage2 = storage_pool

    storpool_service_offerings_ssd2 = {
        "name": cls.sp_template_2,
        "displaytext": "SP_CO_2",
        "cpunumber": 1,
        "cpuspeed": 500,
        "memory": 512,
        "storagetype": "shared",
        "customizediops": False,
        "tags": cls.sp_template_2
    }
    service_offerings_ssd2 = list_service_offering(
        cls.apiclient, name=storpool_service_offerings_ssd2["name"])
    if service_offerings_ssd2 is None:
        service_offerings_ssd2 = ServiceOffering.create(
            cls.apiclient, storpool_service_offerings_ssd2)
    else:
        service_offerings_ssd2 = service_offerings_ssd2[0]
    cls.service_offering2 = service_offerings_ssd2
    cls._cleanup.append(cls.service_offering2)

    # Restart CloudStack so it picks up the new primary storage
    os.killpg(cls.mvn_proc_grp, signal.SIGTERM)
    time.sleep(30)
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None
    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine3)

    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine4)

    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine5)

    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10)
    cls._cleanup.append(cls.virtual_machine6)

    cls.volume1 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_1],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume1)

    cls.volume2 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_2],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume2)

    cls.volume3 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_3],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume3)

    cls.volume4 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_4],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume4)

    cls.volume5 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_5],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume5)

    cls.volume6 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_6],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)
    cls._cleanup.append(cls.volume6)

    # volume7 is deleted at the end of the setup, so it is not added to _cleanup
    cls.volume7 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)

    # Note: volume8 reuses the volume_7 test data entry
    cls.volume8 = Volume.create(cls.apiclient,
                                cls.testdata[TestData.volume_7],
                                account=cls.account.name,
                                domainid=cls.domain.id,
                                zoneid=cls.zone.id,
                                diskofferingid=cls.disk_offering.id)

    cls.virtual_machine.stop(cls.apiclient, forced=True)

    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(cls.apiclient, cls.volume1)

    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume8)
    cls.virtual_machine.start(cls.apiclient)

    list_root = list_volumes(cls.apiclient,
                             virtualmachineid=cls.virtual_machine5.id,
                             type="ROOT")

    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)

    # Snapshot on secondary storage
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(cls.apiclient,
                                                     volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)

    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient, volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient, volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                 volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)

    Volume.delete(cls.volume7, cls.apiclient)

    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def test_08_delete_snapshot_of_deleted_volume(self):
    '''Delete the snapshot and template of an already-deleted volume, bypassing secondary storage'''
    Configurations.update(self.apiclient,
                          name="sp.bypass.secondary.storage",
                          value="true")

    volume = Volume.create(
        self.apiclient,
        {"diskname": "StorPoolDisk-Delete"},
        zoneid=self.zone.id,
        diskofferingid=self.disk_offerings.id,
        account=self.account.name,
        domainid=self.account.domainid,
    )
    # Keep a handle to the original volume object so it can be deleted later
    volume_to_delete = volume

    self.virtual_machine2.attach_volume(self.apiclient, volume)
    self.virtual_machine2.detach_volume(self.apiclient, volume)

    volume = list_volumes(self.apiclient, id=volume.id, listall=True)
    # The StorPool volume name is the last component of the volume path
    name = volume[0].path.split("/")[3]
    try:
        spvolume = self.spapi.volumeList(volumeName="~" + name)
    except spapi.ApiError as err:
        raise Exception(err)

    snapshot = Snapshot.create(self.apiclient, volume_id=volume[0].id)
    try:
        cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
        cmd.snapshotid = snapshot.id
        snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
        if snapshot_details is not None:
            flag = False
            for s in snapshot_details:
                if s["snapshotDetailsName"] == snapshot.id:
                    name = s["snapshotDetailsValue"].split("/")[3]
                    try:
                        sp_snapshot = self.spapi.snapshotList(snapshotName="~" + name)
                        self.debug('################ %s' % sp_snapshot)
                        flag = True
                    except spapi.ApiError as err:
                        raise Exception(err)
            if not flag:
                raise Exception("Could not find snapshot in snapshot details")
    except Exception as err:
        raise Exception(err)

    template = self.helper.create_template_from_snapshot(
        self.apiclient, self.services, snapshotid=snapshot.id)

    Volume.delete(volume_to_delete, self.apiclient)
    Snapshot.delete(snapshot, self.apiclient)

    # The snapshot must no longer be visible on StorPool after deletion
    flag = False
    try:
        cmd = getVolumeSnapshotDetails.getVolumeSnapshotDetailsCmd()
        cmd.snapshotid = snapshot.id
        snapshot_details = self.apiclient.getVolumeSnapshotDetails(cmd)
        if snapshot_details is not None:
            try:
                for s in snapshot_details:
                    if s["snapshotDetailsName"] == snapshot.id:
                        name = s["snapshotDetailsValue"].split("/")[3]
                        sp_snapshot = self.spapi.snapshotList(snapshotName="~" + name)
                        self.debug('################ The snapshot had to be deleted %s' % sp_snapshot)
                        flag = True
            except spapi.ApiError as err:
                flag = False
        if flag is True:
            raise Exception("Snapshot was not deleted")
    except Exception as err:
        self.debug('Snapshot was deleted %s' % err)

    Template.delete(template, self.apiclient, zoneid=self.zone.id)
def setUpCloudStack(cls):
    super(TestMigrationFromUuidToGlobalIdVolumes, cls).setUpClass()
    cls._cleanup = []
    cls.helper = HelperUtil(cls)
    cls.helper.build_commit(cls.ARGS.uuid, cls.ARGS)

    cfg.logger.info("Starting CloudStack")
    cls.mvn_proc = subprocess.Popen(
        ['mvn', '-pl', ':cloud-client-ui', 'jetty:run'],
        cwd=cls.ARGS.forked,
        preexec_fn=os.setsid,
        stdout=cfg.misc,
        stderr=subprocess.STDOUT,
    )
    cls.mvn_proc_grp = os.getpgid(cls.mvn_proc.pid)
    cfg.logger.info("Started CloudStack in process group %d", cls.mvn_proc_grp)
    cfg.logger.info("Waiting for a while to give it a chance to start")

    # Follow the log until Jetty reports that it has started
    proc = subprocess.Popen(["tail", "-f", cfg.misc_name],
                            shell=False,
                            bufsize=0,
                            stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if not line:
            cfg.logger.info("tail ended, was this expected?")
            cfg.logger.info("Stopping CloudStack")
            os.killpg(cls.mvn_proc_grp, signal.SIGINT)
            break
        if "[INFO] Started Jetty Server" in line:
            cfg.logger.info("got it!")
            break
    proc.terminate()
    proc.wait()
    time.sleep(15)

    cfg.logger.info("Proceeding with the setup")

    cls.obj_marvininit = cls.helper.marvin_init(cls.ARGS.cfg)
    cls.testClient = cls.obj_marvininit.getTestClient()
    cls.apiclient = cls.testClient.getApiClient()
    dbclient = cls.testClient.getDbConnection()
    v = dbclient.execute(
        "select * from configuration where name='sp.migration.to.global.ids.completed'")
    cfg.logger.info("Configuration setting for update of db is %s", v)
    if len(v) > 0:
        update = dbclient.execute(
            "update configuration set value='false' where name='sp.migration.to.global.ids.completed'")
        cfg.logger.info("DB configuration table was updated %s", update)

    cls.spapi = spapi.Api.fromConfig(multiCluster=True)

    td = TestData()
    cls.testdata = td.testdata
    cls.services = cls.testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.cluster = list_clusters(cls.apiclient)[0]
    cls.hypervisor = get_hypervisor_type(cls.apiclient)

    # The version of CentOS has to be supported
    cls.template = get_template(cls.apiclient, cls.zone.id, account="system")
    if cls.template == FAILED:
        assert False, "get_template() failed to return template with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    primarystorage = cls.testdata[TestData.primaryStorage]
    serviceOffering = cls.testdata[TestData.serviceOffering]
    serviceOffering2 = cls.testdata[TestData.serviceOfferingssd2]

    storage_pool = list_storage_pools(cls.apiclient,
                                      name=primarystorage.get("name"))
    cls.primary_storage = storage_pool[0]

    disk_offering = list_disk_offering(cls.apiclient, name="Small")
    disk_offering_20 = list_disk_offering(cls.apiclient, name="Medium")
    disk_offering_100 = list_disk_offering(cls.apiclient, name="Large")
    assert disk_offering is not None
    assert disk_offering_20 is not None
    assert disk_offering_100 is not None

    service_offering = list_service_offering(cls.apiclient, name="ssd")
    if service_offering is not None:
        cls.service_offering = service_offering[0]
    else:
        cls.service_offering = ServiceOffering.create(cls.apiclient,
                                                      serviceOffering)
    assert cls.service_offering is not None

    service_offering2 = list_service_offering(cls.apiclient, name="ssd2")
    if service_offering2 is not None:
        cls.service_offering2 = service_offering2[0]
    else:
        cls.service_offering2 = ServiceOffering.create(cls.apiclient,
                                                       serviceOffering2)
    assert cls.service_offering2 is not None

    cls.disk_offering = disk_offering[0]
    cls.disk_offering_20 = disk_offering_20[0]
    cls.disk_offering_100 = disk_offering_100[0]

    account = list_accounts(cls.apiclient, name="admin")
    cls.account = account[0]

    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine)

    cls.virtual_machine2 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine2)

    cls.virtual_machine3 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine3)

    cls.virtual_machine4 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine4)

    cls.virtual_machine5 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine5)

    cls.virtual_machine6 = VirtualMachine.create(
        cls.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=cls.zone.id,
        templateid=cls.template.id,
        serviceofferingid=cls.service_offering.id,
        hypervisor=cls.hypervisor,
        rootdisksize=10
    )
    cls._cleanup.append(cls.virtual_machine6)

    cls.volume1 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_1],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume1)

    cls.volume2 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_2],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume2)

    cls.volume3 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_3],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume3)

    cls.volume4 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_4],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume4)

    cls.volume5 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_5],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume5)

    cls.volume6 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_6],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )
    cls._cleanup.append(cls.volume6)

    # volume7 is deleted at the end of the setup, so it is not added to _cleanup
    cls.volume7 = Volume.create(
        cls.apiclient,
        cls.testdata[TestData.volume_7],
        account=cls.account.name,
        domainid=cls.domain.id,
        zoneid=cls.zone.id,
        diskofferingid=cls.disk_offering.id
    )

    cls.virtual_machine.stop(cls.apiclient, forced=True)

    cls.volume_on_sp_1 = cls.virtual_machine.attach_volume(cls.apiclient, cls.volume1)

    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume3)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume3)
    vol = list_volumes(cls.apiclient, id=cls.volume3.id)
    cls.volume_on_sp_3 = vol[0]

    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume2)
    cls.virtual_machine3.attach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine3.detach_volume(cls.apiclient, cls.volume4)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume5)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume6)
    cls.virtual_machine.attach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.detach_volume(cls.apiclient, cls.volume7)
    cls.virtual_machine.start(cls.apiclient)

    list_root = list_volumes(cls.apiclient,
                             virtualmachineid=cls.virtual_machine5.id,
                             type="ROOT")

    cls.snapshot_uuid1 = Snapshot.create(cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid1)
    cls.snapshot_uuid2 = Snapshot.create(cls.apiclient, volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid2)

    # Snapshot on secondary storage
    cls.helper.bypass_secondary(False)
    cls.snapshot_uuid_on_secondary = Snapshot.create(cls.apiclient,
                                                     volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_on_secondary)

    cls.snapshot_uuid3 = Snapshot.create(cls.apiclient, volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid3)
    cls.snapshot_uuid4 = Snapshot.create(cls.apiclient, volume_id=cls.volume7.id)
    cls._cleanup.append(cls.snapshot_uuid4)

    cls.helper.bypass_secondary(True)
    cls.snapshot_uuid_bypassed = Snapshot.create(cls.apiclient,
                                                 volume_id=list_root[0].id)
    cls._cleanup.append(cls.snapshot_uuid_bypassed)

    Volume.delete(cls.volume7, cls.apiclient)

    cls.helper.switch_to_globalid_commit(cls.ARGS.globalid, cls.ARGS)
    cfg.logger.info("The setup is done, proceeding with the tests")
def test_01_upload_and_download_snapshot(self):
    list_volumes_response = list_volumes(
        self.apiClient,
        virtualmachineid=self.virtual_machine.id,
        listall=True)
    sf_util.check_list(list_volumes_response, 1, self,
                       "There should only be one volume in this list.")
    vm_root_volume = list_volumes_response[0]

    ### Perform tests related to uploading a QCOW2 file to secondary storage
    ### and then moving it to managed storage

    volume_name = "Volume-A"
    services = {"format": TestData.file_type, "diskname": volume_name}

    uploaded_volume = Volume.upload(self.apiClient, services, self.zone.id,
                                    account=self.account.name,
                                    domainid=self.account.domainid,
                                    url=TestData.volume_url,
                                    diskofferingid=self.disk_offering.id)

    self._wait_for_volume_state(uploaded_volume.id, "Uploaded")

    uploaded_volume_id = sf_util.get_cs_volume_db_id(self.dbConnection,
                                                     uploaded_volume)

    result = self._get_volume_store_ref_row(uploaded_volume_id)
    self.assertEqual(len(result), 1, TestUploadDownload.assertText)

    install_path = self._get_install_path(
        result[0][TestData.install_path_index])
    self._verify_uploaded_volume_present(install_path)

    uploaded_volume = self.virtual_machine.attach_volume(self.apiClient,
                                                         uploaded_volume)
    uploaded_volume = sf_util.check_and_get_cs_volume(self, uploaded_volume.id,
                                                      volume_name, self)

    sf_account_id = sf_util.get_sf_account_id(
        self.cs_api, self.account.id, self.primary_storage.id, self,
        "The SolidFire account ID should be a non-zero integer.")

    sf_volumes = sf_util.get_active_sf_volumes(self.sfe, sf_account_id)
    self.assertNotEqual(
        len(sf_volumes), 0,
        "The length of the response for the SolidFire-volume query should not be zero.")

    sf_volume = sf_util.check_and_get_sf_volume(sf_volumes,
                                                uploaded_volume.name, self)
    sf_volume_size = sf_util.get_volume_size_with_hsr(self.cs_api,
                                                      uploaded_volume, self)
    sf_util.check_size_and_iops(sf_volume, uploaded_volume, sf_volume_size, self)

    sf_vag_id = sf_util.get_vag_id(self.cs_api, self.cluster.id,
                                   self.primary_storage.id, self)
    sf_util.check_vag(sf_volume, sf_vag_id, self)

    result = self._get_volume_store_ref_row(uploaded_volume_id)
    self.assertEqual(len(result), 0, TestUploadDownload.assertText2)

    self._verify_uploaded_volume_not_present(install_path)

    ### Perform tests related to extracting the contents of a volume on
    ### managed storage to a QCOW2 file and downloading the file

    try:
        # for data disk
        Volume.extract(self.apiClient, uploaded_volume.id, self.zone.id,
                       TestData.download_mode)
        raise Exception(
            "The volume extraction (for the data disk) did not fail (as expected).")
    except Exception as e:
        if TestUploadDownload.errorText in str(e):
            pass
        else:
            raise

    vm_root_volume_id = sf_util.get_cs_volume_db_id(self.dbConnection,
                                                    vm_root_volume)

    try:
        # for root disk
        Volume.extract(self.apiClient, vm_root_volume.id, self.zone.id,
                       TestData.download_mode)
        raise Exception(
            "The volume extraction (for the root disk) did not fail (as expected).")
    except Exception as e:
        if TestUploadDownload.errorText in str(e):
            pass
        else:
            raise

    self.virtual_machine.stop(self.apiClient)

    self._extract_volume_and_verify(
        uploaded_volume_id,
        "Unable to locate the extracted file for the data disk (attached)")

    result = self._get_volume_store_ref_row(vm_root_volume_id)
    self.assertEqual(len(result), 0, TestUploadDownload.assertText2)

    self._extract_volume_and_verify(
        vm_root_volume_id,
        "Unable to locate the extracted file for the root disk")

    uploaded_volume = self.virtual_machine.detach_volume(self.apiClient,
                                                         uploaded_volume)

    self._extract_volume_and_verify(
        uploaded_volume_id,
        "Unable to locate the extracted file for the data disk (detached)")

    uploaded_volume = Volume(uploaded_volume.__dict__)
    uploaded_volume.delete(self.apiClient)
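# _wait_for_volume_state is a private helper of this test class whose body is
# not shown here. A minimal polling sketch of what such a helper might look
# like, assuming Marvin's list_volumes and the module-level `time` import used
# elsewhere in this suite (illustrative only, not the actual implementation;
# the retries/interval parameters are hypothetical):
def _wait_for_volume_state_sketch(self, volume_id, state, retries=30, interval=10):
    for _ in range(retries):
        volumes = list_volumes(self.apiClient, id=volume_id, listall=True)
        # listVolumes responses carry a `state` field (e.g. "Uploaded", "Ready")
        if volumes and volumes[0].state == state:
            return
        time.sleep(interval)
    raise Exception("Volume %s did not reach state '%s'" % (volume_id, state))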