def test_14_create_vm_on_second_cluster_with_template_from_first(self):
    """ Create a virtual machine on the second (working) cluster from a template
        created on the first cluster
    """
    volume = Volume.list(
        self.apiclient,
        virtualmachineid=self.vm_cluster.id,
        type="ROOT",
        listall=True
        )
    snapshot = Snapshot.create(
        self.apiclient,
        volume[0].id,
        account=self.account.name,
        domainid=self.account.domainid,
        )
    template = self.helper.create_template_from_snapshot(
        self.apiclient,
        self.services,
        snapshotid=snapshot.id
        )
    # Disable the first (local) cluster so the VM has to be allocated on the second one
    Cluster.update(
        self.apiclient,
        id=self.local_cluster.id,
        allocationstate="Disabled"
        )
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        {"name": "StorPool-%s" % uuid.uuid4()},
        zoneid=self.zone.id,
        templateid=template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        hypervisor=self.hypervisor,
        rootdisksize=10
        )
    # Verify the VM booted from the template and is reachable (raises on failure)
    ssh_client = virtual_machine.get_ssh_client(reconnect=True)
    # Re-enable the cluster so later tests are not affected
    Cluster.update(
        self.apiclient,
        id=self.local_cluster.id,
        allocationstate="Enabled"
        )
    self._cleanup.append(template)
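# A hedged follow-up check for the test above (not in the original suite): while the
# first cluster is still disabled, the new VM's host should belong to a different
# cluster. list_hosts and the clusterid field mirror their use elsewhere in this
# section; the assertion would go right after get_ssh_client, before re-enabling:
#
#     host = list_hosts(self.apiclient, id=virtual_machine.hostid)[0]
#     self.assertNotEqual(host.clusterid, self.local_cluster.id,
#                         "VM should have been allocated on the second cluster")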
def update_cluster(apiclient, state, cluster_id, managed_state):
    """ Function to Enable/Disable cluster """
    cluster_status = Cluster.update(
        apiclient,
        id=cluster_id,
        allocationstate=state,
        managedstate=managed_state
        )
    return cluster_status.managedstate, cluster_status.allocationstate
def update_cluster(self, state, cluster_id, managed_state):
    """ Function to Enable/Disable cluster """
    cluster_status = Cluster.update(
        self.apiclient,
        id=cluster_id,
        allocationstate=state,
        managedstate=managed_state
        )
    return cluster_status.managedstate, cluster_status.allocationstate
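# A minimal usage sketch for the update_cluster helper above. The method below is not
# part of the original suite: self.cluster.id is an assumed attribute on the test
# class, and "Managed" is one of the valid managedstate values.
def test_update_cluster_usage_sketch(self):
    """ Disable a cluster via the helper, verify the returned state, then re-enable it. """
    managed_state, allocation_state = self.update_cluster(
        state="Disabled",
        cluster_id=self.cluster.id,
        managed_state="Managed"
        )
    self.assertEqual(allocation_state, "Disabled", "cluster should be Disabled")

    managed_state, allocation_state = self.update_cluster(
        state="Enabled",
        cluster_id=self.cluster.id,
        managed_state="Managed"
        )
    self.assertEqual(allocation_state, "Enabled", "cluster should be Enabled")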
def cleanUpCloudStack(cls):
    try:
        # Re-enable any clusters that were left disabled by the tests
        clusters = Cluster.list(cls.apiclient, allocationstate="Disabled")
        if clusters is not None:
            for c in clusters:
                Cluster.update(
                    cls.apiclient,
                    id=c.id,
                    allocationstate="Enabled"
                    )
        # Cleanup resources used
        cleanup_resources(cls.apiclient, cls._cleanup)
    except Exception as e:
        raise Exception("Warning: Exception during cleanup : %s" % e)
    return
def test_managed_clustered_filesystems_limit(self):
    args = {
        "id": self.testdata[TestData.clusterId2],
        TestData.allocationstate: "Disabled"
    }

    Cluster.update(self.apiClient, **args)

    virtual_machine_names = {
        "name": "TestVM1",
        "displayname": "Test VM 1"
    }

    virtual_machine_1 = self._create_vm(virtual_machine_names)

    list_volumes_response = list_volumes(
        self.apiClient,
        virtualmachineid=virtual_machine_1.id,
        listall=True)

    sf_util.check_list(
        list_volumes_response,
        1,
        self,
        TestManagedClusteredFilesystems._should_only_be_one_volume_in_list_err_msg)

    vm_1_root_volume = list_volumes_response[0]

    virtual_machine_names = {
        "name": "TestVM2",
        "displayname": "Test VM 2"
    }

    virtual_machine_2 = self._create_vm(virtual_machine_names)

    virtual_machine_names = {
        "name": "TestVM3",
        "displayname": "Test VM 3"
    }

    class VMStartedException(Exception):
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    try:
        # The VM should fail to be created as there should be an insufficient number of
        #  clustered filesystems remaining in the compute cluster.
        self._create_vm(virtual_machine_names)

        raise VMStartedException("The VM should have failed to start.")
    except VMStartedException:
        raise
    except Exception:
        pass

    vol_snap = Snapshot.create(
        self.apiClient,
        volume_id=vm_1_root_volume.id)

    services = {
        "diskname": "Vol-1",
        "zoneid": self.testdata[TestData.zoneId],
        "ispublic": True
    }

    volume_created_from_snapshot_1 = Volume.create_from_snapshot(
        self.apiClient,
        vol_snap.id,
        services,
        account=self.account.name,
        domainid=self.domain.id)

    class VolumeAttachedException(Exception):
        def __init__(self, *args, **kwargs):
            Exception.__init__(self, *args, **kwargs)

    try:
        # The volume should fail to be attached as there should be an insufficient number of
        #  clustered filesystems remaining in the compute cluster.
        virtual_machine_2.attach_volume(
            self.apiClient,
            volume_created_from_snapshot_1)

        raise VolumeAttachedException(
            TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
    except VolumeAttachedException:
        raise
    except Exception:
        pass

    args = {
        "id": self.testdata[TestData.clusterId2],
        TestData.allocationstate: "Enabled"
    }

    Cluster.update(self.apiClient, **args)

    try:
        # Even with the second cluster re-enabled, the attach should still fail: VM 2 runs
        #  in the first cluster, which has no clustered filesystems remaining.
        virtual_machine_2.attach_volume(
            self.apiClient,
            volume_created_from_snapshot_1)

        raise VolumeAttachedException(
            TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
    except VolumeAttachedException:
        raise
    except Exception:
        pass

    virtual_machine_names = {
        "name": "TestVMA",
        "displayname": "Test VM A"
    }

    virtual_machine_a = self._create_vm(virtual_machine_names)

    host_for_vm_1 = list_hosts(self.apiClient, id=virtual_machine_1.hostid)[0]
    host_for_vm_a = list_hosts(self.apiClient, id=virtual_machine_a.hostid)[0]

    self.assertTrue(
        host_for_vm_1.clusterid != host_for_vm_a.clusterid,
        "VM 1 and VM A should be in different clusters.")

    # Deleting VM 1 frees a clustered filesystem in the first cluster, so the attach now succeeds
    virtual_machine_1.delete(self.apiClient, True)

    volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
        self.apiClient,
        volume_created_from_snapshot_1)

    virtual_machine_2.detach_volume(
        self.apiClient,
        volume_created_from_snapshot_1)

    volume_created_from_snapshot_1 = virtual_machine_2.attach_volume(
        self.apiClient,
        volume_created_from_snapshot_1)

    services = {
        "diskname": "Vol-2",
        "zoneid": self.testdata[TestData.zoneId],
        "ispublic": True
    }

    volume_created_from_snapshot_2 = Volume.create_from_snapshot(
        self.apiClient,
        vol_snap.id,
        services,
        account=self.account.name,
        domainid=self.domain.id)

    try:
        # The volume should fail to be attached as there should be an insufficient number of
        #  clustered filesystems remaining in the compute cluster.
        virtual_machine_2.attach_volume(
            self.apiClient,
            volume_created_from_snapshot_2)

        raise VolumeAttachedException(
            TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)
    except VolumeAttachedException:
        raise
    except Exception:
        pass

    virtual_machine_a.attach_volume(
        self.apiClient,
        volume_created_from_snapshot_2)
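# The expect-attach-to-fail pattern above appears three times in the test. A hedged
# refactoring sketch (not part of the original suite; the helper name is hypothetical)
# could collapse it into one method on the test class:
def _assert_attach_fails(self, vm, volume, err_msg):
    """ Attach 'volume' to 'vm' and fail the test if the attach unexpectedly succeeds. """
    try:
        vm.attach_volume(self.apiClient, volume)
    except Exception:
        return  # expected: CloudStack rejected the attach
    self.fail(err_msg)

# Usage sketch:
#     self._assert_attach_fails(
#         virtual_machine_2,
#         volume_created_from_snapshot_1,
#         TestManagedClusteredFilesystems._volume_should_have_failed_to_attach_to_vm)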