def createLogs(self, test_module_name=None, log_cfg=None, user_provided_logpath=None): ''' @Name : createLogs @Desc : Gets the Logger with file paths initialized and created @Inputs :test_module_name: Test Module Name to use for logs while creating log folder path log_cfg: Log Configuration provided inside of Configuration user_provided_logpath:LogPath provided by user If user provided log path is available, then one in cfg will not be picked up. @Output : SUCCESS\FAILED ''' try: temp_ts = time.strftime("%b_%d_%Y_%H_%M_%S", time.localtime()) if test_module_name is None: temp_path = temp_ts + "_" + random_gen() else: temp_path = str(test_module_name) + \ "__" + str(temp_ts) + "_" + random_gen() if user_provided_logpath: temp_dir = user_provided_logpath elif ((log_cfg is not None) and ('LogFolderPath' in log_cfg.__dict__.keys()) and (log_cfg.__dict__.get('LogFolderPath') is not None)): temp_dir = \ log_cfg.__dict__.get('LogFolderPath') + "/MarvinLogs" self.__logFolderDir = temp_dir + "//" + temp_path print "\n==== Log Folder Path: %s. " \ "All logs will be available here ====" \ % str(self.__logFolderDir) os.makedirs(self.__logFolderDir) ''' Log File Paths 1. FailedExceptionLog 2. RunLog contains the complete Run Information for Test Run 3. ResultFile contains the TC result information for Test Run ''' tc_failed_exception_log = \ self.__logFolderDir + "/failed_plus_exceptions.txt" tc_run_log = self.__logFolderDir + "/runinfo.txt" if self.__setLogHandler(tc_run_log, log_level=logging.DEBUG) != FAILED: self.__setLogHandler(tc_failed_exception_log, log_level=logging.FATAL) return SUCCESS return FAILED except Exception as e: print "\n Exception Occurred Under createLogs :%s" % \ GetDetailExceptionInfo(e) return FAILED
def test_02_edit_iso(self):
    """Test Edit ISO
    """
    # Validate the following:
    # 1. UI should show the edited values for ISO
    # 2. database (vm_template table) should have updated values

    # Generate random values for updating ISO name and Display text
    new_displayText = random_gen()
    new_name = random_gen()

    self.debug("Updating ISO permissions for ISO: %s" % self.iso_1.id)

    cmd = updateIso.updateIsoCmd()
    # Assign new values to attributes
    cmd.id = self.iso_1.id
    cmd.displaytext = new_displayText
    cmd.name = new_name
    cmd.bootable = self.services["bootable"]
    cmd.passwordenabled = self.services["passwordenabled"]

    self.apiclient.updateIso(cmd)

    # Check whether attributes are updated in ISO using listIsos
    list_iso_response = list_isos(
        self.apiclient,
        id=self.iso_1.id
    )
    self.assertEqual(
        isinstance(list_iso_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_iso_response),
        0,
        "Check template available in List ISOs"
    )
    iso_response = list_iso_response[0]

    self.assertEqual(
        iso_response.displaytext,
        new_displayText,
        "Check display text of updated ISO"
    )
    # Fix: the name was updated above but never verified before
    self.assertEqual(
        iso_response.name,
        new_name,
        "Check name of updated ISO"
    )
    self.assertEqual(
        iso_response.bootable,
        self.services["bootable"],
        "Check if image is bootable of updated ISO"
    )
    self.assertEqual(
        iso_response.ostypeid,
        self.services["ostypeid"],
        "Check OSTypeID of updated ISO"
    )
    return
def test_instance_name_with_hyphens(self):
    """ Test the instance name with hyphens
    """
    # Validate the following
    # 1. Set the vm.instancename.flag to true.
    # 2. Add the virtual machine with display name with hyphens

    # Reading display name property
    if not is_config_suitable(
            apiclient=self.apiclient,
            name='vm.instancename.flag',
            value='true'):
        self.skipTest('vm.instancename.flag should be true. skipping')

    # Display name of the form <RANDOM>-<RANDOM> to exercise hyphen handling
    self.services["virtual_machine"]["displayname"] = random_gen(
        chars=string.ascii_uppercase) + "-" + random_gen(
        chars=string.ascii_uppercase)

    self.debug("Deploying an instance in account: %s" % self.account.name)
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.debug(
        "Checking if the virtual machine is created properly or not?")
    vms = VirtualMachine.list(
        self.apiclient,
        id=virtual_machine.id,
        listall=True
    )
    # Fix: corrected typo'd assertion message ("retuen a valid name")
    self.assertEqual(
        isinstance(vms, list),
        True,
        "List vms should return a valid list"
    )
    vm = vms[0]
    self.assertEqual(
        vm.state,
        "Running",
        "Vm state should be running after deployment"
    )
    self.debug("Display name: %s" % vm.displayname)
    return
def test_02_edit_service_offering(self):
    """Test to update existing service offering"""
    # Validate the following:
    # 1. updateServiceOffering should return
    #    a valid information for newly created offering

    # Fresh random values for the fields being updated
    updated_displaytext = random_gen()
    updated_name = random_gen()

    self.debug("Updating service offering with ID: %s"
               % self.service_offering_1.id)

    update_cmd = updateServiceOffering.updateServiceOfferingCmd()
    update_cmd.id = self.service_offering_1.id
    update_cmd.displaytext = updated_displaytext
    update_cmd.name = updated_name
    self.apiclient.updateServiceOffering(update_cmd)

    # Re-read the offering and confirm the update took effect
    offerings = list_service_offering(
        self.apiclient,
        id=self.service_offering_1.id
    )
    self.assertEqual(
        isinstance(offerings, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(offerings),
        0,
        "Check Service offering is updated"
    )
    refreshed = offerings[0]
    self.assertEqual(
        refreshed.displaytext,
        updated_displaytext,
        "Check server displaytext in updateServiceOffering"
    )
    self.assertEqual(
        refreshed.name,
        updated_name,
        "Check server name in updateServiceOffering"
    )
    return
def test_03_add_invalid_kubernetes_supported_version(self):
    """Test to trying to add a new unsupported Kubernetes supported version

    # Validate the following:
    # 1. API should return an error
    """
    bad_semver = 'invalid'
    version_name = 'v' + bad_semver + '-' + random_gen()
    try:
        response = self.addKubernetesSupportedVersion(
            bad_semver, version_name, self.zone.id,
            self.kubernetes_version_iso_url)
        # API unexpectedly accepted the bogus version: clean up, then fail
        self.debug(
            "Invalid Kubernetes supported added with ID: %s. Deleting it and failing test." %
            response.id)
        self.waitForKubernetesSupportedVersionIsoReadyState(response.id)
        self.deleteKubernetesSupportedVersion(response.id, True)
        self.fail(
            "Invalid Kubernetes supported version has been added. Must be an error."
        )
    except CloudstackAPIException as e:
        # Expected path: the API rejects the unsupported version
        self.debug(
            "Unsupported version error check successful, API failure: %s" % e)
    return
def test_05_unsupported_chars_in_display_name(self):
    """
    Test Unsupported chars in the display name
    (eg: Spaces,Exclamation,yet to get unsupported chars from the dev)
    """
    # Validate the following
    # 1) Set the Global Setting vm.instancename.flag to true
    # 2) While creating VM give a Display name which has unsupported chars
    #    Gives an error message "Instance name can not be longer than 63
    #    characters. Only ASCII letters a~z, A~Z, digits 0~9, hyphen are
    #    allowed. Must start with a letter and end with a letter or digit
    self.debug("Creating VM with unsupported chars in display name")
    for bad_name in ("!hkzs566", "asdh asd", "!dsf d"):
        self.debug("Display name: %s" % bad_name)
        self.services["virtual_machine"]["displayname"] = bad_name
        self.services["virtual_machine"]["name"] = random_gen(
            chars=string.ascii_uppercase)
        # Every unsupported display name must be rejected at deploy time
        with self.assertRaises(Exception):
            # Spawn an instance in that network
            VirtualMachine.create(
                self.apiclient,
                self.services["virtual_machine"],
                accountid=self.account.name,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
            )
    return
def test_02_deploy_kubernetes_ha_cluster(self):
    """Test to deploy a new Kubernetes cluster

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" %
                      self.hypervisor.lower())
    # Fix: idiomatic truthiness check instead of "== True"
    if self.setup_failed:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    # 1 control node, 2 workers on an HA-capable version
    cluster_response = self.createKubernetesCluster(
        name, self.kuberetes_version_3.id, 1, 2)
    self.verifyKubernetesCluster(
        cluster_response, name, self.kuberetes_version_3.id, 1, 2)
    self.debug(
        "Kubernetes cluster with ID: %s successfully deployed, now deleting it" %
        cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" %
               cluster_response.id)
    return
def test_03_deploy_invalid_kubernetes_ha_cluster(self):
    """Test to deploy a new Kubernetes cluster

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" %
                      self.hypervisor.lower())
    # Fix: idiomatic truthiness check instead of "== True"
    if self.setup_failed:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    try:
        cluster_response = self.createKubernetesCluster(
            name, self.kuberetes_version_2.id, 1, 2)
        # Fix: corrected "Invslid" typo in the debug message
        self.debug(
            "Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." %
            cluster_response.id)
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail(
            "HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. Must be an error."
        )
    except CloudstackAPIException as e:
        # Expected path: HA request rejected for a pre-1.16.0 version
        self.debug(
            "HA Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
    return
def test_04_edit_display_name(self):
    """
    Test Edit the Display name Through the UI.
    """
    # Validate the following
    # 1) Set the Global Setting vm.instancename.flag to true
    # 2) Create a VM give a Display name.
    # 3) Once the VM is created stop the VM.
    # 4) Edit the VM Display name. The Display name will be changed but the
    #    internal name will not be changed. The VM functionality must not
    #    be effected.
    self.services["virtual_machine"]["name"] = "TestVM4"
    # Spawn an instance in that network
    self.debug("Deploying VM in account: %s" % self.account.name)
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.debug("Checking if the virtual machine is created properly or not?")
    vms = VirtualMachine.list(self.apiclient, id=virtual_machine.id,
                              listall=True)
    # Fix: corrected typo'd / line-broken assertion messages
    # ("retuen a valid name")
    self.assertEqual(isinstance(vms, list), True,
                     "List vms should return a valid list")
    vm = vms[0]
    self.assertEqual(vm.state, "Running",
                     "Vm state should be running after deployment")
    self.assertEqual(
        vm.displayname,
        self.services["virtual_machine"]["displayname"],
        "Vm display name should match the given name",
    )
    # Remember the internal name: it must survive the display-name update
    old_internal_name = vm.instancename
    self.debug("Stopping the instance: %s" % vm.name)
    try:
        virtual_machine.stop(self.apiclient)
    except Exception as e:
        self.fail("Failed to stop instance: %s, %s" % (vm.name, e))
    self.debug("Update the display name of the instance")
    try:
        virtual_machine.update(self.apiclient, displayname=random_gen())
    except Exception as e:
        self.fail("Failed to update the virtual machine name: %s, %s" %
                  (virtual_machine.name, e))
    self.debug("Start the instance: %s" % virtual_machine.name)
    virtual_machine.start(self.apiclient)
    self.debug("Checking if the instance is working properly after update")
    vms = VirtualMachine.list(self.apiclient, id=virtual_machine.id,
                              listall=True)
    self.assertEqual(isinstance(vms, list), True,
                     "List vms should return a valid list")
    vm = vms[0]
    self.assertEqual(vm.state, "Running",
                     "Vm state should be running after deployment")
    self.assertEqual(vm.instancename, old_internal_name,
                     "Vm internal name should not be changed after update")
    return
def test_06_deploy_invalid_kubernetes_ha_cluster(self):
    """Test to deploy an invalid HA Kubernetes cluster

    # Validate the following:
    # 1. createKubernetesCluster should fail as version doesn't support HA
    """
    # Fix: idiomatic truthiness check instead of "== True"
    if self.setup_failed:
        self.fail("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    try:
        cluster_response = self.createKubernetesCluster(
            name, self.kubernetes_version_2.id, 1, 2)
        # API unexpectedly accepted the HA request: clean up, then fail
        self.debug(
            "Invalid CKS Kubernetes HA cluster deployed with ID: %s. Deleting it and failing test." %
            cluster_response.id)
        self.deleteKubernetesClusterAndVerify(cluster_response.id, False, True)
        self.fail(
            "HA Kubernetes cluster deployed with Kubernetes supported version below version 1.16.0. Must be an error."
        )
    except CloudstackAPIException as e:
        # Expected path: HA request rejected for a pre-1.16.0 version
        self.debug(
            "HA Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" % e)
    return
def createZone(self, zone, rec=0): try: zoneresponse = self.__apiClient.createZone(zone) if zoneresponse.id: self.__addToCleanUp("Zone", zoneresponse.id) self.__tcRunLogger.\ debug("Zone Name : %s Id : %s Created Successfully" % (str(zone.name), str(zoneresponse.id))) return zoneresponse.id else: self.__tcRunLogger.\ exception("====Zone : %s Creation Failed=====" % str(zone.name)) print "\n====Zone : %s Creation Failed=====" % str(zone.name) if not rec: zone.name = zone.name + "_" + random_gen() self.__tcRunLogger.\ debug("====Recreating Zone With New Name : " "%s" % zone.name) print "\n====Recreating Zone With New Name ====", \ str(zone.name) return self.createZone(zone, 1) except Exception as e: print "\nException Occurred under createZone : %s" % \ GetDetailExceptionInfo(e) self.__tcRunLogger.exception("====Create Zone Failed ===") return FAILED
def create_vpc_private_gateway(self, apiclient, vpc, vlan_id,
                               associated_network=None, expected=True):
    """Create a private gateway on the given VPC.

    When expected is True, a creation failure fails the test; when False,
    a successful creation fails the test instead. Returns the gateway (or
    None when creation failed and expected is False).
    """
    self.services["private_gateway"][
        "name"] = "Test Network Isolated - " + random_gen()
    net_id = associated_network.id if associated_network else None
    gateway = None
    try:
        gateway = PrivateGateway.create(
            apiclient,
            vpcid=vpc.id,
            gateway=self.services["private_gateway"]["gateway"],
            ipaddress=self.services["private_gateway"]["ipaddress"],
            netmask=self.services["private_gateway"]["netmask"],
            vlan=vlan_id,
            associatednetworkid=net_id)
    except Exception as ex:
        gateway = None
        if expected:
            self.fail(
                f"Failed to create private gateway, but expected to succeed : {ex}"
            )
    if gateway and not expected:
        self.fail(
            "private gateway is created successfully, but expected to fail"
        )
    return gateway
def create_template_from_snapshot(self, apiclient, services, snapshotid=None, volumeid=None):
    """Create template from Volume"""
    # Create template from Virtual machine and Volume ID
    cmd = createTemplate.createTemplateCmd()
    cmd.displaytext = "StorPool_Template"
    cmd.name = "-".join(["StorPool-", random_gen()])
    # Resolve the OS type id: either given directly, or looked up by
    # its textual description
    if "ostypeid" in services:
        cmd.ostypeid = services["ostypeid"]
    elif "ostype" in services:
        lookup = listOsTypes.listOsTypesCmd()
        lookup.description = services["ostype"]
        matches = apiclient.listOsTypes(lookup)
        if not isinstance(matches, list):
            raise Exception("Unable to find Ostype id with desc: %s" %
                            services["ostype"])
        cmd.ostypeid = matches[0].id
    else:
        raise Exception(
            "Unable to find Ostype is required for creating template")
    cmd.isfeatured = True
    cmd.ispublic = True
    cmd.isextractable = False
    if snapshotid:
        cmd.snapshotid = snapshotid
    if volumeid:
        cmd.volumeid = volumeid
    return Template(apiclient.createTemplate(cmd).__dict__)
def test_07_deploy_and_scale_kubernetes_cluster(self):
    """Test to deploy a new Kubernetes cluster and check for failure while tying to scale it

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    # 3. scaleKubernetesCluster should return valid info for the cluster when it is scaled up
    # 4. scaleKubernetesCluster should return valid info for the cluster when it is scaled down
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" %
                      self.hypervisor.lower())
    # Fix: idiomatic truthiness check instead of "== True"
    if self.setup_failed:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    cluster_response = self.createKubernetesCluster(
        name, self.kuberetes_version_2.id)
    self.verifyKubernetesCluster(cluster_response, name,
                                 self.kuberetes_version_2.id)
    self.debug(
        "Kubernetes cluster with ID: %s successfully deployed, now upscaling it" %
        cluster_response.id)
    # Scale up to 2 workers; on failure delete the cluster so it doesn't leak
    try:
        cluster_response = self.scaleKubernetesCluster(
            cluster_response.id, 2)
    except Exception as e:
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("Failed to upscale Kubernetes cluster due to: %s" % e)
    self.verifyKubernetesClusterScale(cluster_response, 2)
    self.debug(
        "Kubernetes cluster with ID: %s successfully upscaled, now downscaling it" %
        cluster_response.id)
    # Scale back down to 1 worker
    try:
        cluster_response = self.scaleKubernetesCluster(
            cluster_response.id, 1)
    except Exception as e:
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("Failed to downscale Kubernetes cluster due to: %s" % e)
    self.verifyKubernetesClusterScale(cluster_response)
    self.debug(
        "Kubernetes cluster with ID: %s successfully downscaled, now deleting it" %
        cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" %
               cluster_response.id)
    return
def setUp(self):
    """Per-test setup: fresh API/DB clients, a random VM name, empty cleanup."""
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    # Randomized uppercase VM name so repeated runs never collide
    vm_name = random_gen(chars=string.ascii_uppercase)
    self.services["virtual_machine"]["name"] = vm_name
    self.cleanup = []
    return
def setUpClass(cls):
    """One-time class setup: resolve zone/domain/template and provision
    the account, service offering and test VM used by the snapshot tests."""
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    hypervisor = testClient.getHypervisorInfo()
    # VM snapshots are unsupported on these hypervisors; skip the class
    if hypervisor.lower() in (KVM.lower(), "hyperv", "lxc"):
        raise unittest.SkipTest(
            "VM snapshot feature is not supported on KVM, Hyper-V or LXC")
    cls.apiclient = testClient.getApiClient()
    cls.services = testClient.getParsedTestDataConfig()

    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.services["ostype"]
    )
    if template == FAILED:
        assert False, "get_template() failed to return template\
with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["server"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    # Create VMs, NAT Rules etc
    cls.account = Account.create(
        cls.apiclient,
        cls.services["account"],
        domainid=cls.domain.id
    )
    cls.service_offering = ServiceOffering.create(
        cls.apiclient,
        cls.services["service_offerings"]
    )
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services["server"],
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.zone.networktype
    )
    # Seed data used to verify snapshot contents
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    cls._cleanup = [
        cls.service_offering,
        cls.account,
    ]
    return
def create_account(cls, apiclient, services, accounttype=None, domainid=None, roleid=None):
    """Creates an account"""
    cmd = createAccount.createAccountCmd()
    # 0 - User, 1 - Root Admin, 2 - Domain Admin
    if accounttype:
        cmd.accounttype = accounttype
    else:
        cmd.accounttype = 1
    cmd.email = services["email"]
    cmd.firstname = services["firstname"]
    cmd.lastname = services["lastname"]
    cmd.password = services["password"]
    username = services["username"]
    # Limit account username to 99 chars to avoid failure
    # 6 chars start string + 85 chars apiclientid + 6 chars random string +
    # 2 chars joining hyphen string = 99
    username = username[:6]
    apiclientid = apiclient.id[-85:] if len(
        apiclient.id) > 85 else apiclient.id
    # Fix: join with a hyphen as the length arithmetic above requires; the
    # previous "******" joiner (apparent redaction artifact) produced
    # over-long usernames that break the 99-char limit.
    cmd.username = "-".join([username,
                             random_gen(id=apiclientid, size=6)])
    if "accountUUID" in services:
        cmd.accountid = "-".join([services["accountUUID"], random_gen()])
    if "userUUID" in services:
        cmd.userid = "-".join([services["userUUID"], random_gen()])
    if domainid:
        cmd.domainid = domainid
    if roleid:
        cmd.roleid = roleid
    account = apiclient.createAccount(cmd)
    return Account(account.__dict__)
def configure_Stickiness_Policy(self, lb_rule, method, paramDict=None):
    """Configure the stickiness policy on lb rule"""
    # Randomized policy name keeps repeated configurations distinct
    policy_name = "-".join([method, random_gen()])
    try:
        sticky = lb_rule.createSticky(
            self.apiclient,
            methodname=method,
            name=policy_name,
            param=paramDict
        )
        self.debug("Response: %s" % sticky)
        return sticky
    except Exception as e:
        self.fail("Configure sticky policy failed with exception: %s" % e)
def setUpClass(cls):
    """One-time class setup: skip on KVM, otherwise provision account,
    offering and test VM for the snapshot tests."""
    testClient = super(TestVmSnapshot, cls).getClsTestClient()
    cls.apiclient = testClient.getApiClient()
    cls._cleanup = []
    cls.unsupportedHypervisor = False
    cls.hypervisor = testClient.getHypervisorInfo()
    # Fix: "in (KVM.lower())" was a substring test against a plain string —
    # the parentheses do not make a tuple — so e.g. "k" would match too.
    # Compare for equality instead.
    if cls.hypervisor.lower() == KVM.lower():
        cls.unsupportedHypervisor = True
        return

    cls.services = testClient.getParsedTestDataConfig()
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.apiclient)
    cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    template = get_template(
        cls.apiclient,
        cls.zone.id,
        cls.services["ostype"]
    )
    if template == FAILED:
        assert False, "get_template() failed to return template\
with description %s" % cls.services["ostype"]

    cls.services["domainid"] = cls.domain.id
    cls.services["small"]["zoneid"] = cls.zone.id
    cls.services["templates"]["ostypeid"] = template.ostypeid
    cls.services["zoneid"] = cls.zone.id

    # Create VMs, NAT Rules etc
    cls.account = Account.create(
        cls.apiclient,
        cls.services["account"],
        domainid=cls.domain.id
    )
    cls._cleanup.append(cls.account)
    cls.service_offering = ServiceOffering.create(
        cls.apiclient,
        cls.services["service_offerings"]["tiny"]
    )
    cls._cleanup.append(cls.service_offering)
    cls.virtual_machine = VirtualMachine.create(
        cls.apiclient,
        cls.services["small"],
        templateid=template.id,
        accountid=cls.account.name,
        domainid=cls.account.domainid,
        serviceofferingid=cls.service_offering.id,
        mode=cls.zone.networktype
    )
    # Seed data used to verify snapshot contents
    cls.random_data_0 = random_gen(size=100)
    cls.test_dir = "/tmp"
    cls.random_data = "random.data"
    return
def finalize(self, result): try: if not self.__userLogPath: src = self.__logFolderPath log_cfg = self.__parsedConfig.logger tmp = log_cfg.__dict__.get('LogFolderPath') + "/MarvinLogs" dst = tmp + "//" + random_gen() mod_name = "test_suite" if self.__testModName: mod_name = self.__testModName.split(".") if len(mod_name) > 2: mod_name = mod_name[-2] if mod_name: dst = tmp + "/" + mod_name + "_" + random_gen() cmd = "mv " + src + " " + dst os.system(cmd) print "===final results are now copied to: %s===" % str(dst) except Exception, e: print "=== Exception occurred under finalize :%s ===" % \ str(GetDetailExceptionInfo(e))
def create_aff_grp(self, aff_grp=None, acc=None, domainid=None):
    """Create an affinity group under a randomized name; wrap any failure
    in a descriptive exception."""
    aff_grp["name"] = "aff_grp_" + random_gen(size=6)
    try:
        return AffinityGroup.create(self.apiclient, aff_grp, acc, domainid)
    except Exception as e:
        raise Exception("Error: Creation of Affinity Group failed : %s" % e)
def finalize(self, result): try: if not self.__userLogPath: src = self.__logFolderPath log_cfg = self.__parsedConfig.logger tmp = log_cfg.__dict__.get('LogFolderPath') + "/MarvinLogs" dst = tmp + "//" + random_gen() mod_name = "test_suite" if self.__testModName: mod_name = self.__testModName.split(".") if len(mod_name) > 2: mod_name = mod_name[-2] if mod_name: dst = tmp + "/" + mod_name + "_" + random_gen() cmd = "mv " + src + " " + dst os.system(cmd) print "===final results are now copied to: %s===" % str(dst) except Exception as e: print "=== Exception occurred under finalize :%s ===" % \ str(GetDetailExceptionInfo(e))
def configure_Stickiness_Policy(self, lb_rule, method, paramDict=None):
    """Configure the stickiness policy on lb rule"""
    try:
        created = lb_rule.createSticky(
            self.apiclient, methodname=method,
            name="-".join([method, random_gen()]),
            param=paramDict)
        self.debug("Response: %s" % created)
        return created
    except Exception as err:
        self.fail("Configure sticky policy failed with exception: %s" % err)
def addKubernetesSupportedVersion(cls, semantic_version, iso_url):
    """Register a Kubernetes supported version from an ISO URL, wait for
    its ISO to become ready, and return the listed version object."""
    cmd = addKubernetesSupportedVersion.addKubernetesSupportedVersionCmd()
    cmd.semanticversion = semantic_version
    cmd.name = 'v' + semantic_version + '-' + random_gen()
    cmd.url = iso_url
    # Minimum node requirements for this version
    cmd.mincpunumber = 2
    cmd.minmemory = 2048
    version = cls.apiclient.addKubernetesSupportedVersion(cmd)
    cls.debug("Waiting for Kubernetes version with ID %s to be ready" %
              version.id)
    cls.waitForKubernetesSupportedVersionIsoReadyState(version.id)
    return cls.listKubernetesSupportedVersion(version.id)
def test_01_create_vpc_offering(self):
    """Test to create vpc offering

    # Validate the following:
    # 1. createVPCOfferings should return valid info for new offering
    # 2. The Cloud Database contains the valid information
    """
    domain_ids = "{0},{1}".format(self.domain_11.id, self.domain_2.id)
    offering_data = self.localservices["vpc_offering"]

    cmd = createVPCOffering.createVPCOfferingCmd()
    cmd.name = "-".join([offering_data["name"], random_gen()])
    cmd.displaytext = offering_data["displaytext"]
    cmd.supportedServices = offering_data["supportedservices"]
    cmd.domainid = domain_ids
    # Flatten the service->provider(s) mapping into the API's list form
    if "serviceProviderList" in offering_data:
        for svc, prov in offering_data["serviceProviderList"].items():
            prov_list = [prov] if isinstance(prov, str) else prov
            for prov_item in prov_list:
                cmd.serviceproviderlist.append({
                    'service': svc,
                    'provider': prov_item
                })
    vpc_offering = VpcOffering(
        self.apiclient.createVPCOffering(cmd).__dict__)
    self.cleanup.append(vpc_offering)
    self.debug("Created Vpc offering with ID: %s" % vpc_offering.id)

    # Read the offering back and verify it
    list_cmd = listVPCOfferings.listVPCOfferingsCmd()
    list_cmd.id = vpc_offering.id
    list_vpc_response = self.apiclient.listVPCOfferings(list_cmd)
    self.assertEqual(isinstance(list_vpc_response, list), True,
                     "Check list response returns a valid list")
    self.assertNotEqual(len(list_vpc_response), 0,
                        "Check Vpc offering is created")
    vpc_response = list_vpc_response[0]
    self.assertEqual(vpc_response.id, vpc_offering.id,
                     "Check server id in createVPCOffering")
    self.assertEqual(vpc_response.displaytext,
                     self.localservices["vpc_offering"]["displaytext"],
                     "Check server displaytext in createVPCOffering")
    self.assertItemsEqual(vpc_response.domainid.split(","),
                          domain_ids.split(","),
                          "Check domainid in createVPCOffering")
    return
def getValidKubernetesCluster(self, size=1, control_nodes=1):
    """Return an existing, verified Kubernetes cluster, or deploy a new one.

    Reuses the module-level k8s_cluster when it exists, is still listable
    and passes verification for the requested size/control-node count;
    otherwise deletes leftovers and creates a fresh cluster.
    """
    cluster = k8s_cluster
    version = self.kubernetes_version_2
    if control_nodes != 1:
        # Multi-control-node (HA) clusters need the newer version
        version = self.kubernetes_version_3
    valid = True
    # Fix: identity comparison with None and plain truthiness instead of
    # "== None" / "== True" / "== False"
    if cluster is None:
        valid = False
        self.debug("No existing cluster available, k8s_cluster: %s" % cluster)
    if valid and cluster.id is None:
        valid = False
        self.debug(
            "ID for existing cluster not found, k8s_cluster ID: %s" %
            cluster.id)
    if valid:
        cluster_id = cluster.id
        cluster = self.listKubernetesCluster(cluster_id)
        if cluster is None:
            valid = False
            self.debug(
                "Existing cluster, k8s_cluster ID: %s not returned by list API" %
                cluster_id)
    if valid:
        try:
            self.verifyKubernetesCluster(cluster, cluster.name, None, size,
                                         control_nodes)
            self.debug(
                "Existing Kubernetes cluster available with name %s" %
                cluster.name)
        except AssertionError as error:
            valid = False
            self.debug(
                "Existing cluster failed verification due to %s, need to deploy a new one" %
                error)
    if not valid:
        name = 'testcluster-' + random_gen()
        self.debug("Creating for Kubernetes cluster with name %s" % name)
        try:
            self.deleteAllLeftoverClusters()
            cluster = self.createKubernetesCluster(name, version.id, size,
                                                   control_nodes)
            self.verifyKubernetesCluster(cluster, name, version.id, size,
                                         control_nodes)
        # Fix: AssertionError is a subclass of Exception, so it must be
        # handled first — its handler was previously unreachable.
        except AssertionError as err:
            self.fail(
                "Kubernetes cluster deployment failed during cluster verification: %s" %
                err)
        except Exception as ex:
            self.fail("Kubernetes cluster deployment failed: %s" % ex)
    return cluster
def test_03_duplicate_name(self):
    """ Test the duplicate name when old VM is in non-expunged state
    """
    # Validate the following
    # 1. Set the vm.instancename.flag to true.
    # 2. Add the virtual machine with display name same as that of
    #    non-expunged virtual machine. The proper error should pop
    #    out saying the duplicate names are not allowed

    # Reading display name property
    if not is_config_suitable(apiclient=self.apiclient,
                              name='vm.instancename.flag',
                              value='true'):
        self.skipTest('vm.instancename.flag should be true. skipping')

    self.services["virtual_machine"]["displayname"] = random_gen(
        chars=string.ascii_uppercase)
    self.debug("Deploying an instance in account: %s" % self.account.name)
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.cleanup.append(virtual_machine)
    self.debug(
        "Checking if the virtual machine is created properly or not?")
    vms = VirtualMachine.list(self.apiclient, id=virtual_machine.id,
                              listall=True)
    # Fix: corrected typo'd assertion message ("retuen a valid name")
    self.assertEqual(isinstance(vms, list), True,
                     "List vms should return a valid list")
    vm = vms[0]
    self.assertEqual(vm.state, "Running",
                     "Vm state should be running after deployment")
    self.debug("Display name: %s" % vm.displayname)
    # Fix: corrected "Deplying" typo in the debug message
    self.debug("Deploying another virtual machine with same name")
    # The second deployment with a duplicate name must be rejected
    with self.assertRaises(Exception):
        VirtualMachine.create(
            self.apiclient,
            self.services["virtual_machine"],
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
    return
def test_06_deploy_and_invalid_upgrade_kubernetes_cluster(self):
    """Test to deploy a new Kubernetes cluster and check for failure while tying to upgrade it to a lower version

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    # 3. upgradeKubernetesCluster should fail
    """
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" %
                      self.hypervisor.lower())
    # Fix: idiomatic truthiness check instead of "== True"
    if self.setup_failed:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    cluster_response = self.createKubernetesCluster(
        name, self.kuberetes_version_2.id)
    self.verifyKubernetesCluster(cluster_response, name,
                                 self.kuberetes_version_2.id)
    self.debug(
        "Kubernetes cluster with ID: %s successfully deployed, now scaling it" %
        cluster_response.id)
    try:
        cluster_response = self.upgradeKubernetesCluster(
            cluster_response.id, self.kuberetes_version_1.id)
        # Fix: this debug previously referenced an undefined name
        # (kuberetes_version_1.id, missing "self.") — the NameError was
        # swallowed by the broad except and the test passed wrongly.
        self.debug(
            "CKS Kubernetes cluster with ID: %s upgraded to a lower version. Deleting it and failing test." %
            cluster_response.id)
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail(
            "Kubernetes cluster upgraded to a lower Kubernetes supported version. Must be an error."
        )
    # Fix: catch the API exception specifically (as the sibling tests do)
    # so the AssertionError raised by self.fail above is not swallowed.
    except CloudstackAPIException as e:
        self.debug(
            "Upgrading Kubernetes cluster with invalid Kubernetes supported version check successful, API failure: %s" %
            e)
    self.debug("Deleting Kubernetes cluster with ID: %s" %
               cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted" %
               cluster_response.id)
    return
def test_01_create_vm_snapshots_with_globalId(self):
    '''Create vmsnapshot from virtual machine created with uuid'''
    global vm_snapshot_glId
    global random_data_vm_snapshot_glid
    # Seed the VM disks with known random data before snapshotting
    random_data_vm_snapshot_glid = random_gen(size=100)
    self.helper.write_on_disks(
        random_data_vm_snapshot_glid,
        self.virtual_machine,
        self.test_dir,
        self.random_data)

    # Disk-only snapshot: memory is not included
    MemorySnapshot = False
    vm_snapshot_glId = self.helper.create_vm_snapshot(
        MemorySnapshot, self.virtual_machine)

    # Remove the seeded data so a later revert can prove the snapshot worked
    self.helper.delete_random_data_after_vmsnpashot(
        vm_snapshot_glId,
        self.virtual_machine,
        self.test_dir,
        self.random_data)
def createNewKubernetesCluster(self, version, size, control_nodes):
    """Create a Kubernetes cluster with a random name and verify it.

    :param version: Kubernetes supported version object (its .id is used)
    :param size: number of worker nodes
    :param control_nodes: number of control-plane nodes
    :return: the created cluster response on success; fails the test otherwise
    """
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    try:
        cluster = self.createKubernetesCluster(name, version.id,
                                               size, control_nodes)
        self.verifyKubernetesCluster(cluster, name, version.id,
                                     size, control_nodes)
    # BUG FIX: AssertionError is a subclass of Exception, so in the original
    # ordering the AssertionError handler was unreachable and verification
    # failures were misreported as deployment failures. The narrower
    # exception must be caught first.
    except AssertionError as err:
        self.fail(
            "Kubernetes cluster deployment failed during cluster verification: %s"
            % err)
    except Exception as ex:
        self.fail("Kubernetes cluster deployment failed: %s" % ex)
    return cluster
def create_aff_grp(self, api_client=None, aff_grp=None,
                   aff_grp_name=None, projectid=None):
    """Create an affinity group, defaulting each missing argument.

    Falls back to self.api_client, the host_anti_affinity service config,
    a randomized group name, and self.project.id respectively.
    """
    api_client = api_client or self.api_client
    if aff_grp is None:
        aff_grp = self.services["host_anti_affinity"]
    # A caller-supplied name wins; otherwise generate a random one.
    aff_grp["name"] = (aff_grp_name if aff_grp_name is not None
                       else "aff_grp_" + random_gen(size=6))
    if projectid is None:
        projectid = self.project.id
    try:
        return AffinityGroup.create(api_client, aff_grp,
                                    None, None, projectid)
    except Exception as e:
        raise Exception("Error: Creation of Affinity Group failed : %s" % e)
def create_Volume_from_Snapshot(self, snapshot):
    """Build (but do not send) a createVolume command sourced from *snapshot*."""
    try:
        self.debug("Creating volume from snapshot: %s" % snapshot.name)
        cmd = createVolume.createVolumeCmd()
        # Name pattern: <configured diskname>-<random suffix>.
        disk_name = self.services["volume"]["diskname"]
        cmd.name = disk_name + "-" + random_gen()
        cmd.snapshotid = snapshot.id
        cmd.zoneid = self.zone.id
        cmd.size = self.services["volume"]["size"]
        cmd.account = self.account.name
        cmd.domainid = self.account.domainid
        return cmd
    except Exception as e:
        self.fail("Failed to create volume from snapshot: %s - %s" %
                  (snapshot.name, e))
def create_Volume_from_Snapshot(self, snapshot):
    """Prepare a createVolume API command that restores *snapshot* into a new volume."""
    try:
        self.debug("Creating volume from snapshot: %s" % snapshot.name)
        volume_cfg = self.services["volume"]
        cmd = createVolume.createVolumeCmd()
        cmd.name = "-".join([volume_cfg["diskname"], random_gen()])
        cmd.snapshotid = snapshot.id
        cmd.zoneid = self.zone.id
        cmd.size = volume_cfg["size"]
        # Volume is owned by the test account, in its domain.
        cmd.account = self.account.name
        cmd.domainid = self.account.domainid
        return cmd
    except Exception as e:
        self.fail("Failed to create volume from snapshot: %s - %s" %
                  (snapshot.name, e))
def test_05_deploy_and_upgrade_kubernetes_ha_cluster(self):
    """Test to deploy a new HA Kubernetes cluster and upgrade it to newer version

    # Validate the following:
    # 1. createKubernetesCluster should return valid info for new cluster
    # 2. The Cloud Database contains the valid information
    # 3. upgradeKubernetesCluster should return valid info for the cluster
    """
    # CKS is only available on these hypervisors.
    if self.hypervisor.lower() not in ["kvm", "vmware", "xenserver"]:
        self.skipTest("CKS not supported for hypervisor: %s" % self.hypervisor.lower())
    if self.setup_failed == True:
        self.skipTest("Setup incomplete")
    name = 'testcluster-' + random_gen()
    self.debug("Creating for Kubernetes cluster with name %s" % name)
    # HA deployment: size=1 worker, control_nodes=2 (see
    # createNewKubernetesCluster's signature for the argument meanings).
    cluster_response = self.createKubernetesCluster(
        name, self.kuberetes_version_3.id, 1, 2)
    self.verifyKubernetesCluster(
        cluster_response, name, self.kuberetes_version_3.id, 1, 2)
    self.debug(
        "Kubernetes cluster with ID: %s successfully deployed, now upgrading it"
        % cluster_response.id)
    try:
        cluster_response = self.upgradeKubernetesCluster(
            cluster_response.id, self.kuberetes_version_4.id)
    except Exception as e:
        # Clean up the deployed cluster before failing the test.
        self.deleteKubernetesCluster(cluster_response.id)
        self.fail("Failed to upgrade Kubernetes HA cluster due to: %s" % e)
    self.verifyKubernetesClusterUpgrade(
        cluster_response, self.kuberetes_version_4.id)
    self.debug(
        "Kubernetes cluster with ID: %s successfully upgraded, now deleting it"
        % cluster_response.id)
    self.deleteAndVerifyKubernetesCluster(cluster_response.id)
    self.debug("Kubernetes cluster with ID: %s successfully deleted"
               % cluster_response.id)
    return
def test_vm_instance_name_duplicate_different_accounts(self):
    """
    @Desc: Test whether cloudstack allows duplicate vm instance names
           in the diff networks
    @Steps:
    Step1: Set the vm.instancename.flag to true.
    Step2: Deploy a VM with name say webserver01 from account1
           Internal name should be i-<userid>-<vmid>-webserver01
    Step3: Now deploy VM with the same name "webserver01" from account2.
    Step4: Deployment of VM with same name should fail
    """
    # Precondition: the duplicate-name check only applies with the flag on.
    if not is_config_suitable(
            apiclient=self.apiclient,
            name='vm.instancename.flag',
            value='true'):
        self.skipTest('vm.instancename.flag should be true. skipping')

    # Step2: deploy the first VM under account 1.
    self.debug("Deploying VM in account: %s" % self.account.name)
    vm_cfg = self.services["virtual_machine2"]
    vm_cfg["displayname"] = random_gen(chars=string.ascii_uppercase)
    vm_cfg["zoneid"] = self.zone.id
    vm_cfg["template"] = self.template.id
    first_vm = VirtualMachine.create(
        self.apiclient,
        vm_cfg,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.cleanup.append(first_vm)

    # Step3/Step4: an identically-named VM from account 2 must be rejected.
    self.debug("Deploying VM in account: %s" % self.account_2.name)
    with self.assertRaises(Exception):
        second_vm = VirtualMachine.create(
            self.apiclient,
            vm_cfg,
            accountid=self.account_2.name,
            domainid=self.account_2.domainid,
            serviceofferingid=self.service_offering.id,
        )
        # Only reached if creation unexpectedly succeeds.
        self.cleanup.append(second_vm)
    return
def deploy_domain(self, domain_data):
    """Resolve the ROOT domain or create a new one, then deploy its accounts."""
    if domain_data['name'] == 'ROOT':
        # ROOT always exists; just look it up.
        matches = Domain.list(
            api_client=self.api_client,
            name=domain_data['name']
        )
        domain = matches[0]
    else:
        self.logger.debug('>>> DOMAIN => Creating "%s"...', domain_data['name'])
        suffix = '-' + random_gen() if self.randomizeNames else ''
        domain = Domain.create(
            api_client=self.api_client,
            name=domain_data['name'] + suffix
        )
    self.logger.debug('>>> DOMAIN => ID: %s => Name: %s => Path: %s => State: %s',
                      domain.id, domain.name, domain.path, domain.state)
    self.deploy_accounts(domain_data['accounts'], domain)
def create_from_snapshot(cls, apiclient, snapshot_id, services,
                         account=None, domainid=None):
    """Create volume from snapshot.

    :param apiclient: API client used to issue createVolume
    :param snapshot_id: id of the source snapshot
    :param services: dict with "diskname", "zoneid" and optionally
                     "size", "ispublic", "account", "domainid"
    :param account: owner account name; falls back to services["account"]
    :param domainid: owner domain id; falls back to services["domainid"]
    :return: Volume wrapping the API response
    """
    cmd = createVolume.createVolumeCmd()
    cmd.isAsync = "false"
    cmd.name = "-".join([services["diskname"], random_gen()])
    cmd.snapshotid = snapshot_id
    cmd.zoneid = services["zoneid"]
    if "size" in services:
        cmd.size = services["size"]
    # BUG FIX: the original read services["ispublic"] unconditionally,
    # raising KeyError when the key was absent; the if/else shape shows a
    # defaulted lookup was intended. Falsy values still map to False, as
    # before.
    cmd.ispublic = services.get("ispublic") or False
    cmd.account = account if account else services["account"]
    cmd.domainid = domainid if domainid else services["domainid"]
    return Volume(apiclient.createVolume(cmd).__dict__)
def create_Template_from_Snapshot(self, snapshot):
    """Prepare a createTemplate command from *snapshot*, resolving the OS type id."""
    try:
        self.debug("Creating template from snapshot: %s" % snapshot.name)
        template_cfg = self.services["template"]
        cmd = createTemplate.createTemplateCmd()
        cmd.displaytext = template_cfg["displaytext"]
        cmd.name = "-".join([template_cfg["name"], random_gen()])
        # Look up the numeric OS type id from its textual description.
        os_query = listOsTypes.listOsTypesCmd()
        os_query.description = template_cfg["ostype"]
        ostypes = self.apiclient.listOsTypes(os_query)
        if not isinstance(ostypes, list):
            raise Exception(
                "Unable to find Ostype id with desc: %s" %
                template_cfg["ostype"])
        cmd.ostypeid = ostypes[0].id
        cmd.snapshotid = snapshot.id
        return cmd
    except Exception as e:
        self.fail("Failed to create template from snapshot: %s - %s" %
                  (snapshot.name, e))
def test_01_custom_hostname_instancename_false(self):
    """
    Verify custom hostname for the instance when vm.instancename.flag=false
    """
    # Validate the following
    # 1. Set the vm.instancename.flag to false. Hostname and displayname
    #    should be UUID
    # 2. Give the user provided display name. Internal name should be
    #    i-<userid>-<vmid>-instance name (It should not contain display name)
    if not is_config_suitable(
            apiclient=self.apiclient,
            name='vm.instancename.flag',
            value='false'):
        self.skipTest('vm.instancename.flag should be false. skipping')
    self.debug("Deploying VM in account: %s" % self.account.name)

    # Spawn an instance in that network
    self.services["virtual_machine"]["displayname"] = random_gen(
        chars=string.ascii_uppercase)
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.debug(
        "Checking if the virtual machine is created properly or not?")
    vms = VirtualMachine.list(
        self.apiclient,
        id=virtual_machine.id,
        listall=True
    )
    self.assertEqual(
        isinstance(vms, list),
        True,
        "List vms should retuen a valid name"
    )
    vm = vms[0]
    self.assertEqual(
        vm.state,
        "Running",
        "Vm state should be running after deployment"
    )
    self.debug("vm.displayname: %s, original: %s" %
               (vm.displayname,
                self.services["virtual_machine"]["displayname"]))
    self.assertEqual(
        vm.displayname,
        self.services["virtual_machine"]["displayname"],
        "Vm display name should match the given name"
    )

    # Fetch account ID and VMID from database to check internal name
    self.debug("select id from account where uuid = '%s';"
               % self.account.id)
    qresultset = self.dbclient.execute(
        "select id from account where uuid = '%s';"
        % self.account.id
    )
    self.assertEqual(
        isinstance(qresultset, list),
        True,
        "Check DB query result set for valid data"
    )
    self.assertNotEqual(
        len(qresultset),
        0,
        "Check DB Query result set"
    )
    qresult = qresultset[0]
    account_id = qresult[0]
    self.debug("select id from vm_instance where uuid = '%s';" % vm.id)
    qresultset = self.dbclient.execute(
        "select id from vm_instance where uuid = '%s';" % vm.id)
    self.assertEqual(
        isinstance(qresultset, list),
        True,
        "Check DB query result set for valid data"
    )
    self.assertNotEqual(
        len(qresultset),
        0,
        "Check DB Query result set"
    )
    qresult = qresultset[0]
    self.debug("Query result: %s" % qresult)
    vmid = qresult[0]

    # The instance-name prefix comes from the instance.name global config.
    self.debug("Fetching the global config value for instance.name")
    configs = Configurations.list(
        self.apiclient,
        name="instance.name",
        listall=True
    )
    config = configs[0]
    self.debug("Config value : %s" % config)
    instance_name = config.value
    self.debug("Instance.name: %s" % instance_name)

    # internal Name = i-<user ID>-<VM ID>-<instance_name>
    # internal_name = "i-" + str(account_id) + "-" + str(vmid) + "-" +
    # instance_name
    internal_name = "i-%s-%s-%s" % (str(account_id), str(vmid), instance_name)
    self.debug("Internal name: %s" % internal_name)
    self.debug("vm instance name : %s" % vm.instancename)
    self.assertEqual(
        vm.instancename,
        internal_name,
        "VM internal name should match with that of the format"
    )
    return
def setUp(self):
    """Create an admin API client and a fresh user API client in ROOT."""
    self.apiClient = self.testClient.getApiClient()
    # BUG FIX: the original passed the positional 'ROOT' after the keyword
    # argument account=..., which is a SyntaxError in Python. Pass both
    # arguments positionally (user name, then domain name).
    self.userApiClient = self.testClient.getUserApiClient(
        'test' + utils.random_gen(), 'ROOT')
def test_03_snapshot_detachedDisk(self): """Test snapshot from detached disk """ # Validate the following # 1. login in VM and write some data on data disk(use fdisk to # partition datadisk,fdisk, and make filesystem using # mkfs.ext3) # 2. Detach the data disk and write some data on data disk # 3. perform the snapshot on the detached volume # 4. listvolumes with VM id shouldn't show the detached volume # 5. listSnapshots should list the snapshot that was created # 6. verify backup_snap_id was non null in the `snapshots` table if self.hypervisor.lower() in ['hyperv']: self.skipTest("Snapshots feature is not supported on Hyper-V") volumes = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', listall=True ) self.assertEqual( isinstance(volumes, list), True, "Check list response returns a valid list" ) volume = volumes[0] random_data_0 = random_gen(size=100) random_data_1 = random_gen(size=100) try: ssh_client = self.virtual_machine.get_ssh_client() # Format partition using ext3 format_volume_to_ext3( ssh_client, self.services["volume"][self.hypervisor]["datadiskdevice_1"] ) cmds = [ "mkdir -p %s" % self.services["paths"]["mount_dir"], "mount %s1 %s" % (self.services["volume"][ self.hypervisor]["datadiskdevice_1"], self.services["paths"]["mount_dir"]), "pushd %s" % self.services["paths"]["mount_dir"], "mkdir -p %s/{%s,%s} " % (self.services["paths"]["sub_dir"], self.services["paths"]["sub_lvl_dir1"], self.services["paths"]["sub_lvl_dir2"]), "echo %s > %s/%s/%s" % (random_data_0, self.services["paths"]["sub_dir"], self.services["paths"]["sub_lvl_dir1"], self.services["paths"]["random_data"]), "echo %s > %s/%s/%s" % (random_data_1, self.services["paths"]["sub_dir"], self.services["paths"]["sub_lvl_dir2"], self.services["paths"]["random_data"]), "sync", "umount %s" % (self.services["paths"]["mount_dir"]), ] for c in cmds: self.debug(ssh_client.execute(c)) # detach volume from VM cmd = detachVolume.detachVolumeCmd() cmd.id = volume.id 
self.apiclient.detachVolume(cmd) # Create snapshot from detached volume snapshot = Snapshot.create(self.apiclient, volume.id) volumes = list_volumes( self.apiclient, virtualmachineid=self.virtual_machine.id, type='DATADISK', listall=True ) self.assertEqual( volumes, None, "Check Volume is detached" ) # Verify the snapshot was created or not snapshots = list_snapshots( self.apiclient, id=snapshot.id ) self.assertNotEqual( snapshots, None, "Check if result exists in list snapshots call" ) self.assertEqual( snapshots[0].id, snapshot.id, "Check snapshot id in list resources call" ) except Exception as e: self.fail("SSH failed for VM with IP: %s - %s" % (self.virtual_machine.ssh_ip, e)) qresultset = self.dbclient.execute( "select id from snapshots where uuid = '%s';" % snapshot.id ) self.assertNotEqual( len(qresultset), 0, "Check DB Query result set" ) qresult = qresultset[0] self.assertNotEqual( str(qresult[0]), 'NULL', "Check if backup_snap_id is not null" ) return
def __createUserApiClient(self, UserName, DomainName, acctType=0):
    '''
    @Name : ___createUserApiClient
    @Desc : Creates a User API Client with given
            UserName\DomainName Parameters
    @Input: UserName: Username to be created in cloudstack
            DomainName: Domain under which the above account be created
            accType: Type of Account EX: Root,Non Root etc
    @Output: Return the API client for the user
    '''
    try:
        # Non-admin contexts cannot create users/domains; reuse the
        # existing client as-is.
        if not self.isAdminContext():
            return self.__apiClient

        # Resolve the domain id, creating the domain if lookup fails.
        listDomain = listDomains.listDomainsCmd()
        listDomain.listall = True
        listDomain.name = DomainName
        try:
            domains = self.__apiClient.listDomains(listDomain)
            domId = domains[0].id
        except:
            cdomain = createDomain.createDomainCmd()
            cdomain.name = DomainName
            domain = self.__apiClient.createDomain(cdomain)
            domId = domain.id

        # Resolve the account id, creating the account if lookup fails.
        cmd = listAccounts.listAccountsCmd()
        cmd.name = UserName
        cmd.domainid = domId
        try:
            accounts = self.__apiClient.listAccounts(cmd)
            acctId = accounts[0].id
        except:
            createAcctCmd = createAccount.createAccountCmd()
            createAcctCmd.accounttype = acctType
            createAcctCmd.domainid = domId
            createAcctCmd.email = "test-" + random_gen()\
                + "@cloudstack.org"
            createAcctCmd.firstname = UserName
            createAcctCmd.lastname = UserName
            createAcctCmd.password = '******'
            createAcctCmd.username = UserName
            acct = self.__apiClient.createAccount(createAcctCmd)
            acctId = acct.id

        # Fetch the user's API keys, registering keys if none exist yet.
        listuser = listUsers.listUsersCmd()
        listuser.username = UserName
        listuserRes = self.__apiClient.listUsers(listuser)
        userId = listuserRes[0].id
        apiKey = listuserRes[0].apikey
        securityKey = listuserRes[0].secretkey
        if apiKey is None:
            ret = self.__getKeys(userId)
            if ret != FAILED:
                mgtDetails = self.__mgmtDetails
                mgtDetails.apiKey = ret[0]
                mgtDetails.securityKey = ret[1]
            else:
                self.__logger.error("__createUserApiClient: "
                                    "User API Client Creation."
                                    " While Registering User Failed")
                return FAILED
        else:
            mgtDetails = self.__mgmtDetails
            mgtDetails.apiKey = apiKey
            mgtDetails.securityKey = securityKey
        # Build a fresh connection bound to the user's keys and wrap it
        # in a new API client.
        newUserConnection =\
            CSConnection(mgtDetails,
                         self.__csConnection.asyncTimeout,
                         self.__csConnection.logger)
        self.__userApiClient = CloudStackAPIClient(newUserConnection)
        self.__userApiClient.connection = newUserConnection
        self.__userApiClient.hypervisor = self.__hypervisor
        return self.__userApiClient
    except Exception as e:
        self.__logger.exception("Exception Occurred "
                                "Under getUserApiClient : %s" %
                                GetDetailExceptionInfo(e))
        return FAILED
def test_07_template_from_snapshot(self):
    """Create Template from snapshot
    """
    # 1. Login to machine; create temp/test directories on data volume
    # 2. Snapshot the Volume
    # 3. Create Template from snapshot
    # 4. Deploy Virtual machine using this template
    # 5. Login to newly created virtual machine
    # 6. Compare data in the root disk with the one that was written on the
    #    volume, it should match
    if self.hypervisor.lower() in ['hyperv']:
        self.skipTest("Snapshots feature is not supported on Hyper-V")
    userapiclient = self.testClient.getUserApiClient(
        UserName=self.account.name,
        DomainName=self.account.domain)

    random_data_0 = random_gen(size=100)
    random_data_1 = random_gen(size=100)
    try:
        # Login to virtual machine
        ssh_client = self.virtual_machine.get_ssh_client()
        cmds = [
            "mkdir -p %s" % self.services["paths"]["mount_dir"],
            "mount %s1 %s" % (
                self.services["volume"][self.hypervisor]["rootdiskdevice"],
                self.services["paths"]["mount_dir"]
            ),
            "mkdir -p %s/%s/{%s,%s} " % (
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir1"],
                self.services["paths"]["sub_lvl_dir2"]
            ),
            "echo %s > %s/%s/%s/%s" % (
                random_data_0,
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir1"],
                self.services["paths"]["random_data"]
            ),
            "echo %s > %s/%s/%s/%s" % (
                random_data_1,
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir2"],
                self.services["paths"]["random_data"]
            ),
            "sync",
        ]
        for c in cmds:
            self.debug(c)
            result = ssh_client.execute(c)
            self.debug(result)
    except Exception as e:
        self.fail("SSH failed for VM with IP address: %s" %
                  self.virtual_machine.ipaddress)

    # Unmount the Volume
    cmds = [
        "umount %s" % (self.services["paths"]["mount_dir"]),
    ]
    for c in cmds:
        self.debug(c)
        ssh_client.execute(c)
    volumes = list_volumes(
        userapiclient,
        virtualmachineid=self.virtual_machine.id,
        type='ROOT',
        listall=True
    )
    self.assertEqual(
        isinstance(volumes, list),
        True,
        "Check list response returns a valid list"
    )
    volume = volumes[0]

    # Create a snapshot of volume
    snapshot = Snapshot.create(
        userapiclient,
        volume.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.debug("Snapshot created from volume ID: %s" % volume.id)

    # Generate template from the snapshot
    template = Template.create_from_snapshot(
        userapiclient,
        snapshot,
        self.services["templates"]
    )
    self.cleanup.append(template)
    self.debug("Template created from snapshot ID: %s" % snapshot.id)

    # Verify created template
    templates = list_templates(
        userapiclient,
        templatefilter=self.services["templates"]["templatefilter"],
        id=template.id
    )
    self.assertNotEqual(
        templates,
        None,
        "Check if result exists in list item call"
    )
    self.assertEqual(
        templates[0].id,
        template.id,
        "Check new template id in list resources call"
    )
    self.debug("Deploying new VM from template: %s" % template.id)

    # Deploy new virtual machine using template
    new_virtual_machine = VirtualMachine.create(
        userapiclient,
        self.services["server_without_disk"],
        templateid=template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.services["mode"]
    )
    try:
        # Login to VM & mount directory
        ssh = new_virtual_machine.get_ssh_client()
        cmds = [
            "mkdir -p %s" % self.services["paths"]["mount_dir"],
            "mount %s1 %s" % (
                self.services["volume"][self.hypervisor]["rootdiskdevice"],
                self.services["paths"]["mount_dir"]
            )
        ]
        for c in cmds:
            ssh.execute(c)
        returned_data_0 = ssh.execute("cat %s/%s/%s/%s" % (
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["random_data"]
        ))
        self.debug(returned_data_0)
        returned_data_1 = ssh.execute("cat %s/%s/%s/%s" % (
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir2"],
            self.services["paths"]["random_data"]
        ))
        self.debug(returned_data_1)
    except Exception as e:
        self.fail("SSH failed for VM with IP address: %s" %
                  new_virtual_machine.ipaddress)

    # Verify returned data
    self.assertEqual(
        random_data_0,
        returned_data_0[0],
        "Verify newly attached volume contents with existing one"
    )
    self.assertEqual(
        random_data_1,
        returned_data_1[0],
        "Verify newly attached volume contents with existing one"
    )
    # Unmount the volume
    cmds = [
        "umount %s" % (self.services["paths"]["mount_dir"]),
    ]
    try:
        for c in cmds:
            self.debug(c)
            # NOTE(review): this runs on ssh_client (the original VM's
            # session) although the mount above was made on the new VM
            # via `ssh` — confirm which session the unmount is meant for.
            ssh_client.execute(c)
    except Exception as e:
        self.fail("SSH failed for VM with IP address: %s, Exception: %s" %
                  (new_virtual_machine.ipaddress, e))
    return
def test_updateDomainAdminDetails(self): """Test update domain admin details """ # Steps for test scenario # 2. update the user details (firstname, lastname, user) with # updateUser API # 3. listUsers in the account # 4. delete the account # Validate the following # 1. listAccounts should show account created successfully # 2. updateUser API should return valid response # 3. user should be updated with new details self.debug("Creating a domain admin account") self.account = Account.create( self.apiclient, self.services["account"], admin=True, domainid=self.domain.id ) self._cleanup.append(self.account) # Fetching the user details of account self.debug( "Fetching user details for account: %s" % self.account.name) users = User.list( self.apiclient, account=self.account.name, domainid=self.account.domainid ) self.assertEqual( isinstance(users, list), True, "List users should return a valid list for account" ) user_1 = users[0] self.debug("Updating the details of user: %s" % user_1.name) firstname = random_gen() lastname = random_gen() self.debug("New firstname: %s, lastname: %s" % (firstname, lastname)) User.update( self.apiclient, user_1.id, firstname=firstname, lastname=lastname ) # Fetching the user details of account self.debug( "Fetching user details for user: %s" % user_1.name) users = User.list( self.apiclient, id=user_1.id, listall=True ) self.assertEqual( isinstance(users, list), True, "List users should return a valid list for account" ) user_1 = users[0] self.assertEqual( user_1.firstname, firstname, "User's first name should be updated with new one" ) self.assertEqual( user_1.lastname, lastname, "User's last name should be updated with new one" ) return
def test_01_volume_from_snapshot(self):
    """Test Creating snapshot from volume having spaces in name(KVM)
    """
    # Validate the following
    # 1. Create a virtual machine and data volume
    # 2. Attach data volume to VM
    # 3. Login to machine; create temp/test directories on data volume
    #    and write some random data
    # 4. Snapshot the Volume
    # 5. Create another Volume from snapshot
    # 6. Mount/Attach volume to another virtual machine
    # 7. Compare data, data should match
    if self.hypervisor.lower() in ['hyperv']:
        self.skipTest("Snapshots feature is not supported on Hyper-V")
    random_data_0 = random_gen(size=100)
    random_data_1 = random_gen(size=100)
    self.debug("random_data_0 : %s" % random_data_0)
    self.debug("random_data_1: %s" % random_data_1)
    try:
        ssh_client = self.virtual_machine.get_ssh_client()
    except Exception as e:
        self.fail("SSH failed for VM: %s" %
                  self.virtual_machine.ipaddress)
    volume = Volume.create(
        self.apiclient,
        self.services["volume"],
        zoneid=self.zone.id,
        account=self.account.name,
        domainid=self.account.domainid,
        diskofferingid=self.disk_offering.id
    )
    self.debug("Created volume with ID: %s" % volume.id)
    self.virtual_machine.attach_volume(
        self.apiclient,
        volume
    )
    self.debug("Attach volume: %s to VM: %s" %
               (volume.id, self.virtual_machine.id))
    self.debug("Formatting volume: %s to ext3" % volume.id)
    # Format partition using ext3
    # Note that this is the second data disk partition of virtual machine
    # as it was already containing data disk before attaching the new
    # volume, Hence datadiskdevice_2
    format_volume_to_ext3(
        ssh_client,
        self.services["volume"][self.hypervisor]["datadiskdevice_2"]
    )
    cmds = [
        "fdisk -l",
        "mkdir -p %s" % self.services["paths"]["mount_dir"],
        "mount -t ext3 %s1 %s" % (
            self.services["volume"][self.hypervisor]["datadiskdevice_2"],
            self.services["paths"]["mount_dir"]),
        "mkdir -p %s/%s/{%s,%s} " % (
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["sub_lvl_dir2"]),
        "echo %s > %s/%s/%s/%s" % (
            random_data_0,
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["random_data"]),
        "echo %s > %s/%s/%s/%s" % (
            random_data_1,
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir2"],
            self.services["paths"]["random_data"]),
        "cat %s/%s/%s/%s" % (
            self.services["paths"]["mount_dir"],
            self.services["paths"]["sub_dir"],
            self.services["paths"]["sub_lvl_dir1"],
            self.services["paths"]["random_data"])]
    for c in cmds:
        self.debug("Command: %s" % c)
        result = ssh_client.execute(c)
        self.debug(result)

    # Unmount the Sec Storage
    cmds = [
        "umount %s" % (self.services["paths"]["mount_dir"]),
    ]
    for c in cmds:
        self.debug("Command: %s" % c)
        ssh_client.execute(c)
    list_volume_response = Volume.list(
        self.apiclient,
        virtualmachineid=self.virtual_machine.id,
        type='DATADISK',
        id=volume.id
    )
    self.assertEqual(
        isinstance(list_volume_response, list),
        True,
        "Check list volume response for valid data"
    )
    volume_response = list_volume_response[0]

    # Create snapshot from attached volume
    snapshot = Snapshot.create(
        self.apiclient,
        volume_response.id,
        account=self.account.name,
        domainid=self.account.domainid
    )
    self.debug("Created snapshot: %s" % snapshot.id)

    # Create volume from snapshot
    volume_from_snapshot = Volume.create_from_snapshot(
        self.apiclient,
        snapshot.id,
        self.services["volume"],
        account=self.account.name,
        domainid=self.account.domainid
    )

    # Detach the volume from virtual machine
    self.virtual_machine.detach_volume(
        self.apiclient,
        volume
    )
    self.debug("Detached volume: %s from VM: %s" %
               (volume.id, self.virtual_machine.id))
    self.debug("Created Volume: %s from Snapshot: %s" % (
        volume_from_snapshot.id, snapshot.id))
    volumes = Volume.list(
        self.apiclient,
        id=volume_from_snapshot.id
    )
    self.assertEqual(
        isinstance(volumes, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(volumes),
        None,
        "Check Volume list Length"
    )
    self.assertEqual(
        volumes[0].id,
        volume_from_snapshot.id,
        "Check Volume in the List Volumes"
    )

    # Attaching volume to new VM
    new_virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["server_without_disk"],
        templateid=self.template.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.services["mode"]
    )
    self.debug("Deployed new VM for account: %s" % self.account.name)
    # self.cleanup.append(new_virtual_machine)
    self.debug("Attaching volume: %s to VM: %s" % (
        volume_from_snapshot.id,
        new_virtual_machine.id
    ))
    new_virtual_machine.attach_volume(
        self.apiclient,
        volume_from_snapshot
    )

    # Rebooting is required so that newly attached disks are detected
    self.debug("Rebooting : %s" % new_virtual_machine.id)
    new_virtual_machine.reboot(self.apiclient)
    try:
        # Login to VM to verify test directories and files
        ssh = new_virtual_machine.get_ssh_client()

        # Mount datadiskdevice_1 because this is the first data disk of the
        # new virtual machine
        cmds = [
            "fdisk -l",
            "mkdir -p %s" % self.services["paths"]["mount_dir"],
            "mount -t ext3 %s1 %s" % (
                self.services["volume"][
                    self.hypervisor]["datadiskdevice_1"],
                self.services["paths"]["mount_dir"]),
        ]
        for c in cmds:
            self.debug("Command: %s" % c)
            result = ssh.execute(c)
            self.debug(result)
        returned_data_0 = ssh.execute(
            "cat %s/%s/%s/%s" % (
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir1"],
                self.services["paths"]["random_data"]
            ))
        returned_data_1 = ssh.execute(
            "cat %s/%s/%s/%s" % (
                self.services["paths"]["mount_dir"],
                self.services["paths"]["sub_dir"],
                self.services["paths"]["sub_lvl_dir2"],
                self.services["paths"]["random_data"]
            ))
    except Exception as e:
        self.fail("SSH access failed for VM: %s, Exception: %s" %
                  (new_virtual_machine.ipaddress, e))
    self.debug("returned_data_0: %s" % returned_data_0[0])
    self.debug("returned_data_1: %s" % returned_data_1[0])

    # Verify returned data
    self.assertEqual(
        random_data_0,
        returned_data_0[0],
        "Verify newly attached volume contents with existing one"
    )
    self.assertEqual(
        random_data_1,
        returned_data_1[0],
        "Verify newly attached volume contents with existing one"
    )
    # Unmount the Sec Storage
    cmds = [
        "umount %s" % (self.services["paths"]["mount_dir"]),
    ]
    for c in cmds:
        self.debug("Command: %s" % c)
        # NOTE(review): ssh_client is the first VM's session, while the
        # mount removed here was made on new_virtual_machine via `ssh`
        # — confirm which session the unmount is meant to use.
        ssh_client.execute(c)
    return
def test_03_duplicate_name(self):
    """
    Test the duplicate name when old VM is in non-expunged state
    """
    # Validate the following
    # 1. Set the vm.instancename.flag to true.
    # 2. Add the virtual machine with display name same as that of
    #    non-expunged virtual machine. The proper error should pop
    #    out saying the duplicate names are not allowed

    # Reading display name property
    if not is_config_suitable(
            apiclient=self.apiclient,
            name='vm.instancename.flag',
            value='true'):
        self.skipTest('vm.instancename.flag should be true. skipping')

    vm_cfg = self.services["virtual_machine"]
    vm_cfg["displayname"] = random_gen(chars=string.ascii_uppercase)
    self.debug("Deploying an instance in account: %s" % self.account.name)
    deployed_vm = VirtualMachine.create(
        self.apiclient,
        vm_cfg,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
    )
    self.cleanup.append(deployed_vm)

    self.debug(
        "Checking if the virtual machine is created properly or not?")
    listed_vms = VirtualMachine.list(
        self.apiclient,
        id=deployed_vm.id,
        listall=True
    )
    self.assertEqual(
        isinstance(listed_vms, list),
        True,
        "List vms should retuen a valid name"
    )
    fetched_vm = listed_vms[0]
    self.assertEqual(
        fetched_vm.state,
        "Running",
        "Vm state should be running after deployment"
    )
    self.debug("Display name: %s" % fetched_vm.displayname)

    # A second VM with the identical display name must be rejected.
    self.debug("Deplying another virtual machine with same name")
    with self.assertRaises(Exception):
        VirtualMachine.create(
            self.apiclient,
            vm_cfg,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
        )
    return
def test_01_clusters(self):
    """Test Add clusters & hosts

    # Validate the following:
    # 1. Verify hypervisortype returned by API is Xen/KVM/VWare
    # 2. Verify that the cluster is in 'Enabled' allocation state
    # 3. Verify that the host is added successfully and in Up state
    #    with listHosts API response

    # Create clusters with Hypervisor type XEN/KVM/VWare
    """
    for k, v in self.services["clusters"].items():
        # Randomize the cluster name so repeated runs do not collide
        v["clustername"] = v["clustername"] + "-" + random_gen()

        cluster = Cluster.create(
            self.apiclient,
            v,
            zoneid=self.zone.id,
            podid=self.pod.id,
            hypervisor=v["hypervisor"].lower()
        )
        self.debug(
            "Created Cluster for hypervisor type %s & ID: %s" % (
                v["hypervisor"],
                cluster.id
            ))
        self.assertEqual(
            cluster.hypervisortype.lower(),
            v["hypervisor"].lower(),
            "Check hypervisor type is " + v["hypervisor"] + " or not"
        )
        self.assertEqual(
            cluster.allocationstate,
            'Enabled',
            "Check whether allocation state of cluster is enabled"
        )

        # If host is externally managed host is already added with cluster
        response = list_hosts(
            self.apiclient,
            clusterid=cluster.id
        )
        if not response:
            hypervisor_type = str(cluster.hypervisortype.lower())
            host = Host.create(
                self.apiclient,
                cluster,
                self.services["hosts"][hypervisor_type],
                zoneid=self.zone.id,
                podid=self.pod.id,
                hypervisor=v["hypervisor"].lower()
            )
            if host == FAILED:
                self.fail("Host Creation Failed")
            self.debug(
                "Created host (ID: %s) in cluster ID %s" % (
                    host.id,
                    cluster.id
                ))
            # Cleanup Host: must stay inside this branch -- 'host' is
            # unbound when the host was added externally, so appending
            # it unconditionally would raise a NameError.
            self.cleanup.append(host)
        # The cluster is always created by this test, so it is always
        # scheduled for cleanup regardless of how the host was added.
        self.cleanup.append(cluster)

        list_hosts_response = list_hosts(
            self.apiclient,
            clusterid=cluster.id
        )
        self.assertEqual(
            isinstance(list_hosts_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(list_hosts_response),
            0,
            "Check list Hosts response"
        )

        host_response = list_hosts_response[0]
        # Check if host is Up and running
        self.assertEqual(
            host_response.state,
            'Up',
            "Check if state of host is Up or not"
        )

        # Verify List Cluster Response has newly added cluster
        list_cluster_response = list_clusters(
            self.apiclient,
            id=cluster.id
        )
        self.assertEqual(
            isinstance(list_cluster_response, list),
            True,
            "Check list response returns a valid list"
        )
        self.assertNotEqual(
            len(list_cluster_response),
            0,
            "Check list Hosts response"
        )

        cluster_response = list_cluster_response[0]
        self.assertEqual(
            cluster_response.id,
            cluster.id,
            "Check cluster ID with list clusters response"
        )
        self.assertEqual(
            cluster_response.hypervisortype.lower(),
            cluster.hypervisortype.lower(),
            "Check hypervisor type with is " + v["hypervisor"] + " or not"
        )
    return
def test_02_edit_template(self):
    """Test Edit template

    Validates the following:
    1. UI should show the edited values for template
    2. database (vm_template table) should have updated values
    """
    # Generate random values for updating template name and display text
    new_displayText = random_gen()
    new_name = random_gen()

    self.template_1.update(self.apiclient,
                           displaytext=new_displayText,
                           name=new_name,
                           bootable=self.services["bootable"],
                           passwordenabled=self.services["passwordenabled"])

    self.debug("Edited template with new name: %s" % new_name)

    # Sleep to ensure update reflected across all the calls
    time.sleep(self.services["sleep"])
    timeout = self.services["timeout"]

    # Poll until the list call returns a valid list, or give up after
    # 'timeout' retries of 10 seconds each.
    while True:
        # Verify template response for updated attributes
        list_template_response = Template.list(
            self.apiclient,
            templatefilter=self.services["templatefilter"],
            id=self.template_1.id,
            account=self.account.name,
            domainid=self.account.domainid
        )
        if isinstance(list_template_response, list):
            break
        elif timeout == 0:
            raise Exception("List Template failed!")

        time.sleep(10)
        timeout = timeout - 1

    self.assertEqual(
        isinstance(list_template_response, list),
        True,
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        len(list_template_response),
        0,
        "Check template available in List Templates"
    )
    template_response = list_template_response[0]

    # NOTE: fixed mislabeled debug messages -- both lines log the
    # display text, not the template name.
    self.debug("New display text: %s" % new_displayText)
    self.debug("Display text in Template response: %s" %
               template_response.displaytext)
    self.assertEqual(
        template_response.displaytext,
        new_displayText,
        "Check display text of updated template"
    )
    self.assertEqual(
        template_response.name,
        new_name,
        "Check name of updated template"
    )
    self.assertEqual(
        str(template_response.passwordenabled).lower(),
        str(self.services["passwordenabled"]).lower(),
        "Check passwordenabled field of updated template"
    )
    self.assertEqual(
        template_response.ostypeid,
        self.services["ostypeid"],
        "Check OSTypeID of updated template"
    )
    return