def test_deployvm_userdispersing(self):
    """Test deploy VMs using user dispersion planner

    Creates a service offering backed by the UserDispersingPlanner,
    deploys two VMs with it and verifies both come up Running.  If the
    planner placed both VMs in the same cluster this is only logged,
    since dispersion is best-effort.
    """
    self.service_offering_userdispersing = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering"],
        deploymentplanner='UserDispersingPlanner'
    )
    self.virtual_machine_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_userdispersing.id,
        templateid=self.template.id
    )
    self.virtual_machine_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_userdispersing.id,
        templateid=self.template.id
    )
    list_vm_1 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_1.id)
    list_vm_2 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_2.id)
    self.assertEqual(
        isinstance(list_vm_1, list),
        True,
        "List VM response was not a valid list"
    )
    self.assertEqual(
        isinstance(list_vm_2, list),
        True,
        "List VM response was not a valid list"
    )
    vm1 = list_vm_1[0]
    vm2 = list_vm_2[0]
    self.assertEqual(
        vm1.state,
        "Running",
        msg="VM is not in Running state"
    )
    self.assertEqual(
        vm2.state,
        "Running",
        msg="VM is not in Running state"
    )
    # BUG FIX: filter() returns an iterator on Python 3 and cannot be
    # subscripted; a list comprehension works on both Python 2 and 3 and
    # raises the same IndexError when no host matches.
    vm1clusterid = [h for h in self.hosts if h.id == vm1.hostid][0].clusterid
    vm2clusterid = [h for h in self.hosts if h.id == vm2.hostid][0].clusterid
    if vm1clusterid == vm2clusterid:
        self.debug("VMs (%s, %s) meant to be dispersed are deployed in the same cluster %s" % (
            vm1.id, vm2.id, vm1clusterid))
def test_deployvm_userconcentrated(self):
    """Test deploy VMs using user concentrated planner

    Creates a service offering backed by the UserConcentratedPodPlanner,
    deploys two VMs with it, verifies both are Running, and asserts both
    were placed in the same pod.
    """
    self.service_offering_userconcentrated = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering"],
        deploymentplanner='UserConcentratedPodPlanner'
    )
    self.virtual_machine_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_userconcentrated.id,
        templateid=self.template.id
    )
    self.virtual_machine_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_userconcentrated.id,
        templateid=self.template.id
    )
    list_vm_1 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_1.id)
    list_vm_2 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_2.id)
    self.assertEqual(
        isinstance(list_vm_1, list),
        True,
        "List VM response was not a valid list"
    )
    self.assertEqual(
        isinstance(list_vm_2, list),
        True,
        "List VM response was not a valid list"
    )
    vm1 = list_vm_1[0]
    vm2 = list_vm_2[0]
    self.assertEqual(
        vm1.state,
        "Running",
        msg="VM is not in Running state"
    )
    self.assertEqual(
        vm2.state,
        "Running",
        msg="VM is not in Running state"
    )
    # BUG FIX: the original asserted assertNotEqual(vm1.hostid, vm2.hostid)
    # — i.e. it *required different hosts* for a planner whose whole point
    # is to concentrate VMs, and its failure message contradicted the
    # assertion.  The UserConcentratedPodPlanner concentrates at pod
    # granularity, so verify both VMs landed in the same pod, matching the
    # sibling implementation of this test elsewhere in the file.
    vm1clusterid = [h for h in self.hosts if h.id == vm1.hostid][0].clusterid
    vm2clusterid = [h for h in self.hosts if h.id == vm2.hostid][0].clusterid
    vm1podid = [c for c in self.clusters if c.id == vm1clusterid][0].podid
    vm2podid = [c for c in self.clusters if c.id == vm2clusterid][0].podid
    self.assertEqual(
        vm1podid,
        vm2podid,
        msg="VMs (%s, %s) meant to be pod concentrated are deployed on different pods (%s, %s)" % (
            vm1.id, vm2.id, vm1podid, vm2podid)
    )
def test_deployvm_userconcentrated(self):
    """Test deploy VMs using user concentrated planner

    Deploys two VMs with a UserConcentratedPodPlanner offering, checks
    both are Running, and asserts both landed in the same pod.
    """
    self.service_offering_userconcentrated = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering"],
        deploymentplanner='UserConcentratedPodPlanner')
    self.virtual_machine_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_userconcentrated.id,
        templateid=self.template.id)
    self.virtual_machine_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_userconcentrated.id,
        templateid=self.template.id)
    list_vm_1 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_1.id)
    list_vm_2 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_2.id)
    self.assertEqual(isinstance(list_vm_1, list), True,
                     "List VM response was not a valid list")
    self.assertEqual(isinstance(list_vm_2, list), True,
                     "List VM response was not a valid list")
    vm1 = list_vm_1[0]
    vm2 = list_vm_2[0]
    self.assertEqual(vm1.state, "Running", msg="VM is not in Running state")
    self.assertEqual(vm2.state, "Running", msg="VM is not in Running state")
    # BUG FIX: filter() is not subscriptable on Python 3; use list
    # comprehensions (equivalent on Python 2).
    vm1clusterid = [h for h in self.hosts if h.id == vm1.hostid][0].clusterid
    vm2clusterid = [h for h in self.hosts if h.id == vm2.hostid][0].clusterid
    vm1podid = [c for c in self.clusters if c.id == vm1clusterid][0].podid
    vm2podid = [c for c in self.clusters if c.id == vm2clusterid][0].podid
    # BUG FIX: the original message interpolated cluster ids into a string
    # that claims to report pod ids; report the pod ids that were compared.
    self.assertEqual(
        vm1podid,
        vm2podid,
        msg="VMs (%s, %s) meant to be pod concentrated are deployed on different pods (%s, %s)" % (
            vm1.id, vm2.id, vm1podid, vm2podid))
def test_deployvm_userdispersing(self):
    """Test deploy VMs using user dispersion planner

    Deploys two VMs with a UserDispersingPlanner offering, checks both
    are Running, and logs (without failing) when both VMs ended up in
    the same cluster, since dispersion is best-effort.
    """
    self.service_offering_userdispersing = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering"],
        deploymentplanner='UserDispersingPlanner')
    self.virtual_machine_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_userdispersing.id,
        templateid=self.template.id)
    self.virtual_machine_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_userdispersing.id,
        templateid=self.template.id)
    list_vm_1 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_1.id)
    list_vm_2 = VirtualMachine.list(self.apiclient, id=self.virtual_machine_2.id)
    self.assertEqual(isinstance(list_vm_1, list), True,
                     "List VM response was not a valid list")
    self.assertEqual(isinstance(list_vm_2, list), True,
                     "List VM response was not a valid list")
    vm1 = list_vm_1[0]
    vm2 = list_vm_2[0]
    self.assertEqual(vm1.state, "Running", msg="VM is not in Running state")
    self.assertEqual(vm2.state, "Running", msg="VM is not in Running state")
    # BUG FIX: filter() is not subscriptable on Python 3; use list
    # comprehensions (equivalent on Python 2).
    vm1clusterid = [h for h in self.hosts if h.id == vm1.hostid][0].clusterid
    vm2clusterid = [h for h in self.hosts if h.id == vm2.hostid][0].clusterid
    if vm1clusterid == vm2clusterid:
        self.debug(
            "VMs (%s, %s) meant to be dispersed are deployed in the same cluster %s" %
            (vm1.id, vm2.id, vm1clusterid))
def test_02_VPC_LBRulesAndVMListing(self):
    """ Test case no 211 and 228: List only VMs suitable for the Virtual Network on VPC for LB Rule
    """
    # Scenario:
    # 1. Create a VPC with cidr - 10.1.1.1/16
    # 2. Create a Network offering - NO1 with all supported services
    # 3. Add network1(10.1.1.1/24) using N01 to this VPC.
    # 4. Add network2(10.1.2.1/24) using N01 to this VPC.
    # 5. Deploy vm1 and vm2 in network1 on primary host.
    # 6. Deploy vm3 and vm4 in network2 on secondary host.
    # 7. Use the Create LB rule for vm1 and vm2 in network1.
    # 9. List LB rule for network1 list vms on network1 for selection of LB rule.
    lb_network = self.create_Network(self.services["network_offering"])
    no_lb_network = self.create_Network(
        self.services["network_offering_no_lb"], '10.1.2.1')
    vm_1 = self.create_VM_in_Network(lb_network)
    vm_2 = self.create_VM_in_Network(lb_network)
    vm_3 = self.create_VM_in_Network(no_lb_network)
    self.debug('vm_3=%s' % vm_3.id)
    vm_4 = self.create_VM_in_Network(no_lb_network)
    self.debug('vm_4=%s' % vm_4.id)

    # Create an LB rule over the two VMs of the LB-capable network, then
    # verify both the rule and the network's VMs can be listed back.
    acquired_ip = self.acquire_Public_IP(lb_network)
    created_rule = self.create_LB_Rule(acquired_ip, lb_network, [vm_1, vm_2])
    listed_rules = LoadBalancerRule.list(
        self.apiclient, id=created_rule.id, listall=True)
    self.failIfEqual(listed_rules, None, "Failed to list the LB Rule")
    listed_vms = VirtualMachine.list(
        self.apiclient, networkid=lb_network.id, listall=True)
    self.failIfEqual(
        listed_vms,
        None,
        "Failed to list the VMs in network=%s" % lb_network.name)
    return
def createInstance(self, account, service_off, networks=None, api_client=None):
    """Creates an instance in account"""
    client = self.apiclient if api_client is None else api_client
    self.debug("Deploying an instance in account: %s" % account.name)
    try:
        instance = VirtualMachine.create(
            client,
            self.services["virtual_machine"],
            templateid=self.template.id,
            accountid=account.name,
            domainid=account.domainid,
            networkids=networks,
            serviceofferingid=service_off.id)
        # Confirm the deployment actually produced a running VM before
        # handing it back to the caller.
        listed = VirtualMachine.list(client, id=instance.id, listall=True)
        self.assertIsInstance(
            listed, list, "List VMs should return a valid response")
        self.assertEqual(
            listed[0].state, "Running",
            "Vm state should be running after deployment")
        return instance
    except Exception as e:
        self.fail("Failed to deploy an instance: %s" % e)
def test_deployvm_firstfit(self):
    """Test to deploy vm with a first fit offering
    """
    # FIXME: How do we know that first fit actually happened?
    self.service_offering_firstfit = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering"],
        deploymentplanner="FirstFitPlanner")
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering_firstfit.id,
        templateid=self.template.id)
    # Fetch the VM back and verify the listing is sane and it is running.
    listed = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id)
    self.assertEqual(True, isinstance(listed, list),
                     "List VM response was not a valid list")
    self.assertNotEqual(0, len(listed),
                        "List VM response was empty")
    deployed = listed[0]
    self.assertEqual(deployed.state, "Running",
                     msg="VM is not in Running state")
def test_deployvm_firstfit(self):
    """Test to deploy vm with a first fit offering
    """
    #FIXME: How do we know that first fit actually happened?
    offering = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering"],
        deploymentplanner='FirstFitPlanner')
    self.service_offering_firstfit = offering
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=offering.id,
        templateid=self.template.id)
    vm_records = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id)
    # The listing must be a non-empty list and the VM must be running.
    self.assertEqual(isinstance(vm_records, list), True,
                     "List VM response was not a valid list")
    self.assertNotEqual(len(vm_records), 0,
                        "List VM response was empty")
    self.assertEqual(vm_records[0].state, "Running",
                     msg="VM is not in Running state")
def test_01_deploy_instance_with_is_volatile_offering(self):
    """ Test deploy an instance with service offerings with IsVolatile set.
    """
    # Validate the following
    # 1. Service offerings were created successfully
    # 2. Vms were successfully deployed with the service offerings.
    self.debug("Checking if deployed VMs are in running state...")
    deployed_vms = VirtualMachine.list(
        self.apiclient,
        account=self.account.name,
        domainid=self.account.domainid,
        listall=True)
    validation = validateList(deployed_vms)
    self.assertEqual(
        validation[0],
        PASS,
        "VM list validation failed due to %s" % validation[2])
    # Every VM deployed in the account must be running.
    for deployed_vm in deployed_vms:
        self.debug("VM name: %s, VM state: %s" % (deployed_vm.name, deployed_vm.state))
        self.debug("%s" % deployed_vm)
        self.assertEqual(
            deployed_vm.state,
            "Running",
            "Vm state should be running for each VM deployed")
    return
def DestroyVM(self, id):
    """Delete the VM with the given id and verify it ends up Destroyed."""
    loopback_nic = mocnic("127.0.0.1")
    target = VirtualMachine({"id": id, "nic": [loopback_nic]}, self.services)
    target.delete(self.apiclient)
    # The destroyed VM should still be listable, in Destroyed state.
    listed = VirtualMachine.list(
        self.apiclient,
        id=id
    )
    self.assertEqual(
        True,
        isinstance(listed, list),
        "Check list response returns a valid list"
    )
    self.assertNotEqual(
        0,
        len(listed),
        "Check VM avaliable in List Virtual Machines"
    )
    self.assertEqual(
        "Destroyed",
        listed[0].state,
        "Check virtual machine is in destroyed state"
    )
    return
def test_01_deploy_instance_with_is_volatile_offering(self):
    """ Test deploy an instance with service offerings with IsVolatile set.
    """
    # Validate the following
    # 1. Service offerings were created successfully
    # 2. Vms were successfully deployed with the service offerings.
    self.debug("Checking if deployed VMs are in running state...")
    account_vms = VirtualMachine.list(self.apiclient,
                                      account=self.account.name,
                                      domainid=self.account.domainid,
                                      listall=True)
    outcome = validateList(account_vms)
    self.assertEqual(outcome[0], PASS,
                     "VM list validation failed due to %s" % outcome[2])
    # All of the account's VMs must be in the Running state.
    for current in account_vms:
        self.debug("VM name: %s, VM state: %s" % (current.name, current.state))
        self.debug("%s" % current)
        self.assertEqual(current.state, "Running",
                         "Vm state should be running for each VM deployed")
    return
def createInstance(self, account, service_off, networks=None, api_client=None):
    """Creates an instance in account"""
    if api_client is None:
        api_client = self.apiclient
    self.debug("Deploying an instance in account: %s" % account.name)
    try:
        new_vm = VirtualMachine.create(
            api_client,
            self.services["virtual_machine"],
            templateid=self.template.id,
            accountid=account.name,
            domainid=account.domainid,
            networkids=networks,
            serviceofferingid=service_off.id)
    except Exception as e:
        self.fail("Failed to deploy an instance: %s" % e)
    else:
        # Sanity-check the deployment before returning the VM object.
        vm_records = VirtualMachine.list(api_client, id=new_vm.id, listall=True)
        self.assertIsInstance(vm_records, list,
                              "List VMs should return a valid response")
        self.assertEqual(vm_records[0].state, "Running",
                         "Vm state should be running after deployment")
        return new_vm
def setUp(self):
    """Provision an account, a small offering and a running VM for each test."""
    self.testdata = TestData().testdata
    self.apiclient = self.testClient.getApiClient()

    # Get Zone, Domain and Default Built-in template
    self.domain = get_domain(self.apiclient, self.testdata)
    self.zone = get_zone(self.apiclient, self.testdata)
    self.testdata["mode"] = self.zone.networktype
    self.template = get_template(self.apiclient,
                                 self.zone.id,
                                 self.testdata["ostype"])

    # create a user account
    self.account = Account.create(self.apiclient,
                                  self.testdata["account"],
                                  domainid=self.domain.id)
    # create a service offering
    self.service_offering = ServiceOffering.create(
        self.apiclient,
        self.testdata["service_offering"]["small"])
    # build cleanup list
    self.cleanup = [self.service_offering, self.account]

    # Validate the following:
    # 1. Virtual Machine is accessible via SSH
    # 2. listVirtualMachines returns accurate information
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id)
    vm_records = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id)
    self.assertEqual(True, isinstance(vm_records, list),
                     "List VM response was not a valid list")
    self.assertNotEqual(0, len(vm_records),
                        "List VM response was empty")
    listed_vm = vm_records[0]
    self.assertEqual(listed_vm.id, self.virtual_machine.id,
                     "Virtual Machine ids do not match")
    self.assertEqual(listed_vm.name, self.virtual_machine.name,
                     "Virtual Machine names do not match")
    self.assertEqual(listed_vm.state, "Running",
                     msg="VM is not in Running state")
def test_nic_secondaryip_add_remove(self):
    """Acquire a secondary IP on the VM's first nic, then remove it."""
    vm_records = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    first_nic_id = vm_records[0].nic[0].id
    add_cmd = addIpToNicCmd()
    add_cmd.nicid = first_nic_id
    add_response = self.apiclient.addIpToNic(add_cmd)
    self.debug('IP address acquired to nic is =%s' % add_response.ipaddress)
    #remove the ip from nic
    vm_records = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    nics_cmd = listNicsCmd()
    nics_cmd.virtualmachineid = self.virtual_machine.id
    nic_records = self.apiclient.listNics(nics_cmd)
    # The secondary IP just added shows up on the first nic's list.
    secondary_ip_id = nic_records[0].secondaryip[0].id
    remove_cmd = removeIpFromNicCmd()
    remove_cmd.id = secondary_ip_id
    response = self.apiclient.removeIpFromNic(remove_cmd)
def test_deploy_vm(self):
    """Test Deploy Virtual Machine

    # Validate the following:
    # 1. Virtual Machine is accessible via SSH
    # 2. listVirtualMachines returns accurate information
    """
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id)
    vm_records = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id)
    # Listing must be a non-empty list describing the deployed VM.
    self.assertEqual(True, isinstance(vm_records, list),
                     "List VM response was not a valid list")
    self.assertNotEqual(0, len(vm_records),
                        "List VM response was empty")
    listed_vm = vm_records[0]
    self.assertEqual(listed_vm.id, self.virtual_machine.id,
                     "Virtual Machine ids do not match")
    self.assertEqual(listed_vm.name, self.virtual_machine.name,
                     "Virtual Machine names do not match")
    self.assertEqual(listed_vm.state, "Running",
                     msg="VM is not in Running state")
def test_nic_secondaryip_add_remove(self):
    """Add a secondary IP to the VM's first nic and then release it again."""
    listed = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    target_nic = listed[0].nic[0]
    acquire = addIpToNicCmd()
    acquire.nicid = target_nic.id
    acquired = self.apiclient.addIpToNic(acquire)
    self.debug('IP address acquired to nic is =%s' % acquired.ipaddress)
    #remove the ip from nic
    listed = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    vmid = self.virtual_machine.id
    nic_query = listNicsCmd()
    nic_query.virtualmachineid = vmid
    nics = self.apiclient.listNics(nic_query)
    # Release the secondary IP that was just acquired on the first nic.
    release = removeIpFromNicCmd()
    release.id = nics[0].secondaryip[0].id
    response = self.apiclient.removeIpFromNic(release)
def test_01_VPC_LBRulesListing(self):
    """ Test case no 210 and 227: List Load Balancing Rules belonging to a VPC
    """
    # Validate the following
    # 1. Create a VPC with cidr - 10.1.1.1/16
    # 2. Create a Network offering - NO1 with all supported services
    # 3. Add network1(10.1.1.1/24) using N01 to this VPC.
    # 4. Add network2(10.1.2.1/24) using N01 to this VPC.
    # 5. Deploy vm1 and vm2 in network1.
    # 6. Deploy vm3 and vm4 in network2.
    # 7. Use the Create LB rule for vm1 and vm2 in network1.
    # 8. Use the Create LB rule for vm3 amd vm4 in network2, should fail
    #    because it's no_lb offering
    # 9. List LB rule
    network_1 = self.create_Network(self.services["network_offering"])
    network_2 = self.create_Network(
        self.services["network_offering_no_lb"], '10.1.2.1')
    self.debug("deploying VMs in network: %s" % network_2.name)
    vm_1 = self.create_VM_in_Network(network_1)
    vm_2 = self.create_VM_in_Network(network_1)
    vm_3 = self.create_VM_in_Network(network_2)
    vm_4 = self.create_VM_in_Network(network_2)
    public_ip_1 = self.acquire_Public_IP(network_1)
    lb_rule1 = self.create_LB_Rule(public_ip_1, network_1, [vm_1, vm_2])
    # BUG FIX: this acquisition was commented out, so public_ip_2 was
    # undefined and the assertRaises below passed on a NameError instead
    # of exercising the intended failure (LB rule on a no-LB offering).
    public_ip_2 = self.acquire_Public_IP(network_2)
    with self.assertRaises(Exception):
        self.create_LB_Rule(public_ip_2, network_2, [vm_3, vm_4])
    lb_rules = LoadBalancerRule.list(self.apiclient,
                                     id=lb_rule1.id,
                                     listall=True)
    self.failIfEqual(lb_rules,
                     None,
                     "Failed to list the LB Rule")
    vms = VirtualMachine.list(self.apiclient,
                              networkid=network_1.id,
                              listall=True)
    self.failIfEqual(vms,
                     None,
                     "Failed to list the VMs in network=%s" % network_1.name)
    return
def test_deploy_vm_multiple(self):
    """Test Multiple Deploy Virtual Machine

    # Validate the following:
    # 1. deploy 2 virtual machines
    # 2. listVirtualMachines using 'ids' parameter returns accurate information
    """
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id)
    self.virtual_machine2 = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine2"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id)
    # Both VMs should come back from a single ids= listing.
    requested_ids = [self.virtual_machine.id, self.virtual_machine2.id]
    vm_records = VirtualMachine.list(self.apiclient,
                                     ids=requested_ids,
                                     listAll=True)
    self.debug(
        "Verify listVirtualMachines response for virtual machines: %s, %s"
        % (self.virtual_machine.id, self.virtual_machine2.id))
    self.assertEqual(True, isinstance(vm_records, list),
                     "List VM response was not a valid list")
    self.assertEqual(2, len(vm_records),
                     "List VM response was empty, expected 2 VMs")
def test_02_VPC_LBRulesAndVMListing(self):
    """ Test case no 211 and 228: List only VMs suitable for the Virtual Network on VPC for LB Rule
    """
    # Validate the following
    # 1. Create a VPC with cidr - 10.1.1.1/16
    # 2. Create a Network offering - NO1 with all supported services
    # 3. Add network1(10.1.1.1/24) using N01 to this VPC.
    # 4. Add network2(10.1.2.1/24) using N01 to this VPC.
    # 5. Deploy vm1 and vm2 in network1 on primary host.
    # 6. Deploy vm3 and vm4 in network2 on secondary host.
    # 7. Use the Create LB rule for vm1 and vm2 in network1.
    # 9. List LB rule for network1 list vms on network1 for selection of LB rule.
    first_network = self.create_Network(self.services["network_offering"])
    second_network = self.create_Network(
        self.services["network_offering_no_lb"],
        '10.1.2.1')
    vm_1 = self.create_VM_in_Network(first_network)
    vm_2 = self.create_VM_in_Network(first_network)
    vm_3 = self.create_VM_in_Network(second_network)
    self.debug('vm_3=%s' % vm_3.id)
    vm_4 = self.create_VM_in_Network(second_network)
    self.debug('vm_4=%s' % vm_4.id)
    public_ip = self.acquire_Public_IP(first_network)
    new_rule = self.create_LB_Rule(public_ip, first_network, [vm_1, vm_2])
    # The rule and the network's VMs must both be listable afterwards.
    rule_listing = LoadBalancerRule.list(self.apiclient,
                                         id=new_rule.id,
                                         listall=True)
    self.failIfEqual(rule_listing,
                     None,
                     "Failed to list the LB Rule")
    vm_listing = VirtualMachine.list(self.apiclient,
                                     networkid=first_network.id,
                                     listall=True)
    self.failIfEqual(vm_listing,
                     None,
                     "Failed to list the VMs in network=%s" % first_network.name)
    return
def test_vm_ha(self):
    """Test VM HA

    # Validate the following:
    # VM started on other host in cluster
    """
    #wait for VM to HA
    timeout_cfg = Configurations.list(self.apiclient, name="ping.timeout")
    interval_cfg = Configurations.list(self.apiclient, name="ping.interval")
    total_duration = int(float(timeout_cfg[0].value) * float(interval_cfg[0].value))
    time.sleep(total_duration)

    # Poll until the VM shows up on a different host or the window expires.
    elapsed = 0
    vm = None
    while elapsed < total_duration:
        listed = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
        self.assertTrue(isinstance(listed, list) and len(listed) == 1,
                        msg="List VM response was empty")
        vm = listed[0]
        if vm.hostid != self.virtual_machine.hostid:
            break
        time.sleep(10)
        elapsed += 10

    self.assertEqual(vm.id,
                     self.virtual_machine.id,
                     "VM ids do not match")
    self.assertEqual(vm.name,
                     self.virtual_machine.name,
                     "VM names do not match")
    self.assertEqual(vm.state,
                     "Running",
                     msg="VM is not in Running state")
    self.assertNotEqual(vm.hostid,
                        self.virtual_machine.hostid,
                        msg="VM is not started on another host as part of HA")
def test_01_VPC_LBRulesListing(self):
    """ Test case no 210 and 227: List Load Balancing Rules belonging to a VPC
    """
    # Validate the following
    # 1. Create a VPC with cidr - 10.1.1.1/16
    # 2. Create a Network offering - NO1 with all supported services
    # 3. Add network1(10.1.1.1/24) using N01 to this VPC.
    # 4. Add network2(10.1.2.1/24) using N01 to this VPC.
    # 5. Deploy vm1 and vm2 in network1.
    # 6. Deploy vm3 and vm4 in network2.
    # 7. Use the Create LB rule for vm1 and vm2 in network1.
    # 8. Use the Create LB rule for vm3 amd vm4 in network2, should fail
    #    because it's no_lb offering
    # 9. List LB rule
    network_1 = self.create_Network(self.services["network_offering"])
    network_2 = self.create_Network(
        self.services["network_offering_no_lb"], '10.1.2.1')
    self.debug("deploying VMs in network: %s" % network_2.name)
    vm_1 = self.create_VM_in_Network(network_1)
    vm_2 = self.create_VM_in_Network(network_1)
    vm_3 = self.create_VM_in_Network(network_2)
    vm_4 = self.create_VM_in_Network(network_2)
    public_ip_1 = self.acquire_Public_IP(network_1)
    lb_rule1 = self.create_LB_Rule(public_ip_1, network_1, [vm_1, vm_2])
    # BUG FIX: the acquisition of public_ip_2 was commented out, leaving
    # the name undefined; the assertRaises below then passed on a
    # NameError rather than on the intended API failure (creating an LB
    # rule on a network whose offering has no LB service).
    public_ip_2 = self.acquire_Public_IP(network_2)
    with self.assertRaises(Exception):
        self.create_LB_Rule(public_ip_2, network_2, [vm_3, vm_4])
    lb_rules = LoadBalancerRule.list(self.apiclient,
                                     id=lb_rule1.id,
                                     listall=True)
    self.failIfEqual(lb_rules, None, "Failed to list the LB Rule")
    vms = VirtualMachine.list(self.apiclient,
                              networkid=network_1.id,
                              listall=True)
    self.failIfEqual(
        vms, None,
        "Failed to list the VMs in network=%s" % network_1.name)
    return
def CreateVM(self, newvm):
    """Deploy a VM described by newvm and return its id once it is Running."""
    owner_name = self.find_account(newvm["account"]).__dict__["name"]
    offering = self.find_service_offering(newvm["service_offering"])
    deployed = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        zoneid=self.zone.id,
        domainid=self.domain.id,
        templateid=self.template.id,
        accountid=owner_name,
        serviceofferingid=offering.id)
    listed = VirtualMachine.list(self.apiclient, id=deployed.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % deployed.id)
    # The listing must be a non-empty list and the VM must be running.
    self.assertEqual(True, isinstance(listed, list),
                     "List VM response was not a valid list")
    self.assertNotEqual(0, len(listed),
                        "List VM response was empty")
    self.assertEqual(listed[0].state, "Running",
                     msg="VM is not in Running state")
    return deployed.id
def test_deploy_vgpu_enabled_vm(self):
    """Test Deploy Virtual Machine

    # Validate the following:
    # 1. Virtual Machine is accessible via SSH
    # 2. Virtual Machine is vGPU enabled (via SSH)
    # 3. listVirtualMachines returns accurate information
    """
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["vgpu260q"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.services['mode']
    )
    list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id
    )
    self.assertEqual(
        isinstance(list_vms, list),
        True,
        "List VM response was not a valid list"
    )
    self.assertNotEqual(
        len(list_vms),
        0,
        "List VM response was empty"
    )
    vm = list_vms[0]
    self.assertEqual(
        vm.id,
        self.virtual_machine.id,
        "Virtual Machine ids do not match"
    )
    self.assertEqual(
        vm.name,
        self.virtual_machine.name,
        "Virtual Machine names do not match"
    )
    self.assertEqual(
        vm.state,
        "Running",
        msg="VM is not in Running state"
    )
    # BUG FIX: the original assigned `list_hosts = list_hosts(...)`, which
    # makes `list_hosts` a local name and raises UnboundLocalError before
    # the helper can be called.  Use a distinct local name.
    hosts = list_hosts(
        self.apiclient,
        id=vm.hostid
    )
    hostip = hosts[0].ipaddress
    try:
        sshClient = SshClient(host=hostip,
                              port=22,
                              user='******',
                              passwd=self.services["host_password"])
        # BUG FIX: the original format string had two %s placeholders but
        # only one argument, raising TypeError before the command ran.
        res = sshClient.execute(
            "xe vgpu-list vm-name-label=%s params=type-uuid" % (
                vm.instancename))
        self.debug("SSH result: %s" % res)
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" %
                  (hostip, e))
    result = str(res)
    # BUG FIX: the assertion message is shown on *failure*; the original
    # said "VM is vGPU enabled." which is the success condition.
    self.assertEqual(
        result.count("type-uuid"),
        1,
        "VM is not vGPU enabled."
    )
def test_04_reoccuring_snapshot_rules(self):
    """Test recurring snapshot rules against a volatile VM's root disk.

    1) Create a VM using the Service offering IsVolatile enabled
    2) Apply a recurring snapshot rule on the Volume.
    3) After a couple of snapshots are taken reboot the VM.

    Verify the following conditions
    1) New root disk should be formed
    2) The recurring snapshot rule should be deleted
    """
    vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" % vm_list_validation_result[2]
    )
    vm_with_reset = vm_list_validation_result[1]
    vm_with_reset_root_disk_id = self.get_root_device_uuid_for_vm(
        vm_with_reset.id, vm_with_reset.rootdeviceid)

    self.debug("Creating recurring snapshot policy for root disk on vm created with IsVolatile=True")
    self.debug(
        "Snapshot Policy - Type : %s Scheduled Hours : %s" %
        (self.services["recurring_snapshot"]["intervaltype"],
         self.services["recurring_snapshot"]["schedule"])
    )
    recurring_snapshot = SnapshotPolicy.create(
        self.apiclient,
        vm_with_reset_root_disk_id,
        self.services["recurring_snapshot"]
    )
    # ListSnapshotPolicy should return newly created policy
    list_snapshots_policy = SnapshotPolicy.list(
        self.apiclient,
        id=recurring_snapshot.id,
        volumeid=vm_with_reset_root_disk_id
    )
    snapshot_list_validation_result = validateList(list_snapshots_policy)
    self.assertEqual(
        snapshot_list_validation_result[0],
        PASS,
        "snapshot list validation failed due to %s" % snapshot_list_validation_result[2]
    )
    snapshots_policy = snapshot_list_validation_result[1]
    self.assertEqual(
        snapshots_policy.id,
        recurring_snapshot.id,
        "Check recurring snapshot id in list resources call"
    )
    self.assertEqual(
        snapshots_policy.maxsnaps,
        self.services["recurring_snapshot"]["maxsnaps"],
        "Check interval type in list resources call"
    )
    sleep_seconds = (self.services["recurring_snapshot"]["schedule"]) * 3600 + 600
    # BUG FIX: use integer division so the debug message shows whole
    # minutes (plain / yields a float on Python 3).
    sleep_minutes = sleep_seconds // 60
    # NOTE(review): the original debug string was broken by a literal line
    # break in the source; rejoined here into one single-line message.
    self.debug("Sleeping for %s minutes till the volume is snapshoted" % sleep_minutes)
    time.sleep(sleep_seconds)

    retriesCount = self.services["retriesCount"]
    while True:
        snapshots = Snapshot.list(
            self.apiclient,
            volumeid=vm_with_reset_root_disk_id,
            intervaltype=self.services["recurring_snapshot"]["intervaltype"],
            snapshottype=RECURRING,
            listall=True
        )
        snapshot_list_validation_result = validateList(snapshots)
        if snapshot_list_validation_result[0] == PASS:
            break
        elif retriesCount == 0:
            self.fail("Failed to get snapshots list")
        time.sleep(60)
        retriesCount = retriesCount - 1

    # rebooting the vm with isVolatile = True
    try:
        self.vm_with_reset.reboot(self.apiclient)
    except Exception as e:
        self.fail("Failed to reboot the virtual machine. Error: %s" % e)

    # Check if the the root disk was destroyed and recreated for isVolatile=True
    self.debug("Checking whether root disk of VM with isVolatile=True was destroyed")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "list validation failed due to %s" % vm_list_validation_result[2]
    )
    vm_with_reset_after_reboot = vm_list_validation_result[1]
    vm_with_reset_root_disk_id_after_reboot = self.get_root_device_uuid_for_vm(
        vm_with_reset_after_reboot.id,
        vm_with_reset_after_reboot.rootdeviceid
    )
    self.assertNotEqual(
        vm_with_reset_root_disk_id,
        vm_with_reset_root_disk_id_after_reboot,
        "VM created with IsVolatile=True has same rootdeviceid : %s after reboot" %
        vm_with_reset_root_disk_id_after_reboot
    )
    # Make sure it has the same IP after reboot
    # NOTE(review): this message was also split by a literal line break in
    # the source; rejoined into a single-line format string.
    self.assertEqual(
        vm_with_reset.nic[0].ipaddress,
        vm_with_reset_after_reboot.nic[0].ipaddress,
        "VM created with IsVolatile=True doesn't have same ip after reboot. Got : %s Expected : %s" %
        (vm_with_reset_after_reboot.nic[0].ipaddress, vm_with_reset.nic[0].ipaddress)
    )
    # Check whether the recurring policy has been deleted from the database
    self.debug(
        "Checking whether snapshot rule for VM with isVolatile=True was destroyed \
        Here we are passing root disk id of vm before reboot which does not exist hence\
        listing should fail"
    )
    with self.assertRaises(Exception):
        list_snapshots_policy = SnapshotPolicy.list(
            self.apiclient, volumeid=vm_with_reset_root_disk_id)
    return
def test_04_reoccuring_snapshot_rules(self):
    """
    1) Create a VM using the Service offering IsVolatile enabled
    2) Apply a recurring snapshot rule on the Volume.
    3) After a couple of snapshots are taken reboot the VM.

    Verify the following conditions
    1) New root disk should be formed
    2) The recurring snapshot rule should be deleted
    """
    # Re-list the volatile VM so we operate on fresh API data.
    vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0],
                     PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    vm_with_reset = vm_list_validation_result[1]
    # Root-disk uuid BEFORE the reboot; the volatile reboot should replace it.
    vm_with_reset_root_disk_id = self.get_root_device_uuid_for_vm(
        vm_with_reset.id,
        vm_with_reset.rootdeviceid)

    # Schedule the recurring snapshot ~15 minutes from now; only the minute
    # component is written into the snapshot schedule.
    now = datetime.now()
    delta = timedelta(minutes=15)
    scheduled_time = now + delta
    self.services["recurring_snapshot"]["schedule"] = scheduled_time.minute
    self.debug(
        "Creating recurring snapshot policy for root disk on vm created with IsVolatile=True"
    )
    self.debug("Snapshot Policy - Type : %s Scheduled minute : %s" %
               (self.services["recurring_snapshot"]["intervaltype"],
                self.services["recurring_snapshot"]["schedule"]))
    recurring_snapshot = SnapshotPolicy.create(
        self.apiclient,
        vm_with_reset_root_disk_id,
        self.services["recurring_snapshot"])

    #ListSnapshotPolicy should return newly created policy
    list_snapshots_policy = SnapshotPolicy.list(
        self.apiclient,
        id=recurring_snapshot.id,
        volumeid=vm_with_reset_root_disk_id)
    snapshot_list_validation_result = validateList(list_snapshots_policy)
    self.assertEqual(snapshot_list_validation_result[0],
                     PASS,
                     "snapshot list validation failed due to %s" %
                     snapshot_list_validation_result[2])
    snapshots_policy = snapshot_list_validation_result[1]
    self.assertEqual(snapshots_policy.id,
                     recurring_snapshot.id,
                     "Check recurring snapshot id in list resources call")
    self.assertEqual(snapshots_policy.maxsnaps,
                     self.services["recurring_snapshot"]["maxsnaps"],
                     "Check interval type in list resources call")

    # Sleep past the scheduled minute plus a 10-minute buffer so at least
    # one recurring snapshot has a chance to be taken.
    sleep_seconds = delta.seconds + 600
    sleep_minutes = sleep_seconds / 60
    self.debug("Sleeping for %s minutes till the volume is snapshoted" % sleep_minutes)
    time.sleep(sleep_seconds)

    # Poll with a bounded retry budget until a recurring snapshot appears.
    retriesCount = self.services["retriesCount"]
    while True:
        snapshots = Snapshot.list(
            self.apiclient,
            volumeid=vm_with_reset_root_disk_id,
            intervaltype=self.services["recurring_snapshot"]["intervaltype"],
            snapshottype=RECURRING,
            listall=True)
        snapshot_list_validation_result = validateList(snapshots)
        if snapshot_list_validation_result[0] == PASS:
            break
        elif retriesCount == 0:
            self.fail("Failed to get snapshots list")
        time.sleep(60)
        retriesCount = retriesCount - 1

    # rebooting the vm with isVolatile = True
    try:
        self.vm_with_reset.reboot(self.apiclient)
    except Exception as e:
        self.fail("Failed to reboot the virtual machine. Error: %s" % e)

    # Check if the the root disk was destroyed and recreated for isVolatile=True
    self.debug(
        "Checking whether root disk of VM with isVolatile=True was destroyed"
    )
    vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0],
                     PASS,
                     "list validation failed due to %s" %
                     vm_list_validation_result[2])
    vm_with_reset_after_reboot = vm_list_validation_result[1]
    vm_with_reset_root_disk_id_after_reboot = self.get_root_device_uuid_for_vm(
        vm_with_reset_after_reboot.id,
        vm_with_reset_after_reboot.rootdeviceid)
    # Volatile reboot must have produced a brand new root device.
    self.assertNotEqual(vm_with_reset_root_disk_id,
                        vm_with_reset_root_disk_id_after_reboot,
                        "VM created with IsVolatile=True has same rootdeviceid : %s after reboot" %
                        vm_with_reset_root_disk_id_after_reboot)
    # Make sure it has the same IP after reboot
    self.assertEqual(vm_with_reset.nic[0].ipaddress,
                     vm_with_reset_after_reboot.nic[0].ipaddress,
                     "VM created with IsVolatile=True doesn't have same ip after reboot. Got : %s Expected : %s" %
                     (vm_with_reset_after_reboot.nic[0].ipaddress,
                      vm_with_reset.nic[0].ipaddress))

    # Check whether the recurring policy has been deleted from the database
    self.debug(
        "Checking whether snapshot rule for VM with isVolatile=True was destroyed \
        Here we are passing root disk id of vm before reboot which does not exist hence\
        listing should fail")
    with self.assertRaises(Exception):
        list_snapshots_policy = SnapshotPolicy.list(
            self.apiclient,
            volumeid=vm_with_reset_root_disk_id)
    return
def test_03_restore_vm_with_new_template(self):
    """Test restoring a vm with different template than the one it was created with

    Steps:
    1. Register, download and verify a new template that matches the
       cluster's hypervisor.
    2. Restore both VMs (isVolatile=True and isVolatile=False) with it.
    3. Both VMs must carry the new template id afterwards while keeping
       their original IP addresses.
    """
    hosts = Host.list(self.apiclient, type="Routing", listall=True)
    host_list_validation_result = validateList(hosts)
    self.assertEqual(host_list_validation_result[0], PASS,
                     "host list validation failed due to %s" %
                     host_list_validation_result[2])
    hypervisor = host_list_validation_result[1].hypervisor

    template = None
    for k, v in self.services["templates"].items():
        # Case-insensitive match (consistent with the sibling copy of this
        # test): the API may report e.g. "XenServer" vs a "xenserver" key.
        if k.lower() == hypervisor.lower():
            # Register new template
            template = Template.register(self.apiclient, v,
                                         zoneid=self.zone.id,
                                         account=self.account.name,
                                         domainid=self.account.domainid)
            self.debug("Registered a template of format: %s with ID: %s" %
                       (v["format"], template.id))
            self.debug("Downloading template with ID: %s" % (template.id))
            template.download(self.apiclient)
            self.cleanup.append(template)
            # Wait for template status to be changed across
            time.sleep(self.services["sleep"])
            self.verify_template_listing(template)
            break
    # Guard against 'template' being unbound when no entry matches.
    self.assertIsNotNone(template,
                         "No template defined for hypervisor %s" % hypervisor)

    # Restore a vm with the new template.
    self.vm_with_reset.restore(self.apiclient, templateid=template.id)
    self.vm_without_reset.restore(self.apiclient, templateid=template.id)

    # Make sure the VMs now have the new template ID
    # Make sure the Ip address of the VMs haven't changed
    self.debug("Checking template id of VM with isVolatile=True")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "VM list validation failed due to %s" %
                     vm_list_validation_result[2])
    vm_with_reset = vm_list_validation_result[1]
    self.assertNotEqual(self.vm_with_reset.templateid,
                        vm_with_reset.templateid,
                        "VM created with IsVolatile=True has same templateid : %s after restore" %
                        vm_with_reset.templateid)
    # BUG FIX: the restored VM must carry the NEW template id. The original
    # assertNotEqual(self.vm_with_reset.templateid, template.id) compared the
    # pre-restore id and could never detect a failed restore.
    self.assertEqual(vm_with_reset.templateid, template.id,
                     "VM created with IsVolatile=True has wrong templateid after restore Got:%s Expected: %s" %
                     (vm_with_reset.templateid, template.id))
    # Make sure it has the same IP after reboot
    self.assertEqual(self.vm_with_reset.nic[0].ipaddress,
                     vm_with_reset.nic[0].ipaddress,
                     "VM created with IsVolatile=True doesn't have same ip after restore. Got : %s Expected : %s" %
                     (vm_with_reset.nic[0].ipaddress,
                      self.vm_with_reset.nic[0].ipaddress))

    # Check if the the root disk was not destroyed for isVolatile=False
    self.debug("Checking template id of VM with isVolatile=False")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_without_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "VM list validation failed due to %s" %
                     vm_list_validation_result[2])
    vm_without_reset = vm_list_validation_result[1]
    # BUG FIX: message previously interpolated vm_with_reset.templateid here.
    self.assertNotEqual(self.vm_without_reset.templateid,
                        vm_without_reset.templateid,
                        "VM created with IsVolatile=False has same templateid : %s after restore" %
                        vm_without_reset.templateid)
    # BUG FIX: same post-restore equality check as for the volatile VM.
    self.assertEqual(vm_without_reset.templateid, template.id,
                     "VM created with IsVolatile=False has wrong templateid after restore Got:%s Expected: %s" %
                     (vm_without_reset.templateid, template.id))
    # Make sure it has the same IP after reboot
    self.assertEqual(self.vm_without_reset.nic[0].ipaddress,
                     vm_without_reset.nic[0].ipaddress,
                     "VM created with IsVolatile=False doesn't have same ip after restore. Got : %s Expected : %s" %
                     (vm_without_reset.nic[0].ipaddress,
                      self.vm_without_reset.nic[0].ipaddress))
    return
def test_02_reboot_instance_with_is_volatile_offering(self):
    """Test rebooting instances created with isVolatile service offerings

    Steps:
    1. Reboot both virtual machines.
    2. Validate that:
       a. the VM created with isVolatile=True gets a new root disk but keeps its IP
       b. the VM created with isVolatile=False keeps both its root disk and its IP
    """
    self.debug("Rebooting the virtual machines in account: %s" % self.account.name)
    try:
        self.vm_with_reset.reboot(self.apiclient)
        self.vm_without_reset.reboot(self.apiclient)
    except Exception as e:
        self.fail("Failed to reboot the virtual machines, %s" % e)

    # Check if the the root disk was destroyed and recreated for isVolatile=True
    self.debug("Checking root disk of VM with isVolatile=True")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "VM list validation failed due to %s" %
                     vm_list_validation_result[2])
    vm_with_reset = vm_list_validation_result[1]
    vm_with_reset_root_disk_id = self.get_root_device_uuid_for_vm(
        vm_with_reset.id, vm_with_reset.rootdeviceid)
    # Volatile reboot must have produced a brand-new root device.
    self.assertNotEqual(self.vm_with_reset_root_disk_id,
                        vm_with_reset_root_disk_id,
                        "VM created with IsVolatile=True has same rootdeviceid : %s after reboot" %
                        vm_with_reset_root_disk_id)
    # Make sure it has the same IP after reboot
    self.assertEqual(self.vm_with_reset.nic[0].ipaddress,
                     vm_with_reset.nic[0].ipaddress,
                     "VM created with IsVolatile=True doesn't have same ip after reboot. Got : %s Expected : %s" %
                     (vm_with_reset.nic[0].ipaddress,
                      self.vm_with_reset.nic[0].ipaddress))

    # Check if the the root disk was not destroyed for isVolatile=False
    self.debug("Checking root disk of VM with isVolatile=False")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_without_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "list validation failed due to %s" %
                     vm_list_validation_result[2])
    vm_without_reset = vm_list_validation_result[1]
    vm_without_reset_root_disk_id = self.get_root_device_uuid_for_vm(
        vm_without_reset.id, vm_without_reset.rootdeviceid)
    self.assertEqual(self.vm_without_reset_root_disk_id,
                     vm_without_reset_root_disk_id,
                     "VM created with IsVolatile=False has different rootdeviceid after reboot Got: %s Expected : %s" %
                     (vm_without_reset_root_disk_id,
                      self.vm_without_reset_root_disk_id))
    # Make sure it has the same IP after reboot
    # BUG FIX: message previously said "IsVolatile=True" although this
    # assertion checks the isVolatile=False VM.
    self.assertEqual(self.vm_without_reset.nic[0].ipaddress,
                     vm_without_reset.nic[0].ipaddress,
                     "VM created with IsVolatile=False doesn't have same ip after reboot. Got : %s Expected : %s" %
                     (vm_without_reset.nic[0].ipaddress,
                      self.vm_without_reset.nic[0].ipaddress))
    return
def test_03_restore_vm_with_new_template(self):
    """Test restoring a vm with different template than the one it was created with

    Steps:
    1. Register, download and verify a new template that matches the
       cluster's hypervisor.
    2. Restore both VMs (isVolatile=True and isVolatile=False) with it.
    3. Both VMs must carry the new template id afterwards while keeping
       their original IP addresses.
    """
    hosts = Host.list(self.apiclient, type="Routing", listall=True)
    host_list_validation_result = validateList(hosts)
    self.assertEqual(
        host_list_validation_result[0],
        PASS,
        "host list validation failed due to %s" % host_list_validation_result[2],
    )
    hypervisor = host_list_validation_result[1].hypervisor

    template = None
    for k, v in self.services["templates"].items():
        if k.lower() == hypervisor.lower():
            # Register new template
            template = Template.register(
                self.apiclient,
                v,
                zoneid=self.zone.id,
                account=self.account.name,
                domainid=self.account.domainid
            )
            self.debug("Registered a template of format: %s with ID: %s" % (v["format"], template.id))
            self.debug("Downloading template with ID: %s" % (template.id))
            template.download(self.apiclient)
            self.cleanup.append(template)
            # Wait for template status to be changed across
            time.sleep(self.services["sleep"])
            self.verify_template_listing(template)
            break
    # Guard against 'template' being unbound when no entry matches.
    self.assertIsNotNone(template, "No template defined for hypervisor %s" % hypervisor)

    # Restore a vm with the new template.
    self.vm_with_reset.restore(self.apiclient, templateid=template.id)
    self.vm_without_reset.restore(self.apiclient, templateid=template.id)

    # Make sure the VMs now have the new template ID
    # Make sure the Ip address of the VMs haven't changed
    self.debug("Checking template id of VM with isVolatile=True")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "VM list validation failed due to %s" % vm_list_validation_result[2],
    )
    vm_with_reset = vm_list_validation_result[1]
    self.assertNotEqual(
        self.vm_with_reset.templateid,
        vm_with_reset.templateid,
        "VM created with IsVolatile=True has same templateid : %s after restore" % vm_with_reset.templateid,
    )
    # BUG FIX: the restored VM must carry the NEW template id. The original
    # assertNotEqual(self.vm_with_reset.templateid, template.id) compared the
    # pre-restore id and could never detect a failed restore.
    self.assertEqual(
        vm_with_reset.templateid,
        template.id,
        "VM created with IsVolatile=True has wrong templateid after restore Got:%s Expected: %s"
        % (vm_with_reset.templateid, template.id),
    )
    # Make sure it has the same IP after reboot
    self.assertEqual(
        self.vm_with_reset.nic[0].ipaddress,
        vm_with_reset.nic[0].ipaddress,
        "VM created with IsVolatile=True doesn't have same ip after restore. Got : %s Expected : %s"
        % (vm_with_reset.nic[0].ipaddress, self.vm_with_reset.nic[0].ipaddress),
    )

    # Check if the the root disk was not destroyed for isVolatile=False
    self.debug("Checking template id of VM with isVolatile=False")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_without_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "VM list validation failed due to %s" % vm_list_validation_result[2],
    )
    vm_without_reset = vm_list_validation_result[1]
    # BUG FIX: message previously interpolated vm_with_reset.templateid here.
    self.assertNotEqual(
        self.vm_without_reset.templateid,
        vm_without_reset.templateid,
        "VM created with IsVolatile=False has same templateid : %s after restore" % vm_without_reset.templateid,
    )
    # BUG FIX: same post-restore equality check as for the volatile VM.
    self.assertEqual(
        vm_without_reset.templateid,
        template.id,
        "VM created with IsVolatile=False has wrong templateid after restore Got:%s Expected: %s"
        % (vm_without_reset.templateid, template.id),
    )
    # Make sure it has the same IP after reboot
    self.assertEqual(
        self.vm_without_reset.nic[0].ipaddress,
        vm_without_reset.nic[0].ipaddress,
        "VM created with IsVolatile=False doesn't have same ip after restore. Got : %s Expected : %s"
        % (vm_without_reset.nic[0].ipaddress, self.vm_without_reset.nic[0].ipaddress),
    )
    return
def test_vmware_anti_affinity(self):
    """ Test Set up anti-affinity rules

    The test requires following pre-requisites
    - VMWare cluster configured in fully automated mode

    Steps:
    1. Deploy VMs on host 1 and 2 (anti-affinity keeps them apart)
    2. Enable maintenance mode for host 1
    3. The displaced VM must land on a 3rd host, never on host 2
    """
    hosts = Host.list(self.apiclient, zoneid=self.zone.id,
                      resourcestate='Enabled', type='Routing')
    self.assertEqual(isinstance(hosts, list), True,
                     "List hosts should return valid host response")
    self.debug(len(hosts))
    self.assertGreaterEqual(len(hosts), 3,
                            "There must be at least 3 hosts present in a cluster")
    aff_grp = self.create_aff_grp(aff_grp=self.services["host_anti_affinity"],
                                  acc=self.account.name,
                                  domainid=self.domain.id)
    vm_1 = VirtualMachine.create(self.apiclient,
                                 self.services["virtual_machine"],
                                 accountid=self.account.name,
                                 domainid=self.domain.id,
                                 serviceofferingid=self.service_offering.id,
                                 affinitygroupnames=[aff_grp.name])
    vm_2 = VirtualMachine.create(self.apiclient,
                                 self.services["virtual_machine"],
                                 accountid=self.account.name,
                                 domainid=self.domain.id,
                                 serviceofferingid=self.service_offering.id,
                                 affinitygroupnames=[aff_grp.name])
    # These are host IDs (plain ids, not Host objects).
    host_1 = vm_1.hostid
    host_2 = vm_2.hostid

    vms = VirtualMachine.list(self.apiclient, id=vm_1.id, listall=True)
    vm_list_validation_result = validateList(vms)
    # BUG FIX: the failure reason is at index 2; index 1 is the list element.
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    self.debug("VM State: %s" % virtual_machine_1.state)
    self.assertEqual(virtual_machine_1.state, "Running",
                     "Deployed VM should be in RUnning state")

    vms = VirtualMachine.list(self.apiclient, id=vm_2.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.debug("VM %s State: %s" % (virtual_machine_2.name,
                                    virtual_machine_2.state))
    self.assertEqual(virtual_machine_2.state, "Running",
                     "Deployed VM should be in RUnning state")

    self.debug("Enabling maintenance mode on host_1: %s" % host_1)
    cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.prepareHostForMaintenance(cmd)

    # Poll until host_1 actually reaches the Maintenance resource state.
    timeout = self.services["timeout"]
    while True:
        hosts = Host.list(self.apiclient, zoneid=self.zone.id,
                          type='Routing', id=host_1)
        host_list_validation_result = validateList(hosts)
        self.assertEqual(host_list_validation_result[0], PASS,
                         "host list validation failed due to %s" %
                         host_list_validation_result[2])
        host = host_list_validation_result[1]
        if host.resourcestate == 'Maintenance':
            break
        elif timeout == 0:
            self.fail("Failed to put host: %s in maintenance mode" % host.name)
        time.sleep(self.services["sleep"])
        timeout = timeout - 1

    vms = VirtualMachine.list(self.apiclient, id=virtual_machine_1.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    # BUG FIX: the VM object is at index 1; index 0 is the PASS/FAIL status
    # on which the .state/.hostid accesses below would blow up.
    vm = vm_list_validation_result[1]
    self.assertEqual(vm.state, "Running",
                     "Deployed VM should be in RUnning state")
    self.assertNotEqual(vm.hostid, host_2,
                        "The host name should not match with second host name")

    # BUG FIX: host_1 already holds the host id, so the original host_1.id
    # attribute access would raise AttributeError.
    self.debug("Canceling host maintenance for ID: %s" % host_1)
    cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.cancelHostMaintenance(cmd)
    self.debug("Maintenance mode canceled for host: %s" % host_1)
    return
def setUp(self):
    """Per-test fixture: resolve zone/domain/template, create an account,
    a small service offering and one running VM, then sanity-check the
    listVirtualMachines output for that VM."""
    self.testdata = TestData().testdata
    self.apiclient = self.testClient.getApiClient()

    # Resolve zone, domain and the default built-in template once per test.
    self.domain = get_domain(self.apiclient, self.testdata)
    self.zone = get_zone(self.apiclient, self.testdata)
    self.testdata["mode"] = self.zone.networktype
    self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"])

    #create a user account
    self.account = Account.create(
        self.apiclient,
        self.testdata["account"],
        domainid=self.domain.id)

    #create a service offering
    self.service_offering = ServiceOffering.create(
        self.apiclient,
        self.testdata["service_offering"]["small"])

    #build cleanup list
    self.cleanup = [self.service_offering, self.account]

    # Validate the following:
    # 1. Virtual Machine is accessible via SSH
    # 2. listVirtualMachines returns accurate information
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id)

    vm_listing = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id)
    self.assertEqual(isinstance(vm_listing, list), True,
                     "List VM response was not a valid list")
    self.assertNotEqual(len(vm_listing), 0, "List VM response was empty")

    listed_vm = vm_listing[0]
    # The listed record must describe the VM we just deployed.
    for actual, expected, message in (
            (listed_vm.id, self.virtual_machine.id, "Virtual Machine ids do not match"),
            (listed_vm.name, self.virtual_machine.name, "Virtual Machine names do not match")):
        self.assertEqual(actual, expected, message)
    self.assertEqual(listed_vm.state, "Running", msg="VM is not in Running state")
def test_vmware_anti_affinity(self):
    """ Test Set up anti-affinity rules

    The test requires following pre-requisites
    - VMWare cluster configured in fully automated mode

    Steps:
    1. Deploy VMs on host 1 and 2 (anti-affinity keeps them apart)
    2. Enable maintenance mode for host 1
    3. The displaced VM must land on a 3rd host, never on host 2
    """
    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing')
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return valid host response")
    self.debug(len(hosts))
    self.assertGreaterEqual(
        len(hosts),
        3,
        "There must be at least 3 hosts present in a cluster")
    aff_grp = self.create_aff_grp(aff_grp=self.services["host_anti_affinity"],
                                  acc=self.account.name,
                                  domainid=self.domain.id)
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])
    # These are host IDs (plain ids, not Host objects).
    host_1 = vm_1.hostid
    host_2 = vm_2.hostid

    vms = VirtualMachine.list(self.apiclient, id=vm_1.id, listall=True)
    vm_list_validation_result = validateList(vms)
    # BUG FIX: the failure reason is at index 2; index 1 is the list element.
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    self.debug("VM State: %s" % virtual_machine_1.state)
    self.assertEqual(
        virtual_machine_1.state,
        "Running",
        "Deployed VM should be in RUnning state")

    vms = VirtualMachine.list(self.apiclient, id=vm_2.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.debug("VM %s State: %s" % (
        virtual_machine_2.name,
        virtual_machine_2.state))
    self.assertEqual(
        virtual_machine_2.state,
        "Running",
        "Deployed VM should be in RUnning state")

    self.debug("Enabling maintenance mode on host_1: %s" % host_1)
    cmd = prepareHostForMaintenance.prepareHostForMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.prepareHostForMaintenance(cmd)

    # Poll until host_1 actually reaches the Maintenance resource state.
    timeout = self.services["timeout"]
    while True:
        hosts = Host.list(
            self.apiclient,
            zoneid=self.zone.id,
            type='Routing',
            id=host_1)
        host_list_validation_result = validateList(hosts)
        self.assertEqual(host_list_validation_result[0], PASS,
                         "host list validation failed due to %s" %
                         host_list_validation_result[2])
        host = host_list_validation_result[1]
        if host.resourcestate == 'Maintenance':
            break
        elif timeout == 0:
            self.fail("Failed to put host: %s in maintenance mode" % host.name)
        time.sleep(self.services["sleep"])
        timeout = timeout - 1

    vms = VirtualMachine.list(
        self.apiclient,
        id=virtual_machine_1.id,
        listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    # BUG FIX: the VM object is at index 1; index 0 is the PASS/FAIL status
    # on which the .state/.hostid accesses below would blow up.
    vm = vm_list_validation_result[1]
    self.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in RUnning state")
    self.assertNotEqual(
        vm.hostid,
        host_2,
        "The host name should not match with second host name")

    # BUG FIX: host_1 already holds the host id, so the original host_1.id
    # attribute access would raise AttributeError.
    self.debug("Canceling host maintenance for ID: %s" % host_1)
    cmd = cancelHostMaintenance.cancelHostMaintenanceCmd()
    cmd.id = host_1
    self.apiclient.cancelHostMaintenance(cmd)
    self.debug("Maintenance mode canceled for host: %s" % host_1)
    return
def test_check_vm_stats(self, value):
    """Deploy VM with dynamic service offering and check VM stats"""
    # Steps:
    # 1. Create admin/user account and create its user api client
    # 2. Create a dynamic service offering
    # 3. Deploy a VM with account api client and dynamic service offering
    #    providing custom values for cpu number, cpu speed and memory
    # 4. List the VM and verify the dynamic parameters are same as passed
    isadmin = value != USER_ACCOUNT

    # Create Account and api client
    self.account = Account.create(self.apiclient,
                                  self.services["account"],
                                  domainid=self.domain.id,
                                  admin=isadmin)
    apiclient = self.testClient.createUserApiClient(
        UserName=self.account.name,
        DomainName=self.account.domain)
    self.cleanup.append(self.account)

    # A "dynamic" offering leaves cpu/memory blank so the deployer supplies them.
    for attr in ("cpunumber", "cpuspeed", "memory"):
        self.services["service_offering"][attr] = ""
    serviceOffering = ServiceOffering.create(self.apiclient,
                                             self.services["service_offering"])
    self.cleanup_co.append(serviceOffering)

    # Custom values
    customcpunumber = 2
    customcpuspeed = 256
    custommemory = 128

    # Deploy VM with dynamic service offering and the custom values
    try:
        virtualMachine = VirtualMachine.create(apiclient,
                                               self.services["virtual_machine"],
                                               serviceofferingid=serviceOffering.id,
                                               customcpunumber=customcpunumber,
                                               customcpuspeed=customcpuspeed,
                                               custommemory=custommemory,
                                               accountid=self.account.name,
                                               domainid=self.account.domainid)
    except Exception as e:
        self.fail("vm creation failed: %s" % e)

    vmlist = VirtualMachine.list(self.apiclient, id=virtualMachine.id)
    self.assertEqual(validateList(vmlist)[0], PASS, "vm list validation failed")
    vm = vmlist[0]

    # Each dynamic value must round-trip through the API unchanged.
    checks = (
        (vm.cpunumber, customcpunumber,
         "vm cpu number %s not matching with provided custom cpu number %s"),
        (vm.cpuspeed, customcpuspeed,
         "vm cpu speed %s not matching with provided custom cpu speed %s"),
        (vm.memory, custommemory,
         "vm memory %s not matching with provided custom memory %s"),
    )
    for actual, expected, msg_template in checks:
        self.assertEqual(str(actual), str(expected),
                         msg_template % (actual, expected))
    return
def test_vmware_affinity(self):
    """ Test Set up affinity rules

    The test requires following pre-requisites
    - VMWare cluster configured in fully automated mode
    """
    # Validate the following
    # 1. Deploy 2 VMs on same hosts
    # 2. Migrate one VM from one host to another
    # 3. The second VM should also get migrated
    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing')
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return valid host response")
    self.assertGreaterEqual(
        len(hosts),
        2,
        "There must be two hosts present in a cluster")
    host_1 = hosts[0].id
    host_2 = hosts[1].id
    # Host-affinity group: members are expected to stay on the same host.
    aff_grp = self.create_aff_grp(aff_grp=self.services["host_affinity"],
                                  acc=self.account.name,
                                  domainid=self.domain.id)
    # vm_1 is pinned to host_1; vm_2 is placed by the planner, subject to
    # the same affinity group.
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name],
        hostid = host_1)
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])
    vms = VirtualMachine.list(
        self.apiclient,
        id= vm_1.id,
        listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    self.assertEqual(
        virtual_machine_1.state,
        "Running",
        "Deployed VM should be in RUnning state")
    self.debug("Deploying VM on account: %s" % self.account.name)
    vms = VirtualMachine.list(
        self.apiclient,
        id=vm_2.id,
        listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.assertEqual(
        virtual_machine_2.state,
        "Running",
        "Deployed VM should be in RUnning state")
    # Migrate only vm_2 to host_2; the assertions below then expect vm_1 to
    # end up on host_2 as well (automatic migration of the affinity partner).
    self.debug("Migrate VM from host_1 to host_2")
    cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
    cmd.virtualmachineid = virtual_machine_2.id
    cmd.hostid = host_2
    self.apiclient.migrateVirtualMachine(cmd)
    self.debug("Migrated VM from host_1 to host_2")
    vms = VirtualMachine.list(
        self.apiclient,
        hostid=host_2,
        listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(vm_list_validation_result[0], PASS,
                     "vm list validation failed due to %s" %
                     vm_list_validation_result[2])
    vmids = [vm.id for vm in vms]
    self.assertIn(
        virtual_machine_1.id,
        vmids,
        "VM 1 should be successfully migrated to host 2")
    self.assertIn(
        virtual_machine_2.id,
        vmids,
        "VM 2 should be automatically migrated to host 2")
    return
def test_update_vm_name(self):
    """Test Update VirtualMachine Name

    # Validate the following:
    # 1. VirtualMachine has uuid name, displayname
    # 2. listVirtualMachines returns accurate information
    # 3. Stop the VM
    # 4. updateVirtualmachine no args and then new displayname
    # 5. listVirtualMachines nad check the displayName set
    # 6. start the VM
    # 7. Verify displayName is still set
    """
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id)

    listing = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.assertEqual(isinstance(listing, list), True,
                     "List VM response was not a valid list")
    self.assertNotEqual(len(listing), 0, "List VM response was empty")

    original_vm = listing[0]
    self.debug(
        "VirtualMachine launched with id, name, displayname: %s %s %s"
        % (self.virtual_machine.id, original_vm.name, original_vm.displayname))
    self.assertEqual(original_vm.state, "Running",
                     msg="VM is not in Running state")

    # The VM has to be stopped before its display name can be updated.
    self.debug("Stopping VirtualMachine to update displayname")
    self.virtual_machine.stop(self.apiclient)

    #CLOUDSTACK-3184: update without args as this results in an NPE
    self.virtual_machine.update(self.apiclient)
    self.virtual_machine.update(self.apiclient, displayname='newdisplayname')

    updated_vm = VirtualMachine.list(self.apiclient, id=original_vm.id)[0]
    self.assertNotEqual(updated_vm.displayname, original_vm.displayname,
                        msg="displayname remained the same after updateVirtualMachine")
    self.assertEqual(updated_vm.displayname, 'newdisplayname',
                     msg="display name not updated successfully, displayname is %s"
                     % updated_vm.displayname)

    # The new display name must survive a restart.
    self.debug("Starting VirtualMachine after updated displayname")
    self.virtual_machine.start(self.apiclient)
    restarted_vm = VirtualMachine.list(self.apiclient, id=original_vm.id)[0]
    self.assertEqual(updated_vm.displayname, restarted_vm.displayname,
                     msg="display name changed on start, displayname is %s"
                     % restarted_vm.displayname)
def test_01_snapshot_on_rootVolume(self):
    """Test create VM with default cent os template and create snapshot
    on root disk of the vm
    """
    # Validate the following
    # 1. Deploy a Linux VM using default CentOS template, use small service
    #    offering, disk offering
    # 2. Create snapshot on the root disk of this newly cteated vm
    # 3. listSnapshots should list the snapshot that was created.
    # 4. verify that secondary storage NFS share contains the reqd
    # volume under /secondary/snapshots/$accountid/$volumeid/$snapshot_uuid
    # 5. verify backup_snap_id was non null in the `snapshots` table

    # Create virtual machine with small systerm offering and disk offering
    new_virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        templateid=self.template.id,
        zoneid=self.zone.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        diskofferingid=self.disk_offering.id,
    )
    self.debug("Virtual machine got created with id: %s" % new_virtual_machine.id)
    list_virtual_machine_response = VirtualMachine.list(
        self.apiclient,
        id=new_virtual_machine.id)
    self.assertEqual(isinstance(list_virtual_machine_response, list),
                     True,
                     "Check listVirtualMachines returns a valid list")
    self.assertNotEqual(len(list_virtual_machine_response),
                        0,
                        "Check listVirtualMachines response")
    self.cleanup.append(new_virtual_machine)

    # Getting root volume id of the vm created above
    list_volume_response = Volume.list(
        self.apiclient,
        virtualmachineid=list_virtual_machine_response[0].id,
        type="ROOT",
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(isinstance(list_volume_response, list),
                     True,
                     "Check listVolumes returns a valid list")
    self.assertNotEqual(len(list_volume_response),
                        0,
                        "Check listVolumes response")
    self.debug(
        "Snapshot will be created on the volume with voluem id: %s" %
        list_volume_response[0].id)

    # Perform snapshot on the root volume
    root_volume_snapshot = Snapshot.create(
        self.apiclient,
        volume_id=list_volume_response[0].id)
    self.debug(
        "Created snapshot: %s for vm: %s" %
        (root_volume_snapshot.id,
         list_virtual_machine_response[0].id))
    list_snapshot_response = Snapshot.list(self.apiclient,
                                           id=root_volume_snapshot.id,
                                           account=self.account.name,
                                           domainid=self.account.domainid)
    self.assertEqual(isinstance(list_snapshot_response, list),
                     True,
                     "Check listSnapshots returns a valid list")
    self.assertNotEqual(len(list_snapshot_response),
                        0,
                        "Check listSnapshots response")

    # Verify Snapshot state
    self.assertEqual(
        list_snapshot_response[0].state in ['BackedUp', 'CreatedOnPrimary'],
        True,
        "Snapshot state is not as expected. It is %s" %
        list_snapshot_response[0].state)
    self.assertEqual(
        list_snapshot_response[0].volumeid,
        list_volume_response[0].id,
        "Snapshot volume id is not matching with the vm's volume id")
    self.cleanup.append(root_volume_snapshot)

    # Below code is to verify snapshots in the backend and in db.
    # Verify backup_snap_id field in the snapshots table for the snapshot created, it should not be null
    self.debug(
        "select id, removed, backup_snap_id from snapshots where uuid = '%s';" %
        root_volume_snapshot.id)
    qryresult = self.dbclient.execute(
        "select id, removed, backup_snap_id from snapshots where uuid = '%s';" %
        root_volume_snapshot.id)
    self.assertNotEqual(len(qryresult),
                        0,
                        "Check sql query to return snapshots list")
    snapshot_qry_response = qryresult[0]
    snapshot_id = snapshot_qry_response[0]
    is_removed = snapshot_qry_response[1]
    backup_snap_id = snapshot_qry_response[2]
    # NOTE(review): the db client appears to render SQL NULL as the string
    # "NULL" — these comparisons rely on that; confirm against dbConnection.
    self.assertNotEqual(
        is_removed,
        "NULL",
        "Snapshot is removed from CS, please check the logs")
    msg = "Backup snapshot id is set to null for the backedup snapshot :%s" % snapshot_id
    self.assertNotEqual(backup_snap_id, "NULL", msg)

    # Check if the snapshot is present on the secondary storage
    self.assertTrue(
        is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config,
                           self.zone.id, root_volume_snapshot.id))
    return
def test_01_snapshot_on_rootVolume(self):
    """Deploy a VM from the default CentOS template and snapshot its ROOT volume.

    Validates the following:
    1. Deploy a Linux VM using the default CentOS template with the small
       service offering and a disk offering.
    2. Create a snapshot on the root disk of this newly created VM.
    3. listSnapshots lists the snapshot that was created.
    4. The secondary storage NFS share contains the required volume under
       /secondary/snapshots/$accountid/$volumeid/$snapshot_uuid.
    5. backup_snap_id is non-null in the `snapshots` table.
    """
    # Create virtual machine with small service offering and disk offering
    new_virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        templateid=self.template.id,
        zoneid=self.zone.id,
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        diskofferingid=self.disk_offering.id,
    )
    self.debug("Virtual machine got created with id: %s" %
               new_virtual_machine.id)
    list_virtual_machine_response = VirtualMachine.list(
        self.apiclient,
        id=new_virtual_machine.id)
    self.assertEqual(isinstance(list_virtual_machine_response, list),
                     True,
                     "Check listVirtualMachines returns a valid list")
    self.assertNotEqual(len(list_virtual_machine_response),
                        0,
                        "Check listVirtualMachines response")
    # Register the VM for teardown
    self.cleanup.append(new_virtual_machine)
    # Getting root volume id of the vm created above
    list_volume_response = Volume.list(
        self.apiclient,
        virtualmachineid=list_virtual_machine_response[0].id,
        type="ROOT",
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(isinstance(list_volume_response, list),
                     True,
                     "Check listVolumes returns a valid list")
    self.assertNotEqual(len(list_volume_response),
                        0,
                        "Check listVolumes response")
    self.debug(
        "Snapshot will be created on the volume with voluem id: %s" %
        list_volume_response[0].id)
    # Perform snapshot on the root volume
    root_volume_snapshot = Snapshot.create(
        self.apiclient,
        volume_id=list_volume_response[0].id)
    self.debug("Created snapshot: %s for vm: %s" % (
        root_volume_snapshot.id,
        list_virtual_machine_response[0].id))
    list_snapshot_response = Snapshot.list(
        self.apiclient,
        id=root_volume_snapshot.id,
        account=self.account.name,
        domainid=self.account.domainid)
    self.assertEqual(isinstance(list_snapshot_response, list),
                     True,
                     "Check listSnapshots returns a valid list")
    self.assertNotEqual(len(list_snapshot_response),
                        0,
                        "Check listSnapshots response")
    # Verify Snapshot state: backed up already, or still on primary storage
    # awaiting backup — both are valid right after creation
    self.assertEqual(
        list_snapshot_response[0].state in [
            'BackedUp',
            'CreatedOnPrimary'
        ],
        True,
        "Snapshot state is not as expected. It is %s" %
        list_snapshot_response[0].state
    )
    self.assertEqual(
        list_snapshot_response[0].volumeid,
        list_volume_response[0].id,
        "Snapshot volume id is not matching with the vm's volume id")
    self.cleanup.append(root_volume_snapshot)
    # Below code is to verify snapshots in the backend and in db.
    # backup_snap_id must be set for the snapshot created above
    self.debug("select id, removed, backup_snap_id from snapshots where uuid = '%s';" % root_volume_snapshot.id)
    qryresult = self.dbclient.execute("select id, removed, backup_snap_id from snapshots where uuid = '%s';" % root_volume_snapshot.id)
    self.assertNotEqual(len(qryresult),
                        0,
                        "Check sql query to return snapshots list")
    snapshot_qry_response = qryresult[0]
    snapshot_id = snapshot_qry_response[0]
    is_removed = snapshot_qry_response[1]
    backup_snap_id = snapshot_qry_response[2]
    # NOTE(review): comparison against the literal string "NULL" assumes the
    # db client returns SQL NULL as that string — confirm dbclient behavior
    self.assertNotEqual(is_removed,
                        "NULL",
                        "Snapshot is removed from CS, please check the logs")
    msg = "Backup snapshot id is set to null for the backedup snapshot :%s" % snapshot_id
    self.assertNotEqual(backup_snap_id, "NULL", msg)
    # Check if the snapshot is present on the secondary storage
    self.assertTrue(is_snapshot_on_nfs(self.apiclient, self.dbclient, self.config, self.zone.id, root_volume_snapshot.id))
    return
def test_00_deploy_vm_root_resize(self):
    """Test deploy virtual machine with root resize

    Validates the following:
    1. listVirtualMachines returns accurate information.
    2. The root disk has the new size per listVolumes (KVM only).
    3. Unsupported hypervisor types reject the rootdisksize override.
    """
    # Request a root disk 2 GB larger than the template
    # (template.size is in bytes; >> 30 converts bytes to GB)
    newrootsize = (self.template.size >> 30) + 2
    # Hypervisor names reported by the API may differ in case ('KVM'/'kvm'),
    # so compare case-insensitively
    if str(self.apiclient.hypervisor).lower() == 'kvm':
        self.virtual_machine = VirtualMachine.create(
            self.apiclient,
            self.testdata["virtual_machine"],
            accountid=self.account.name,
            zoneid=self.zone.id,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering.id,
            templateid=self.template.id,
            rootdisksize=newrootsize
        )
        list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
        self.debug(
            "Verify listVirtualMachines response for virtual machine: %s"
            % self.virtual_machine.id
        )
        self.assertEqual(
            isinstance(list_vms, list),
            True,
            "List VM response was not a valid list"
        )
        self.assertNotEqual(
            len(list_vms),
            0,
            "List VM response was empty"
        )
        vm = list_vms[0]
        self.assertEqual(
            vm.id,
            self.virtual_machine.id,
            "Virtual Machine ids do not match"
        )
        self.assertEqual(
            vm.name,
            self.virtual_machine.name,
            "Virtual Machine names do not match"
        )
        self.assertEqual(
            vm.state,
            "Running",
            msg="VM is not in Running state"
        )
        # get root vol from created vm, verify it is correct size
        list_volume_response = list_volumes(
            self.apiclient,
            virtualmachineid=self.virtual_machine.id,
            type='ROOT',
            listall=True
        )
        # Guard against an empty response before indexing
        self.assertNotEqual(
            len(list_volume_response),
            0,
            "List volumes response was empty"
        )
        rootvolume = list_volume_response[0]
        # rootvolume.size is in bytes; << 30 converts the requested GB back
        success = rootvolume is not None and rootvolume.size == (newrootsize << 30)
        self.assertEqual(
            success,
            True,
            "Check if the root volume resized appropriately"
        )
    else:
        self.debug(
            "hypervisor %s unsupported for test 00, verifying it errors properly"
            % self.apiclient.hypervisor
        )
        success = False
        try:
            self.virtual_machine = VirtualMachine.create(
                self.apiclient,
                self.testdata["virtual_machine"],
                accountid=self.account.name,
                zoneid=self.zone.id,
                domainid=self.account.domainid,
                serviceofferingid=self.service_offering.id,
                templateid=self.template.id,
                rootdisksize=newrootsize
            )
        except Exception as ex:
            # Do not hard-code "XenServer" in the expected message: the
            # server raises "Hypervisor <name> does not support rootdisksize
            # override" for any unsupported hypervisor, and this branch must
            # accept all of them, not only XenServer
            if "does not support rootdisksize override" in str(ex):
                success = True
            else:
                self.debug(
                    "virtual machine create did not fail appropriately. "
                    "Error was actually : " + str(ex)
                )
        self.assertEqual(
            success,
            True,
            "Check if unsupported hypervisor %s fails appropriately"
            % self.apiclient.hypervisor
        )
def test_vm_creation_in_fully_automated_mode(self):
    """ Test VM Creation in automation mode = Fully automated

    Precondition: the DRS cluster is configured in "Fully automated" mode.

    Validates the following:
    1. Create a new VM targeting a host which is almost fully utilized.
    2. The VM automatically gets placed on the other host.
    3. VM state is Running after deployment.
    """
    hosts = Host.list(self.apiclient,
                      zoneid=self.zone.id,
                      resourcestate='Enabled',
                      type='Routing')
    self.assertEqual(isinstance(hosts, list),
                     True,
                     "List hosts should return valid host response")
    self.assertGreaterEqual(
        len(hosts),
        2,
        "There must be two hosts present in a cluster")
    host_1 = hosts[0]
    # Convert available memory (keeping a 1024 MB margin; 1048576 bytes per MB)
    # into MBs and assign it to the service offering, so the first VM nearly
    # fills host_1
    self.services["service_offering_max_memory"]["memory"] = int(
        (int(hosts[0].memorytotal) - int(hosts[0].memoryused)) / 1048576 - 1024)
    self.debug("max memory: %s" %
               self.services["service_offering_max_memory"]["memory"])
    service_offering_max_memory = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering_max_memory"])
    # Pin the large VM to host_1 so that host is left nearly full
    VirtualMachine.create(self.apiclient,
                          self.services["virtual_machine"],
                          accountid=self.account.name,
                          domainid=self.account.domainid,
                          serviceofferingid=service_offering_max_memory.id,
                          hostid=host_1.id)
    # Host 1 has only 1024 MB memory available now after deploying the instance
    # We are trying to deploy an instance with 2048 MB memory, this should
    # automatically get deployed on other host which has the enough capacity
    self.debug(
        "Trying to deploy instance with memory requirement more than that is available on\
        the first host")
    self.debug("Deploying VM in account: %s" % self.account.name)

    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id)

    vms = VirtualMachine.list(self.apiclient,
                              id=virtual_machine.id,
                              listall=True)
    self.assertEqual(
        isinstance(vms, list),
        True,
        "List VMs should return valid response for deployed VM")
    self.assertNotEqual(
        len(vms),
        0,
        "List VMs should return valid response for deployed VM")
    vm = vms[0]
    self.assertEqual(vm.state,
                     "Running",
                     "Deployed VM should be in RUnning state")
    # The planner must have dispersed the second VM to a different host
    # than the nearly-full host_1
    self.assertNotEqual(
        vm.hostid,
        host_1.id,
        "Host Ids of two should not match as one host is full")
    self.debug(
        "The host ids of two virtual machines are different as expected\
        they are %s and %s" % (vm.hostid, host_1.id))
    return
def test_vm_creation_in_fully_automated_mode(self):
    """ Test VM Creation in automation mode = Fully automated

    Precondition: the DRS cluster is configured in "Fully automated" mode.

    Validates the following:
    1. Create a new VM targeting a host which is almost fully utilized.
    2. The VM is automatically placed on the other host.
    3. VM state is Running after deployment.
    """
    hosts = Host.list(
        self.apiclient,
        zoneid=self.zone.id,
        resourcestate='Enabled',
        type='Routing'
    )
    self.assertEqual(
        isinstance(hosts, list),
        True,
        "List hosts should return valid host response"
    )
    self.assertGreaterEqual(
        len(hosts),
        2,
        "There must be two hosts present in a cluster"
    )
    host_1 = hosts[0]
    # Convert the host's available memory into MB (1048576 bytes per MB),
    # keeping a 1024 MB margin, and use it for the "max memory" offering
    self.services["service_offering_max_memory"]["memory"] = int((int(hosts[0].memorytotal) - int(hosts[0].memoryused))/1048576 - 1024)
    self.debug("max memory: %s" % self.services["service_offering_max_memory"]["memory"])
    service_offering_max_memory = ServiceOffering.create(
        self.apiclient,
        self.services["service_offering_max_memory"]
    )
    # Fill host_1 almost completely with this first instance
    VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=service_offering_max_memory.id,
        hostid=host_1.id
    )
    # Host 1 has only 1024 MB memory available now after deploying the instance
    # We are trying to deploy an instance with 2048 MB memory, this should
    # automatically get deployed on other host which has the enough capacity
    self.debug("Trying to deploy instance with memory requirement more than that is available on\
        the first host")
    self.debug("Deploying VM in account: %s" % self.account.name)

    # Spawn an instance in that network
    virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id
    )

    vms = VirtualMachine.list(
        self.apiclient,
        id=virtual_machine.id,
        listall=True
    )
    self.assertEqual(
        isinstance(vms, list),
        True,
        "List VMs should return valid response for deployed VM"
    )
    self.assertNotEqual(
        len(vms),
        0,
        "List VMs should return valid response for deployed VM"
    )
    vm = vms[0]
    self.assertEqual(
        vm.state,
        "Running",
        "Deployed VM should be in RUnning state"
    )
    # The second VM must land on a different host than the nearly-full host_1
    self.assertNotEqual(
        vm.hostid,
        host_1.id,
        "Host Ids of two should not match as one host is full"
    )
    self.debug("The host ids of two virtual machines are different as expected\
        they are %s and %s" % (vm.hostid, host_1.id))
    return
def test_vmware_affinity(self):
    """ Test Set up affinity rules

    Pre-requisite: a VMWare cluster configured in fully automated mode.

    Validates the following:
    1. Deploy 2 VMs in the same host-affinity group (on the same host).
    2. Migrate one VM from one host to another.
    3. The second VM should also get migrated.
    """
    hosts = Host.list(self.apiclient,
                      zoneid=self.zone.id,
                      resourcestate='Enabled',
                      type='Routing')
    self.assertEqual(isinstance(hosts, list),
                     True,
                     "List hosts should return valid host response")
    self.assertGreaterEqual(
        len(hosts),
        2,
        "There must be two hosts present in a cluster")
    host_1 = hosts[0].id
    host_2 = hosts[1].id
    # Affinity group that is expected to keep both VMs together
    aff_grp = self.create_aff_grp(aff_grp=self.services["host_affinity"],
                                  acc=self.account.name,
                                  domainid=self.domain.id)
    # vm_1 is pinned to host_1; vm_2 has no host pin, so the affinity group
    # should place it alongside vm_1
    vm_1 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name],
        hostid=host_1)
    vm_2 = VirtualMachine.create(
        self.apiclient,
        self.services["virtual_machine"],
        accountid=self.account.name,
        domainid=self.domain.id,
        serviceofferingid=self.service_offering.id,
        affinitygroupnames=[aff_grp.name])
    vms = VirtualMachine.list(self.apiclient, id=vm_1.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" %
        vm_list_validation_result[2])
    virtual_machine_1 = vm_list_validation_result[1]
    self.assertEqual(virtual_machine_1.state,
                     "Running",
                     "Deployed VM should be in RUnning state")
    self.debug("Deploying VM on account: %s" % self.account.name)
    vms = VirtualMachine.list(self.apiclient, id=vm_2.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" %
        vm_list_validation_result[2])
    virtual_machine_2 = vm_list_validation_result[1]
    self.assertEqual(virtual_machine_2.state,
                     "Running",
                     "Deployed VM should be in RUnning state")
    # Migrate the second VM; the affinity rule is expected to bring the
    # first VM to the same destination host automatically
    self.debug("Migrate VM from host_1 to host_2")
    cmd = migrateVirtualMachine.migrateVirtualMachineCmd()
    cmd.virtualmachineid = virtual_machine_2.id
    cmd.hostid = host_2
    self.apiclient.migrateVirtualMachine(cmd)
    self.debug("Migrated VM from host_1 to host_2")
    vms = VirtualMachine.list(self.apiclient, hostid=host_2, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "vm list validation failed due to %s" %
        vm_list_validation_result[2])
    # Both VMs must now be on host_2
    vmids = [vm.id for vm in vms]
    self.assertIn(virtual_machine_1.id,
                  vmids,
                  "VM 1 should be successfully migrated to host 2")
    self.assertIn(virtual_machine_2.id,
                  vmids,
                  "VM 2 should be automatically migrated to host 2")
    return
def setUp(self):
    """Deploy an HA-enabled VM and install simulator mocks that make the
    VM's host appear failed, preparing the fixture for an HA test run.
    """
    self.testdata = TestData().testdata
    self.apiclient = self.testClient.getApiClient()

    # Get Zone, Domain and Default Built-in template
    self.domain = get_domain(self.apiclient, self.testdata)
    self.zone = get_zone(self.apiclient, self.testdata)
    self.testdata["mode"] = self.zone.networktype
    self.template = get_template(self.apiclient, self.zone.id, self.testdata["ostype"])

    self.hosts = []
    suitablecluster = None
    # Find a cluster with at least two Routing hosts — HA needs a second
    # host available in the same cluster
    clusters = Cluster.list(self.apiclient)
    self.assertTrue(isinstance(clusters, list) and len(clusters) > 0, msg="No clusters found")
    for cluster in clusters:
        self.hosts = Host.list(self.apiclient, clusterid=cluster.id, type='Routing')
        if isinstance(self.hosts, list) and len(self.hosts) >= 2:
            suitablecluster = cluster
            break
    self.assertTrue(isinstance(self.hosts, list) and len(self.hosts) >= 2, msg="Atleast 2 hosts required in cluster for VM HA test")
    # update host tags so the 'hasmall' offering's host tag matches every host
    for host in self.hosts:
        Host.update(self.apiclient, id=host.id, hosttags=self.testdata["service_offering"]["hasmall"]["hosttags"])
    # create a user account
    self.account = Account.create(
        self.apiclient,
        self.testdata["account"],
        domainid=self.domain.id
    )
    # create a service offering
    self.service_offering = ServiceOffering.create(
        self.apiclient,
        self.testdata["service_offering"]["hasmall"]
    )
    # deploy ha vm
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id
    )
    list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id
    )
    self.assertTrue(isinstance(list_vms, list) and len(list_vms) == 1, msg="List VM response was empty")
    # Re-read the VM so runtime fields (e.g. hostid) are populated
    self.virtual_machine = list_vms[0]
    # Simulator mocks returning "result:fail" for the health/ping/VM checks
    # on the VM's host — presumably so the management server treats that
    # host as failed and triggers HA; confirm against the test body
    self.mock_checkhealth = SimulatorMock.create(
        apiclient=self.apiclient,
        command="CheckHealthCommand",
        zoneid=suitablecluster.zoneid,
        podid=suitablecluster.podid,
        clusterid=suitablecluster.id,
        hostid=self.virtual_machine.hostid,
        value="result:fail")
    self.mock_ping = SimulatorMock.create(
        apiclient=self.apiclient,
        command="PingCommand",
        zoneid=suitablecluster.zoneid,
        podid=suitablecluster.podid,
        clusterid=suitablecluster.id,
        hostid=self.virtual_machine.hostid,
        value="result:fail")
    self.mock_checkvirtualmachine = SimulatorMock.create(
        apiclient=self.apiclient,
        command="CheckVirtualMachineCommand",
        zoneid=suitablecluster.zoneid,
        podid=suitablecluster.podid,
        clusterid=suitablecluster.id,
        hostid=self.virtual_machine.hostid,
        value="result:fail")
    # PingTestCommand is mocked at pod scope (no clusterid/hostid)
    self.mock_pingtest = SimulatorMock.create(
        apiclient=self.apiclient,
        command="PingTestCommand",
        zoneid=suitablecluster.zoneid,
        podid=suitablecluster.podid,
        value="result:fail")
    # Also fail CheckOnHostCommand issued from every *other* host in the
    # cluster toward the "failed" one
    self.mock_checkonhost_list = []
    for host in self.hosts:
        if host.id != self.virtual_machine.hostid:
            self.mock_checkonhost_list.append(SimulatorMock.create(
                apiclient=self.apiclient,
                command="CheckOnHostCommand",
                zoneid=suitablecluster.zoneid,
                podid=suitablecluster.podid,
                clusterid=suitablecluster.id,
                hostid=host.id,
                value="result:fail"))
    # build cleanup list (the simulator mocks must be removed on teardown too)
    self.cleanup = [
        self.service_offering,
        self.account,
        self.mock_checkhealth,
        self.mock_ping,
        self.mock_checkvirtualmachine,
        self.mock_pingtest
    ]
    self.cleanup = self.cleanup + self.mock_checkonhost_list
def test_update_vm_name(self):
    """Test Update VirtualMachine Name

    Validates the following:
    1. The VirtualMachine is created with a uuid name and displayname.
    2. listVirtualMachines returns accurate information.
    3. Stop the VM.
    4. Call updateVirtualMachine with no args, then with a new displayname.
    5. listVirtualMachines shows the new displayname.
    6. Start the VM.
    7. Verify the displayname is still set after the start.
    """
    # Deploy the VM whose display name will be updated
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["virtual_machine"],
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        templateid=self.template.id
    )
    vm_list = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.assertEqual(
        isinstance(vm_list, list),
        True,
        "List VM response was not a valid list"
    )
    self.assertNotEqual(len(vm_list), 0, "List VM response was empty")

    deployed_vm = vm_list[0]
    self.debug(
        "VirtualMachine launched with id, name, displayname: %s %s %s"
        % (self.virtual_machine.id, deployed_vm.name, deployed_vm.displayname)
    )
    self.assertEqual(
        deployed_vm.state,
        "Running",
        msg="VM is not in Running state"
    )

    self.debug("Stopping VirtualMachine to update displayname")
    self.virtual_machine.stop(self.apiclient)

    # CLOUDSTACK-3184: update without args as this results in an NPE
    self.virtual_machine.update(self.apiclient)
    self.virtual_machine.update(self.apiclient, displayname='newdisplayname')

    updated_vm = VirtualMachine.list(self.apiclient, id=deployed_vm.id)[0]
    self.assertNotEqual(
        updated_vm.displayname,
        deployed_vm.displayname,
        msg="displayname remained the same after updateVirtualMachine"
    )
    self.assertEqual(
        updated_vm.displayname,
        'newdisplayname',
        msg="display name not updated successfully, displayname is %s"
        % updated_vm.displayname
    )

    self.debug("Starting VirtualMachine after updated displayname")
    self.virtual_machine.start(self.apiclient)

    # The displayname set while stopped must survive the start
    restarted_vm = VirtualMachine.list(self.apiclient, id=deployed_vm.id)[0]
    self.assertEqual(
        updated_vm.displayname,
        restarted_vm.displayname,
        msg="display name changed on start, displayname is %s"
        % restarted_vm.displayname
    )
def test_02_reboot_instance_with_is_volatile_offering(self):
    """ Test rebooting instances created with isVolatile service offerings

    Validates the following:
    1. Reboot the virtual machines.
    2. After reboot:
        a. The VM created with isVolatile=True must have a new root disk
           but the same IP.
        b. The VM created with isVolatile=False must have the same root
           disk and IP as before the reboot.
    """
    self.debug("Rebooting the virtual machines in account: %s" % self.account.name)
    try:
        self.vm_with_reset.reboot(self.apiclient)
        self.vm_without_reset.reboot(self.apiclient)
    except Exception as e:
        self.fail("Failed to reboot the virtual machines, %s" % e)

    # Check if the root disk was destroyed and recreated for isVolatile=True
    self.debug("Checking root disk of VM with isVolatile=True")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_with_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "VM list validation failed due to %s" % vm_list_validation_result[2]
    )
    vm_with_reset = vm_list_validation_result[1]
    vm_with_reset_root_disk_id = self.get_root_device_uuid_for_vm(
        vm_with_reset.id,
        vm_with_reset.rootdeviceid
    )
    # The volatile VM must come back with a different root device
    self.assertNotEqual(
        self.vm_with_reset_root_disk_id,
        vm_with_reset_root_disk_id,
        "VM created with IsVolatile=True has same rootdeviceid : %s after reboot"
        % vm_with_reset_root_disk_id
    )
    # Make sure it has the same IP after reboot
    self.assertEqual(
        self.vm_with_reset.nic[0].ipaddress,
        vm_with_reset.nic[0].ipaddress,
        "VM created with IsVolatile=True doesn't have same ip after reboot. Got : %s Expected : %s"
        % (vm_with_reset.nic[0].ipaddress, self.vm_with_reset.nic[0].ipaddress)
    )

    # Check if the root disk was not destroyed for isVolatile=False
    self.debug("Checking root disk of VM with isVolatile=False")
    vms = VirtualMachine.list(self.apiclient, id=self.vm_without_reset.id, listall=True)
    vm_list_validation_result = validateList(vms)
    self.assertEqual(
        vm_list_validation_result[0],
        PASS,
        "list validation failed due to %s" % vm_list_validation_result[2]
    )
    vm_without_reset = vm_list_validation_result[1]
    vm_without_reset_root_disk_id = self.get_root_device_uuid_for_vm(
        vm_without_reset.id,
        vm_without_reset.rootdeviceid
    )
    self.assertEqual(
        self.vm_without_reset_root_disk_id,
        vm_without_reset_root_disk_id,
        "VM created with IsVolatile=False has different rootdeviceid after reboot Got: %s Expected : %s"
        % (vm_without_reset_root_disk_id, self.vm_without_reset_root_disk_id)
    )
    # Make sure it has the same IP after reboot.
    # Fix: this assertion checks the isVolatile=False VM, so the failure
    # message must say IsVolatile=False (it was a copy-paste of the True case)
    self.assertEqual(
        self.vm_without_reset.nic[0].ipaddress,
        vm_without_reset.nic[0].ipaddress,
        "VM created with IsVolatile=False doesn't have same ip after reboot. Got : %s Expected : %s"
        % (vm_without_reset.nic[0].ipaddress, self.vm_without_reset.nic[0].ipaddress)
    )
    return