def get_process_status(hostip, port, username, password, linklocalip, process, hypervisor=None):
    """Double hop and returns a process status.

    SSHes to the hypervisor host, then hops from there into the router at
    `linklocalip` (port 3922) and runs `process`, retrying while the router
    is still coming up.

    :param hostip: IP of the hypervisor host to hop through
    :param port: SSH port of the hypervisor host
    :param username: SSH user for the hypervisor host
    :param password: SSH password for the hypervisor host
    :param linklocalip: link-local address of the router to hop into
    :param process: command to run on the router
    :param hypervisor: hypervisor name; selects key path and retry budget
    :returns: list of output lines from the command (possibly empty)
    """
    # SSH to the machine
    ssh = SshClient(hostip, port, username, password)
    if str(hypervisor).lower() in ('vmware', 'hyperv'):
        # vmware/hyperv system VMs use the management-server-held key
        ssh_command = "ssh -i /var/cloudstack/management/.ssh/id_rsa -ostricthostkeychecking=no "
    else:
        ssh_command = "ssh -i ~/.ssh/id_rsa.cloud -ostricthostkeychecking=no "
    ssh_command = ssh_command + \
        "-oUserKnownHostsFile=/dev/null -p 3922 %s %s" % (linklocalip, process)
    # Double hop into router; hyperv routers are slower to come up, so they
    # get a larger retry budget
    timeout = 12 if str(hypervisor).lower() == 'hyperv' else 5
    # Ensure the SSH login is successful.
    # BUG FIX: the original loop only checked `timeout == 0` on the
    # "Host key verification failed." branch, so repeated "Connection
    # refused" responses spun forever; it also indexed res[0] without
    # guarding against empty output (IndexError).
    res = []
    while timeout >= 0:
        res = ssh.execute(ssh_command)
        if res and "connection refused" not in res[0].lower() \
                and res[0] != "Host key verification failed.":
            break
        time.sleep(5)
        timeout = timeout - 1
    return res
def download_systemplates_sec_storage(server, services):
    """Download System templates on sec storage.

    Mounts the secondary storage NFS export on the management server and
    runs the system VM template installer script against it, then unmounts.

    :param server: dict with ipaddress/port/username/password of mgmt server
    :param services: dict with mnt_dir, sec_storage, path, command,
        download_url and hypervisor keys
    :raises Exception: on SSH failure or when the installer output does not
        report a successful install
    """
    try:
        # Login to management server
        ssh = SshClient(server["ipaddress"], server["port"], server["username"], server["password"])
    except Exception:
        # BUG FIX: the error path read server["ipaddess"] (typo), which
        # raised a KeyError and masked the intended error message.
        raise Exception("SSH access failed for server with IP address: %s" % server["ipaddress"])
    # Mount Secondary Storage on Management Server
    cmds = [
        "mkdir -p %s" % services["mnt_dir"],
        "mount -t nfs %s:/%s %s" % (services["sec_storage"], services["path"], services["mnt_dir"]),
        "%s -m %s -u %s -h %s -F" % (services["command"], services["mnt_dir"], services["download_url"], services["hypervisor"])
    ]
    for c in cmds:
        result = ssh.execute(c)
    # Only the last command's output (the installer) is inspected
    res = str(result)
    # Unmount the Secondary storage
    ssh.execute("umount %s" % (services["mnt_dir"]))
    if res.count("Successfully installed system VM template") == 1:
        return
    else:
        raise Exception("Failed to download System Templates on Sec Storage")
def get_process_status(
        hostip, port, username, password, linklocalip, process,
        hypervisor=None):
    """Double hop and returns a process status.

    SSHes into the hypervisor host, hops to the router at `linklocalip`
    (port 3922) and runs `process`, retrying up to 5 times while the host
    key has not yet been accepted.

    :returns: list of output lines from the command (possibly empty)
    """
    # SSH to the machine
    ssh = SshClient(hostip, port, username, password)
    if str(hypervisor).lower() in ('vmware', 'hyperv'):
        ssh_command = \
            "ssh -i /var/cloudstack/management/" \
            ".ssh/id_rsa -ostricthostkeychecking=no "
    else:
        ssh_command = "ssh -i ~/.ssh/id_rsa.cloud " \
                      "-ostricthostkeychecking=no "
    ssh_command = ssh_command + \
        "-oUserKnownHostsFile=/dev/null -p 3922 %s %s" % (
            linklocalip, process)
    # Double hop into router
    timeout = 5
    # Ensure the SSH login is successful.
    # ROBUSTNESS FIX: ssh.execute may return an empty list; the original
    # indexed res[0] unconditionally and raised IndexError. Empty output is
    # now treated as a transient failure and retried.
    while True:
        res = ssh.execute(ssh_command)
        if res and res[0] != "Host key verification failed.":
            break
        elif timeout == 0:
            break
        time.sleep(5)
        timeout = timeout - 1
    return res
def download_systemplates_sec_storage(server, services):
    """Download System templates on sec storage.

    Mounts secondary storage on the management server over NFS, runs the
    template install command, unmounts, and checks the installer output.

    :param server: mgmt-server connection dict (ipaddress, port, username,
        password)
    :param services: dict of mnt_dir, sec_storage, path, command,
        download_url, hypervisor
    :raises Exception: if SSH fails or the install is not reported successful
    """
    try:
        # Login to management server
        ssh = SshClient(server["ipaddress"], server["port"], server["username"], server["password"])
    except Exception:
        # BUG FIX: previously referenced the misspelled key
        # server["ipaddess"], so the handler itself crashed with KeyError.
        raise Exception("SSH access failed for server with IP address: %s" % server["ipaddress"])
    # Mount Secondary Storage on Management Server
    cmds = [
        "mkdir -p %s" % services["mnt_dir"],
        "mount -t nfs %s:/%s %s" % (services["sec_storage"], services["path"], services["mnt_dir"]),
        "%s -m %s -u %s -h %s -F" % (services["command"], services["mnt_dir"], services["download_url"], services["hypervisor"]),
    ]
    for c in cmds:
        result = ssh.execute(c)
    # Inspect the final (installer) command's output only
    res = str(result)
    # Unmount the Secondary storage
    ssh.execute("umount %s" % (services["mnt_dir"]))
    if res.count("Successfully installed system VM template") == 1:
        return
    else:
        raise Exception("Failed to download System Templates on Sec Storage")
def setUp(self):
    """Load commands.properties and build a role -> allowed-APIs map.

    Skips the test when the dynamic role-based API checker is enabled
    (these tests target the static commands.properties checker) or when
    no commands.properties file can be located.
    """
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.mgtSvrDetails = self.config.__dict__["mgtSvr"][0].__dict__
    self.cleanup = []
    self.testdata = {
        "account": {
            "email": "*****@*****.**",
            "firstname": "Marvin",
            "lastname": "TestUser",
            "username": "******",
            "password": "******",
        }
    }
    # The static checker is only active when dynamic roles are disabled
    feature_enabled = self.apiclient.listCapabilities(listCapabilities.listCapabilitiesCmd()).dynamicrolesenabled
    if feature_enabled:
        self.skipTest("Dynamic role-based API checker is enabled, skipping tests for static role-base API checker")
    commandsProperties = []
    # First try the deployed file on the management server itself
    try:
        sshClient = SshClient(
            self.mgtSvrDetails["mgtSvrIp"],
            22,
            self.mgtSvrDetails["user"],
            self.mgtSvrDetails["passwd"],
            retries=1,
            log_lvl=logging.INFO
        )
        result = sshClient.runCommand("cat /etc/cloudstack/management/commands.properties")
        if 'status' in result and result['status'] == 'SUCCESS' and 'stdout' in result and len(result['stdout']) > 0:
            commandsProperties = result['stdout']
    except Exception:
        self.debug("Failed to ssh into mgmt server host and grab commands.properties file")
    # Developer-checkout fallback: the in-tree template file. NOTE(review):
    # when present this overrides whatever was fetched over SSH above.
    testDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
    localFileName = os.path.abspath(testDir + "/../../../client/tomcatconf/commands.properties.in")
    if os.path.isfile(localFileName):
        self.info("Detected that we're running in developer mode with maven, using file at:" + localFileName)
        with open(localFileName) as f:
            commandsProperties = f.readlines()
    if len(commandsProperties) < 1:
        self.skipTest("Unable to find commands.properties, skipping this test")
    # Parse "apiName=octet" lines, skipping blanks and '#' comments
    apiMap = {}
    for line in commandsProperties:
        if not line or line == '' or line == '\n' or line.startswith('#'):
            continue
        name, value = line.split('=')
        apiMap[name.strip()] = value.strip()
    self.roleApiMap = {}  # role to list of apis allowed
    # Each role owns one bit of the permission octet stored per API
    octetKey = {'Admin':1, 'DomainAdmin':4, 'User':8}
    for role in octetKey.keys():
        for api in sorted(apiMap.keys()):
            if (octetKey[role] & int(apiMap[api])) > 0:
                if role not in self.roleApiMap:
                    self.roleApiMap[role] = []
                self.roleApiMap[role].append(api)
def RestartServers(self):
    """Restart management server and usage server."""
    client = SshClient(self.mgtSvrDetails["mgtSvrIp"], 22,
                       self.mgtSvrDetails["user"],
                       self.mgtSvrDetails["passwd"])
    client.execute("service cloudstack-management restart")
    return
def RestartServers(self):
    """Restart management server and usage server."""
    details = self.mgtSvrDetails
    shell = SshClient(details["mgtSvrIp"], 22, details["user"], details["passwd"])
    shell.execute("service cloudstack-management restart")
    return
def test_deploy_vgpu_enabled_vm(self):
    """Test Deploy Virtual Machine

    # Validate the following:
    # 1. Virtual Machine is accessible via SSH
    # 2. Virtual Machine is vGPU enabled (via SSH)
    # 3. listVirtualMachines returns accurate information
    """
    if self.hypervisor.lower() not in ["xenserver"]:
        self.cleanup.append(self.account)
        self.skipTest("This test case is written specifically\ for XenServer hypervisor")
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["small"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.testdata['mode'])
    self.cleanup.append(self.virtual_machine)
    list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id)
    self.assertEqual(isinstance(list_vms, list), True,
                     "List VM response was not a valid list")
    self.assertNotEqual(len(list_vms), 0, "List VM response was empty")
    vm = list_vms[0]
    self.assertEqual(vm.id, self.virtual_machine.id,
                     "Virtual Machine ids do not match")
    self.assertEqual(vm.name, self.virtual_machine.name,
                     "Virtual Machine names do not match")
    self.assertEqual(vm.state, "Running", msg="VM is not in Running state")
    hosts = list_hosts(self.apiclient, id=vm.hostid)
    hostip = hosts[0].ipaddress
    try:
        sshClient = SshClient(
            host=hostip,
            port=self.testdata['configurableData']['host']["publicport"],
            user=self.testdata['configurableData']['host']["username"],
            passwd=self.testdata['configurableData']['host']["password"])
        # BUG FIX: the format string had two %s placeholders but only one
        # argument (vm.instancename), raising TypeError before the command
        # could even run on the host.
        res = sshClient.execute(
            "xe vgpu-list vm-name-label=%s params=type-uuid" %
            vm.instancename)
        self.debug("SSH result: %s" % res)
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" % (hostip, e))
    result = str(res)
    # BUG FIX: the failure message previously read "VM is vGPU enabled." —
    # this text is shown when the assertion FAILS, i.e. when it is NOT.
    self.assertEqual(result.count("type-uuid"), 1, "VM is not vGPU enabled.")
    self.cleanup.append(self.account)
def verify_rule_on_host(self, ipaddress, user, password, rule):
    """SSH to a host and assert that `rule` is present in iptables-save output."""
    self.logger.debug("Verifying rule '%s' in host %s" % (rule, ipaddress))
    try:
        session = SshClient(ipaddress, 22, user, password)
        lines = session.execute("iptables-save |grep \"\\%s\"" % rule)
        # The grep must return the exact rule as its first line
        found = bool(lines) and lines[0] == rule
        if not found:
            raise Exception("Unable to apply security group rule")
    except KeyError:
        self.skipTest(
            "Provide a marvin config file with host credentials to run %s" %
            self._testMethodName)
def RestartServer(cls):
    """Restart management server"""
    details = cls.mgtSvrDetails
    shell = SshClient(details["mgtSvrIp"], 22, details["user"], details["passwd"])
    shell.execute("service cloudstack-management restart")
    return
def checkHostUp(self, fromHostIp, testHostIp):
    """Ping testHostIp from fromHostIp; return (reachable, 1)."""
    try:
        session = SshClient(fromHostIp, 22, "root", "password")
        output = str(session.execute("ping -c 1 %s" % testHostIp))
        # A single successful ping reports " 0% packet loss" (leading space
        # distinguishes it from "100% packet loss")
        return (output.count(" 0% packet loss") == 1), 1
    except Exception as exc:
        self.logger.debug("Got exception %s" % exc)
        return False, 1
def checkHostDown(self, fromHostIp, testHostIp):
    """Ping testHostIp from fromHostIp; return (unreachable, 1)."""
    try:
        session = SshClient(fromHostIp, 22, "root", "password")
        output = str(session.execute("ping -c 1 %s" % testHostIp))
        # Total loss of the single probe means the host is down
        return (output.count("100% packet loss") == 1), 1
    except Exception as exc:
        self.logger.debug("Got exception %s" % exc)
        return False, 1
def restartUsageServer(self):
    """Restart the usage server service on the management host."""
    details = self.mgtSvrDetails
    shell = SshClient(details["mgtSvrIp"], 22, details["user"], details["passwd"])
    shell.execute("service cloudstack-usage restart")
    return
def ssh_kvm_host(password, ipaddr, instance_name):
    """Ssh into kvm host and get vm mem details"""
    client = SshClient(ipaddr, 22, "root", password)
    detail = client.execute("virsh dominfo %s" % instance_name)
    # `virsh dominfo` output: line 7 = max memory, line 8 = used memory;
    # the numeric value is the third whitespace-separated field
    max_fields = detail[7].split()
    min_fields = detail[8].split()
    return [int(max_fields[2]), int(min_fields[2])]
def test_DeployVm(self):
    """Deploy a VM with hardcoded offerings and verify it via the list API and SSH.

    Assumes a single configured zone with all templates in the Ready state.
    The hardcoded values are used only for brevity.
    """
    deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd()
    deployVmCmd.zoneid = self.zone.uuid
    deployVmCmd.templateid = self.template.uuid  #CentOS 5.6 builtin
    deployVmCmd.serviceofferingid = self.service_offering.uuid
    deployVmResponse = self.apiClient.deployVirtualMachine(deployVmCmd)
    self.debug("VM %s was deployed in the job %s" % (deployVmResponse.id, deployVmResponse.jobid))
    # At this point our VM is expected to be Running. Let's find out what
    # listVirtualMachines tells us about VMs in this account
    listVmCmd = listVirtualMachines.listVirtualMachinesCmd()
    listVmCmd.id = deployVmResponse.id
    listVmResponse = self.apiClient.listVirtualMachines(listVmCmd)
    self.assertNotEqual(
        len(listVmResponse), 0,
        "Check if the list API \ returns a non-empty response")
    vm = listVmResponse[0]
    self.assertEqual(vm.state, "Running", "Check if VM has reached Running state in CS")
    hostname = vm.name
    # Set up a NAT rule so the VM is reachable from outside on port 22
    nattedip = self.setUpNAT(vm.id)
    self.assertEqual(
        vm.id, deployVmResponse.id,
        "Check if the VM returned \ is the same as the one we deployed")
    self.assertEqual(
        vm.state, "Running",
        "Check if VM has reached \ a state of running")
    # SSH login and compare hostname
    self.debug("Attempting to SSH into %s over %s of %s" % (nattedip, "22", vm.name))
    ssh_client = SshClient(nattedip, "22", "root", "password")
    stdout = ssh_client.execute("hostname")
    self.assertEqual(
        hostname, stdout[0],
        "cloudstack VM name and hostname \ do not match")
def try_ssh(self, ip_addr, hostnames):
    """SSH through the NAT rule public IP, record the served hostname."""
    try:
        self.debug("SSH into NAT Rule (Public IP: %s)" % ip_addr)
        # If Round Robin Algorithm is chosen,
        # each ssh command should alternate between VMs
        creds = self.services["natrule"]
        shell = SshClient(ip_addr, 22, creds["username"], creds["password"])
        hostnames.append(shell.execute("hostname")[0])
        self.debug(hostnames)
    except Exception as exc:
        self.fail("%s: SSH failed for VM with IP Address: %s" % (exc, ip_addr))
    return hostnames
def try_ssh(self, ip_addr, hostnames):
    """SSH through the LB public IP, record the served hostname, then wait."""
    try:
        self.debug("SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" % (self.vm_1.ipaddress, ip_addr))
        # If Round Robin Algorithm is chosen,
        # each ssh command should alternate between VMs
        lb_port = self.services["lbrule"]["publicport"]
        shell = SshClient(ip_addr, lb_port, self.vm_1.username, self.vm_1.password)
        hostnames.append(shell.execute("hostname")[0])
        self.debug(hostnames)
    except Exception as exc:
        self.fail("%s: SSH failed for VM with IP Address: %s" % (exc, ip_addr))
    # Give the load balancer time to switch before the next attempt
    time.sleep(self.services["lb_switch_wait"])
    return
def ssh_xen_host(password, ipaddr, instance_name):
    """Ssh into xen host and get vm mem details"""
    client = SshClient(ipaddr, 22, "root", password)
    detail = client.execute("xe vm-list params=all name-label=%s" % instance_name)
    # Lines 17 and 20 of the `xe vm-list params=all` output are
    # "key: value" pairs holding the max/min memory figures
    mem_high = int(detail[17].split(":")[1])
    mem_low = int(detail[20].split(":")[1])
    return [mem_high, mem_low]
def RestartServer(cls):
    """Restart management server"""
    shell = SshClient(
        cls.mgtSvrDetails["mgtSvrIp"],
        22,
        cls.mgtSvrDetails["user"],
        cls.mgtSvrDetails["passwd"]
    )
    restart_cmd = "service cloudstack-management restart"
    shell.execute(restart_cmd)
    return
def setUpClass(self):
    """Probe for a XenServer >= 6.2.0 host with GRID K120Q vGPU support.

    Sets self.unsupportedHypervisor / self.noSuitableHost so individual
    tests can skip, and creates the test account on success.
    """
    testClient = super(TestDeployvGPUenabledVM, self).getClsTestClient()
    self.apiclient = testClient.getApiClient()
    self.testdata = self.testClient.getParsedTestDataConfig()
    self._cleanup = []
    self.unsupportedHypervisor = False
    self.noSuitableHost = False
    # Need to add check whether zone containing the xen hypervisor or not
    # as well
    hosts = list_hosts(
        self.apiclient,
        hypervisor="XenServer"
    )
    if hosts is None:
        # GPU feature is supported only on XenServer.Check listhosts response
        self.unsupportedHypervisor = True
        return
    else:
        gpuhosts = 0
        for ghost in hosts:
            # NOTE(review): lexicographic string comparison of versions —
            # works for 6.x-style versions but confirm for double digits.
            if ghost.hypervisorversion >= "6.2.0":
                sshClient = SshClient(
                    host=ghost.ipaddress,
                    port=self.testdata['configurableData']['host']["publicport"],
                    user=self.testdata['configurableData']['host']["username"],
                    passwd=self.testdata['configurableData']['host']["password"])
                if ghost.hypervisorversion == "6.2.0":
                    # Presumably the hotfix required for vGPU on 6.2.0 —
                    # skip hosts without this patch. TODO confirm uuid.
                    res = sshClient.execute(
                        "xe patch-list uuid=0850b186-4d47-11e3-a720-001b2151a503")
                    if len(res) == 0:
                        continue
                res = sshClient.execute(
                    "xe vgpu-type-list model-name=\"GRID K120Q\"")
                if len(res) != 0:
                    gpuhosts = gpuhosts + 1
                else:
                    continue
    if gpuhosts == 0:
        # No XenServer available with GPU Drivers installed
        self.noSuitableHost = True
        return
    self.domain = get_domain(self.apiclient)
    self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
    # Creating Account
    self.account = Account.create(
        self.apiclient,
        self.testdata["account"],
        domainid=self.domain.id
    )
    self._cleanup.append(self.account)
def test_deploy_vgpu_enabled_vm(self):
    """Test Deploy Virtual Machine

    # Validate the following:
    # 1. Virtual Machine is accessible via SSH
    # 2. Virtual Machine is vGPU enabled (via SSH)
    # 3. listVirtualMachines returns accurate information
    """
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["vgpu260q"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.testdata['mode'])
    list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug(
        "Verify listVirtualMachines response for virtual machine: %s"
        % self.virtual_machine.id
    )
    self.assertEqual(isinstance(list_vms, list), True,
                     "List VM response was not a valid list")
    self.assertNotEqual(len(list_vms), 0, "List VM response was empty")
    vm = list_vms[0]
    self.assertEqual(vm.id, self.virtual_machine.id,
                     "Virtual Machine ids do not match")
    self.assertEqual(vm.name, self.virtual_machine.name,
                     "Virtual Machine names do not match")
    self.assertEqual(vm.state, "Running", msg="VM is not in Running state")
    hosts = list_hosts(self.apiclient, id=vm.hostid)
    hostip = hosts[0].ipaddress
    try:
        sshClient = SshClient(host=hostip,
                              port=22,
                              user='******',
                              passwd=self.testdata["host_password"])
        # BUG FIX: the format string carried two %s placeholders for a
        # single argument, raising TypeError instead of running the command.
        res = sshClient.execute(
            "xe vgpu-list vm-name-label=%s params=type-uuid" %
            vm.instancename)
        self.debug("SSH result: %s" % res)
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" %
                  (hostip, e)
                  )
    result = str(res)
    # BUG FIX: failure message inverted — it is displayed when the VM is
    # NOT vGPU enabled.
    self.assertEqual(result.count("type-uuid"), 1, "VM is not vGPU enabled.")
def test_es_1236_cloudstack_sccs(self):
    """
    @Desc: Test whether cloudstack-sccs is available on management server
    @Steps:
    Step1: run cloudstack-sccs on management server
    Step2: It should return a commit hash
    """
    # Step1: run cloudstack-sccs on management server
    conn = self.apiClient.connection
    mgmt_ssh = SshClient(conn.mgtSvr, 22, conn.user, conn.passwd)
    res = mgmt_ssh.execute("cloudstack-sccs")
    # Step2: It should return a commit hash
    return
def test_es_1236_cloudstack_sccs(self):
    """
    @Desc: Test whether cloudstack-sccs is available on management server
    @Steps:
    Step1: run cloudstack-sccs on management server
    Step2: It should return a commit hash
    """
    # Step1: run cloudstack-sccs on management server
    mgmt_ssh = SshClient(
        self.apiClient.connection.mgtSvr,
        22,
        self.apiClient.connection.user,
        self.apiClient.connection.passwd,
    )
    res = mgmt_ssh.execute("cloudstack-sccs")
    # Step2: It should return a commit hash
    return
def setUpClass(self):
    """Detect a vGPU-capable XenServer host and prepare the test account.

    Flags self.unsupportedHypervisor when no XenServer is present and
    self.noSuitableHost when no host has the GRID K120Q vGPU type.
    """
    testClient = super(TestDeployvGPUenabledVM, self).getClsTestClient()
    self.apiclient = testClient.getApiClient()
    self.testdata = self.testClient.getParsedTestDataConfig()
    self._cleanup = []
    self.unsupportedHypervisor = False
    self.noSuitableHost = False
    # Need to add check whether zone containing the xen hypervisor or not
    # as well
    hosts = list_hosts(self.apiclient, hypervisor="XenServer")
    if hosts is None:
        # GPU feature is supported only on XenServer.Check listhosts response
        self.unsupportedHypervisor = True
        return
    else:
        gpuhosts = 0
        for ghost in hosts:
            # NOTE(review): version compared lexicographically as a string
            if ghost.hypervisorversion >= "6.2.0":
                sshClient = SshClient(
                    host=ghost.ipaddress,
                    port=self.testdata['configurableData']['host']
                    ["publicport"],
                    user=self.testdata['configurableData']['host']
                    ["username"],
                    passwd=self.testdata['configurableData']['host']
                    ["password"])
                if ghost.hypervisorversion == "6.2.0":
                    # Presumably the required vGPU hotfix for plain 6.2.0
                    # hosts — TODO confirm patch uuid
                    res = sshClient.execute(
                        "xe patch-list uuid=0850b186-4d47-11e3-a720-001b2151a503"
                    )
                    if len(res) == 0:
                        continue
                res = sshClient.execute(
                    "xe vgpu-type-list model-name=\"GRID K120Q\"")
                if len(res) != 0:
                    gpuhosts = gpuhosts + 1
                else:
                    continue
    if gpuhosts == 0:
        # No XenServer available with GPU Drivers installed
        self.noSuitableHost = True
        return
    self.domain = get_domain(self.apiclient)
    self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
    # Creating Account
    self.account = Account.create(self.apiclient,
                                  self.testdata["account"],
                                  domainid=self.domain.id)
    self._cleanup.append(self.account)
def test_multiple_mgmt_srvr_session_timeout(self):
    """
    @Desc: Check whether mgmt server session times out with in 30s
    @Steps:
    Step1: run 'telnet localhot 8250' on the management server
    and see that it times out with in 30seconds
    """
    # Step1: run cloudstack-sccs on management server
    conn = self.apiClient.connection
    mgmt_ssh = SshClient(conn.mgtSvr, 22, conn.user, conn.passwd)
    res = mgmt_ssh.execute("time telnet localhost 8250")
    # Step2: It should return a commit hash
    return
def getVirshXML(self, host, instancename):
    """Fetch and parse the libvirt domain XML for a VM instance on a host."""
    self.assertIsNotNone(host, "Host should not be None")
    self.assertIsNotNone(instancename, "Instance name should not be None")
    shell = SshClient(host=host.ipaddress,
                      port=22,
                      user=self.hostConfig['username'],
                      passwd=self.hostConfig['password'])
    dump_lines = shell.execute('virsh dumpxml %s' % instancename)
    # Re-join the line list into one XML document before parsing
    joined_xml = ''.join(dump_lines)
    return ET.fromstring(joined_xml,
                         parser=etree.XMLParser(remove_blank_text=True))
def test_multiple_mgmt_srvr_session_timeout(self):
    """
    @Desc: Check whether mgmt server session times out with in 30s
    @Steps:
    Step1: run 'telnet localhot 8250' on the management server
    and see that it times out with in 30seconds
    """
    # Step1: run cloudstack-sccs on management server
    mgmt_ssh = SshClient(
        self.apiClient.connection.mgtSvr,
        22,
        self.apiClient.connection.user,
        self.apiClient.connection.passwd,
    )
    res = mgmt_ssh.execute("time telnet localhost 8250")
    # Step2: It should return a commit hash
    return
def try_ssh(self, ip_addr, hostnames):
    """SSH to ip_addr as the test VM user and append the served hostname."""
    try:
        self.debug("SSH into (Public IP: %s)" % ip_addr)
        # If Round Robin Algorithm is chosen,
        # each ssh command should alternate between VMs
        vm = self.virtual_machine
        shell = SshClient(ip_addr, 22, vm.username, vm.password)
        hostnames.append(shell.execute("hostname")[0])
        self.debug(hostnames)
    except Exception as exc:
        self.fail("%s: SSH failed for VM with IP Address: %s" % (exc, ip_addr))
    return hostnames
def _execute_ssh_command(hostip, port, username, password, ssh_command):
    """Run ssh_command on a remote host, retrying while the login settles.

    :param hostip/port/username/password: SSH credentials for the host
    :param ssh_command: command line to execute remotely
    :returns: list of output lines from the command
    """
    # SSH to the machine
    ssh = SshClient(hostip, port, username, password)
    # BUG FIX: `timeout` was read and decremented below but never
    # initialised, so the retry path raised UnboundLocalError. Initialise
    # the retry budget before the loop.
    timeout = 5
    # Ensure the SSH login is successful
    while True:
        res = ssh.execute(ssh_command)
        if "Connection refused".lower() in res[0].lower():
            pass
        elif res[0] != "Host key verification failed.":
            break
        elif timeout == 0:
            break
        time.sleep(5)
        timeout = timeout - 1
    return res
def test_02_pt_deploy_vm_with_startvm_true(self): """ Positive test for stopped VM test path - T1 variant # 1. Deploy VM in the network specifying startvm parameter as True # 2. List VMs and verify that VM is in running state # 3. Verify that router is in running state (Advanced zone) # 4. Add network rules for VM (done in base.py itself) to make # it accessible # 5. Verify that VM is accessible # 6. Destroy and expunge the VM # 7. Wait for network gc time interval and check that router is # in stopped state """ # Create VM in account virtual_machine = VirtualMachine.create( self.userapiclient, self.testdata["small"], templateid=self.defaultTemplateId, accountid=self.account.name, domainid=self.account.domainid, serviceofferingid=self.service_offering.id, networkids=[ self.networkid, ] if self.networkid else None, zoneid=self.zone.id, startvm=True, mode=self.zone.networktype) response = virtual_machine.getState(self.userapiclient, VirtualMachine.RUNNING) self.assertEqual(response[0], PASS, response[1]) if str(self.zone.networktype).lower() == "advanced": response = VerifyRouterState(self.apiclient, self.account.name, self.account.domainid, RUNNING) self.assertTrue(response[0], response[1]) # Check VM accessibility try: SshClient(host=virtual_machine.ssh_ip, port=self.testdata["natrule"]["publicport"], user=virtual_machine.username, passwd=virtual_machine.password) except Exception as e: self.fail("Exception while SSHing to VM: %s" % e) virtual_machine.delete(self.apiclient) if str(self.zone.networktype).lower() == "advanced": # Wait for router to get router in stopped state wait_for_cleanup(self.apiclient, ["network.gc.interval", "network.gc.wait"]) response = VerifyRouterState(self.apiclient, self.account.name, self.account.domainid, STOPPED, retries=10) self.assertTrue(response[0], response[1]) return
def is_server_ssh_ready(ipaddress, port, username, password, retries=20, retryinterv=30, timeout=10.0, keyPairFileLocation=None):
    '''
    @Name: is_server_ssh_ready
    @Input: timeout: tcp connection timeout flag,
            others information need to be added
    @Output: object for SshClient
    Name of the function is little misnomer and is not
    verifying anything as such mentioned
    '''
    try:
        # Build the client; its constructor performs the (retried) connect
        session = SshClient(
            host=ipaddress,
            port=port,
            user=username,
            passwd=password,
            keyPairFiles=keyPairFileLocation,
            retries=retries,
            delay=retryinterv,
            timeout=timeout)
    except Exception as e:
        raise Exception("SSH connection has Failed. Waited %ss. Error is %s" % (retries * retryinterv, str(e)))
    return session
def _execute_ssh_command(hostip, port, username, password, ssh_command):
    """Execute ssh_command on hostip, retrying transient SSH failures.

    :returns: list of output lines from the command
    """
    #SSH to the machine
    ssh = SshClient(hostip, port, username, password)
    # BUG FIX: the loop below referenced an uninitialised local `timeout`
    # (UnboundLocalError on the first retry). Give it a retry budget.
    timeout = 5
    # Ensure the SSH login is successful
    while True:
        res = ssh.execute(ssh_command)
        if "Connection refused".lower() in res[0].lower():
            pass
        elif res[0] != "Host key verification failed.":
            break
        elif timeout == 0:
            break
        time.sleep(5)
        timeout = timeout - 1
    return res
def ssh_kvm_host(password, ipaddr, instance_name):
    """Ssh into kvm host and get vm mem details"""
    shell = SshClient(
        ipaddr,
        22,
        "root",
        password
    )
    info = shell.execute("virsh dominfo %s" % instance_name)
    # `virsh dominfo` lines 7/8 carry the memory rows; field index 2 is the
    # numeric value
    return [int(info[7].split()[2]), int(info[8].split()[2])]
def try_ssh(self, ip_addr, hostnames):
    """SSH via the LB rule's public port and record the responding hostname."""
    try:
        self.debug(
            "SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" %
            (self.vm_1.ipaddress, ip_addr))
        # If Round Robin Algorithm is chosen,
        # each ssh command should alternate between VMs
        session = SshClient(ip_addr,
                            self.services['lbrule']["publicport"],
                            self.vm_1.username,
                            self.vm_1.password)
        hostnames.append(session.execute("hostname")[0])
        self.debug(hostnames)
    except Exception as exc:
        self.fail("%s: SSH failed for VM with IP Address: %s" % (exc, ip_addr))
    # Short pause before the caller's next probe
    time.sleep(10)
    return
def test_releaseIP(self):
    """Test for release public IP address"""
    logger.debug("Deleting Public IP : %s" % self.ip_addr.id)
    # NOTE(review): the id logged comes from self.ip_addr but the delete is
    # issued on self.ip_address — presumably two handles to the same IP;
    # confirm against the fixture setup.
    self.ip_address.delete(self.apiclient)
    # Poll (up to 10 x 60s) until the IP no longer appears in the list API
    retriesCount = 10
    isIpAddressDisassociated = False
    while retriesCount > 0:
        listResponse = list_publicIP(
            self.apiclient,
            id=self.ip_addr.id
        )
        if listResponse is None:
            isIpAddressDisassociated = True
            break
        retriesCount -= 1
        time.sleep(60)
    # End while
    self.assertTrue(
        isIpAddressDisassociated,
        "Failed to disassociate IP address")
    # ListPortForwardingRules should not list
    # associated rules with Public IP address
    try:
        list_nat_rule = list_nat_rules(
            self.apiclient,
            id=self.nat_rule.id
        )
        logger.debug("List NAT Rule response" + str(list_nat_rule))
    except CloudstackAPIException:
        logger.debug("Port Forwarding Rule is deleted")
    # listLoadBalancerRules should not list
    # associated rules with Public IP address
    try:
        list_lb_rule = list_lb_rules(
            self.apiclient,
            id=self.lb_rule.id
        )
        logger.debug("List LB Rule response" + str(list_lb_rule))
    except CloudstackAPIException:
        logger.debug("Port Forwarding Rule is deleted")
    # SSH Attempt though public IP should fail
    with self.assertRaises(Exception):
        SshClient(
            self.ip_addr.ipaddress,
            self.services["natrule"]["publicport"],
            self.virtual_machine.username,
            self.virtual_machine.password,
            retries=2,
            delay=0
        )
    return
def ssh_xen_host(password, ipaddr, instance_name):
    """Ssh into xen host and get vm mem details"""
    shell = SshClient(
        ipaddr,
        22,
        "root",
        password
    )
    listing = shell.execute("xe vm-list params=all name-label=%s" % instance_name)
    # Rows 17 and 20 of the output are "key: value" memory entries
    upper = int(listing[17].split(":")[1])
    lower = int(listing[20].split(":")[1])
    return [upper, lower]
def test_06_reboot_VR_verify_ip_alias(self):
    """Reboot VR and verify ip alias
        1.Deploy guest vm in new cidr
        2.Verify ip alias creation
        3.Reboot VR
        4.Verify ip alias on VR
    """
    list_router_response = list_routers(self.apiclient,
                                        zoneid=self.zone.id,
                                        listall=True)
    self.assertEqual(isinstance(list_router_response, list), True,
                     "Check list response returns a valid list")
    router = list_router_response[0]
    hosts = list_hosts(self.apiclient,
                       zoneid=router.zoneid,
                       type='Routing',
                       state='Up',
                       id=router.hostid)
    self.assertEqual(isinstance(hosts, list), True,
                     "Check list host returns a valid list")
    host = hosts[0]
    self.debug("Router ID: %s, state: %s" % (router.id, router.state))
    self.assertEqual(router.state, 'Running',
                     "Check list router response for router state")
    port = self.testdata['configurableData']['host']["publicport"]
    username = self.testdata['configurableData']['host']["username"]
    password = self.testdata['configurableData']['host']["password"]
    # SSH to host so that host key is saved in first
    # attempt
    SshClient(host.ipaddress, port, username, password)
    proc = "ip addr show eth0"
    result = get_process_status(host.ipaddress, port, username, password,
                                router.linklocalip, proc)
    res = str(result)
    self.debug("ip alias configuration on VR: %s" % res)
    # BUG FIX: the original read `assertNotEqual(res.find(...) - 1, "msg")`
    # — a comma typo turned the expected value (-1) into an arithmetic
    # expression, so an int was compared with the message string and the
    # assertion could never fail. Compare find() against -1 explicitly.
    self.assertNotEqual(res.find(self.alias_ip), -1,
                        "ip alias is not created on VR eth0")
    resp = Router.reboot(self.apiclient, router.id)
    self.debug("Reboot router api response: %s" % resp)
    list_router_response = list_routers(self.apiclient,
                                        zoneid=self.zone.id,
                                        listall=True)
    self.assertEqual(isinstance(list_router_response, list), True,
                     "Check list response returns a valid list")
    router = list_router_response[0]
    self.assertEqual(router.state, 'Running',
                     "Router is not in running state after reboot")
    # The alias must survive the VR reboot
    result = get_process_status(host.ipaddress, port, username, password,
                                router.linklocalip, proc)
    res = str(result)
    self.assertNotEqual(res.find(self.alias_ip), -1,
                        "IP alias not present on VR after VR reboot")
    return
def test_releaseIP(self):
    """Test for release public IP address"""
    self.debug("Deleting Public IP : %s" % self.ip_addr.id)
    # NOTE(review): the delete targets self.ip_address while listings use
    # self.ip_addr — presumably the same IP held twice; confirm in setUp.
    self.ip_address.delete(self.apiclient)
    # Sleep to ensure that deleted state is reflected in other calls
    time.sleep(self.services["sleep"])
    # ListPublicIpAddresses should not list deleted Public IP address
    list_pub_ip_addr_resp = list_publicIP(
        self.apiclient,
        id=self.ip_addr.id
    )
    self.debug("List Public IP response" + str(list_pub_ip_addr_resp))
    self.assertEqual(
        list_pub_ip_addr_resp,
        None,
        "Check if disassociated IP Address is no longer available"
    )
    # ListPortForwardingRules should not list
    # associated rules with Public IP address
    try:
        list_nat_rule = list_nat_rules(
            self.apiclient,
            id=self.nat_rule.id
        )
        self.debug("List NAT Rule response" + str(list_nat_rule))
    except cloudstackAPIException:
        self.debug("Port Forwarding Rule is deleted")
    # listLoadBalancerRules should not list
    # associated rules with Public IP address
    try:
        list_lb_rule = list_lb_rules(
            self.apiclient,
            id=self.lb_rule.id
        )
        self.debug("List LB Rule response" + str(list_lb_rule))
    except cloudstackAPIException:
        self.debug("Port Forwarding Rule is deleted")
    # SSH Attempt though public IP should fail
    with self.assertRaises(Exception):
        ssh_2 = SshClient(
            self.ip_addr.ipaddress,
            self.services["natrule"]["publicport"],
            self.virtual_machine.username,
            self.virtual_machine.password,
            retries=2,
            delay=0
        )
    return
def restartServer(cls):
    """Restart management server"""
    shell = SshClient(
        cls.mgtSvrDetails["mgtSvrIp"],
        22,
        cls.mgtSvrDetails["user"],
        cls.mgtSvrDetails["passwd"]
    )
    # Full stop/start cycle rather than a single restart invocation
    for command in ("service cloudstack-management stop",
                    "service cloudstack-management start"):
        shell.execute(command)
    # Fixed 5-minute wait for the server to come back up
    time.sleep(300)
    return
def test_deploy_vgpu_enabled_vm(self):
    """Test Deploy Virtual Machine

    # Validate the following:
    # 1. Virtual Machine is accessible via SSH
    # 2. Virtual Machine is vGPU enabled (via SSH)
    # 3. listVirtualMachines returns accurate information
    """
    self.virtual_machine = VirtualMachine.create(
        self.apiclient,
        self.testdata["vgpu260q"],
        accountid=self.account.name,
        domainid=self.account.domainid,
        serviceofferingid=self.service_offering.id,
        mode=self.testdata["mode"],
    )
    list_vms = VirtualMachine.list(self.apiclient, id=self.virtual_machine.id)
    self.debug("Verify listVirtualMachines response for virtual machine: %s" % self.virtual_machine.id)
    self.assertEqual(isinstance(list_vms, list), True, "List VM response was not a valid list")
    self.assertNotEqual(len(list_vms), 0, "List VM response was empty")
    vm = list_vms[0]
    self.assertEqual(vm.id, self.virtual_machine.id, "Virtual Machine ids do not match")
    self.assertEqual(vm.name, self.virtual_machine.name, "Virtual Machine names do not match")
    self.assertEqual(vm.state, "Running", msg="VM is not in Running state")
    hosts = list_hosts(self.apiclient, id=vm.hostid)
    hostip = hosts[0].ipaddress
    try:
        sshClient = SshClient(
            host=hostip,
            port=self.testdata["configurableData"]["host"]["publicport"],
            user=self.testdata["configurableData"]["host"]["username"],
            passwd=self.testdata["configurableData"]["host"]["password"],
        )
        # BUG FIX: two %s placeholders with a single argument raised
        # TypeError ("not enough arguments for format string").
        res = sshClient.execute("xe vgpu-list vm-name-label=%s params=type-uuid" % vm.instancename)
        self.debug("SSH result: %s" % res)
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" % (hostip, e))
    result = str(res)
    # BUG FIX: failure message polarity corrected (shown only on failure).
    self.assertEqual(result.count("type-uuid"), 1, "VM is not vGPU enabled.")
def vpnClientServicesStart(virtual_machine, vpnclient_services_script):
    """Start the VPN client services on the VM, logging progress to
    /tmp/executionoutput.txt on the remote side."""
    session = SshClient(
        host=virtual_machine.public_ip,
        port=TestMultipleVPNAccessonVPC.services["virtual_machine"]["ssh_port"],
        user='******',
        passwd='password')
    start_cmd = '%s start >> /tmp/executionoutput.txt' % (vpnclient_services_script)
    session.execute(start_cmd)
    session.execute('echo "VPN Client Services Started" >> /tmp/executionoutput.txt')
    session.close()
def setUpClass(self):
    """Class-level setup for vGPU deployment tests.

    Skips the whole class unless at least one XenServer >= 6.2.0 host
    exposes the GRID K120Q vGPU type; otherwise creates the test account.
    NOTE(review): the parameter is named `self` but this is used as a
    class-level hook — confirm it is wrapped as a classmethod by the caller.
    """
    testClient = super(TestDeployvGPUenabledVM, self).getClsTestClient()
    self.apiclient = testClient.getApiClient()
    self.testdata = self.testClient.getParsedTestDataConfig()
    # Need to add check whether zone containing the xen hypervisor or not as well
    hosts = list_hosts(self.apiclient, hypervisor="XenServer")
    if hosts is None:
        raise unittest.SkipTest(
            "There are no XenServers available. GPU feature is supported only on XenServer.Check listhosts response"
        )
    else:
        gpuhosts = 0
        for ghost in hosts:
            # Only XenServer 6.2.0 and later can expose vGPUs.
            if ghost.hypervisorversion >= "6.2.0":
                sshClient = SshClient(
                    host=ghost.ipaddress,
                    port=22,
                    user='******',
                    passwd=self.testdata["host_password"])
                if ghost.hypervisorversion == "6.2.0":
                    # 6.2.0 needs this specific hotfix; skip hosts without it.
                    res = sshClient.execute(
                        "xe patch-list uuid=0850b186-4d47-11e3-a720-001b2151a503"
                    )
                    if len(res) == 0:
                        continue
                # Host qualifies when the GRID K120Q vGPU type is available.
                res = sshClient.execute(
                    "xe vgpu-type-list model-name=\"GRID K120Q\"")
                if len(res) != 0:
                    gpuhosts = gpuhosts + 1
                else:
                    continue
        if gpuhosts == 0:
            raise unittest.SkipTest(
                "No XenServer available with GPU Drivers installed")
    self.domain = get_domain(self.apiclient)
    self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
    # Creating Account
    self.account = Account.create(self.apiclient,
                                  self.testdata["account"],
                                  domainid=self.domain.id)
    self._cleanup = [self.account]
def setUpClass(cls):
    """Class-level setup for L2 persistent-network tests.

    Skips on KVM hosts configured for OVS, then builds the zone/template
    fixtures and the L2/isolated persistent network offerings.
    """
    cls.testClient = super(TestL2PersistentNetworks, cls).getClsTestClient()
    cls.api_client = cls.testClient.getApiClient()
    cls.hypervisor = cls.testClient.getHypervisorInfo()
    isKVM = cls.hypervisor.lower() in ["kvm"]
    isOVSEnabled = False
    # Host credentials come from the first host of the first cluster in config.
    hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][
        0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
    if isKVM:
        # Test only if all the hosts use OVS
        grepCmd = 'grep "network.bridge.type=openvswitch" /etc/cloudstack/agent/agent.properties'
        hosts = list_hosts(cls.api_client, type='Routing', hypervisor='kvm')
        for host in hosts:
            # Non-empty grep output means the agent is configured for OVS.
            if len(
                    SshClient(host.ipaddress,
                              port=22,
                              user=hostConfig["username"],
                              passwd=hostConfig["password"]).execute(
                                  grepCmd)) != 0:
                isOVSEnabled = True
                break
    if isKVM and isOVSEnabled:
        # NOTE(review): skipTest is invoked unbound with cls as the instance —
        # presumably intentional for class-level skip; confirm.
        cls.skipTest(
            cls, "KVM with OVS doesn't support persistent networks, skipping")
    # Fill services from the external config file
    cls.services = cls.testClient.getParsedTestDataConfig()
    cls.hostConfig = cls.config.__dict__["zones"][0].__dict__["pods"][
        0].__dict__["clusters"][0].__dict__["hosts"][0].__dict__
    # Get Zone and templates
    cls.zone = get_zone(cls.api_client, cls.testClient.getZoneForTests())
    cls.template = get_template(cls.api_client, cls.zone.id,
                                cls.services["ostype"])
    cls.services["virtual_machine"]["zoneid"] = cls.zone.id
    cls.services["virtual_machine"]["template"] = cls.template.id
    cls.service_offering = ServiceOffering.create(
        cls.api_client, cls.services["service_offering"])
    cls.l2_persistent_network_offering = cls.create_network_offering(
        "nw_off_L2_persistent")
    cls.isolated_persistent_network_offering = cls.create_network_offering(
        "nw_off_isolated_persistent")
    # network will be deleted as part of account cleanup
    cls._cleanup = [
        cls.service_offering,
        cls.isolated_persistent_network_offering,
        cls.l2_persistent_network_offering
    ]
    return
def get_ssh_client(self, ip, username, password, retries=10):
    """
    Setup ssh client connection and return connection

    :param ip:       address to connect to (port 22)
    :param username: SSH user
    :param password: SSH password
    :param retries:  connection retry count passed to SshClient
    :returns: connected SshClient instance
    :raises unittest.SkipTest: when the connection cannot be established
    """
    try:
        ssh_client = SshClient(ip, 22, username, password, retries)
    except Exception as e:
        # BUG FIX: the original used '"..." % e' on a format string with no
        # placeholder, which raised TypeError instead of skipping the test.
        raise self.skipTest("Unable to create ssh connection: %s" % e)
    self.assertIsNotNone(ssh_client,
                         "Failed to setup ssh connection to ip=%s" % ip)
    return ssh_client
def restartServer(cls):
    """Restart the management server and poll until it reports up,
    failing after a five-minute deadline."""
    client = SshClient(
        cls.mgtSvrDetails["mgtSvrIp"],
        22,
        cls.mgtSvrDetails["user"],
        cls.mgtSvrDetails["passwd"]
    )
    client.execute("service cloudstack-management stop")
    client.execute("service cloudstack-management start")
    # Poll every 5 seconds; give the server at most 5 minutes to come up.
    deadline = time.time() + 300
    while time.time() < deadline:
        if cls.isManagementUp() is True:
            return
        time.sleep(5)
    return cls.fail("Management server did not come up, failing")
def filecopy(cls, virtual_machine, localfile=None, remotefilelocation=None, permissions="644"):
    """Copy *localfile* to *remotefilelocation* on the VM over SCP and
    set its permission bits."""
    ssh_port = TestMultipleVPNAccessonVPC.services["virtual_machine"]["ssh_port"]
    cls.ssh = SshClient(
        host=virtual_machine.public_ip,
        port=ssh_port,
        user='******',
        passwd='password')
    connection = cls.ssh
    connection.scp(localfile, remotefilelocation)
    connection.runCommand('chmod %s %s' % (permissions, remotefilelocation))
    cls.debug("%s file successfully copied to %s " % (localfile, remotefilelocation))
    connection.close()
def try_ssh(self, ip_addr, unameCmd, firstAttempt=False):
    """SSH to ip_addr through the LB rule, run `uname`, and append the
    first output line to unameCmd; fail the test on any SSH error."""
    try:
        self.debug(
            "SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" %
            (self.vm_1.ipaddress, ip_addr))
        # Allow many more connection retries on the very first attempt.
        retries = 30 if firstAttempt else 3
        # If Round Robin Algorithm is chosen,
        # each ssh command should alternate between VMs
        session = SshClient(
            ip_addr,
            self.services['lbrule']["publicport"],
            self.vm_1.username,
            self.vm_1.password,
            retries=retries)
        unameCmd.append(session.execute("uname")[0])
        self.debug(unameCmd)
    except Exception as e:
        self.fail("%s: SSH failed for VM with IP Address: %s" % (e, ip_addr))
    time.sleep(5)
def setUpClass(self):
    """Class-level setup for vGPU VM deployment tests.

    Requires at least one XenServer >= 6.2.0 host exposing the
    GRID K120Q vGPU type, then creates the test account.
    NOTE(review): parameter is named `self` but used at class scope —
    confirm the caller wraps this as a classmethod.
    """
    testClient = super(TestDeployvGPUenabledVM, self).getClsTestClient()
    self.apiclient = testClient.getApiClient()
    self.testdata = self.testClient.getParsedTestDataConfig()
    # Need to add check whether zone containing the xen hypervisor or not as well
    hosts = list_hosts(
        self.apiclient,
        hypervisor="XenServer"
    )
    if hosts is None:
        raise unittest.SkipTest("There are no XenServers available. GPU feature is supported only on XenServer.Check listhosts response")
    else:
        gpuhosts=0
        for ghost in hosts :
            # Only XenServer 6.2.0 and later can expose vGPUs.
            if ghost.hypervisorversion >= "6.2.0":
                sshClient = SshClient(host=ghost.ipaddress, port=22, user='******',passwd=self.testdata["host_password"])
                if ghost.hypervisorversion == "6.2.0":
                    # 6.2.0 needs this specific hotfix; skip hosts without it.
                    res = sshClient.execute("xe patch-list uuid=0850b186-4d47-11e3-a720-001b2151a503")
                    if len(res) == 0:
                        continue
                # Host qualifies when the GRID K120Q vGPU type is present.
                res = sshClient.execute("xe vgpu-type-list model-name=\"GRID K120Q\"")
                if len(res) != 0 :
                    gpuhosts=gpuhosts+1
                else:
                    continue
        if gpuhosts == 0:
            raise unittest.SkipTest("No XenServer available with GPU Drivers installed")
    self.domain = get_domain(self.apiclient)
    self.zone = get_zone(self.apiclient, self.testClient.getZoneForTests())
    # Creating Account
    self.account = Account.create(
        self.apiclient,
        self.testdata["account"],
        domainid=self.domain.id
    )
    self._cleanup = [
        self.account
    ]
def try_ssh(self, ip_addr, unameCmd, firstAttempt=False):
    """SSH into the load-balanced IP, run `uname`, and append the first
    output line to unameCmd; fails the test on any SSH error.

    :param ip_addr:      public IP fronting the NAT/LB rule
    :param unameCmd:     list accumulating `uname` outputs across calls
    :param firstAttempt: when True, use many more connection retries
    """
    try:
        self.debug(
            "SSH into VM (IPaddress: %s) & NAT Rule (Public IP: %s)" %
            (self.vm_1.ipaddress, ip_addr)
        )
        retries = 3
        # First attempt gets extra retries while the VM finishes booting.
        if firstAttempt:
            retries = 30
        # If Round Robin Algorithm is chosen,
        # each ssh command should alternate between VMs
        ssh_1 = SshClient(
            ip_addr,
            self.services['lbrule']["publicport"],
            self.vm_1.username,
            self.vm_1.password,
            retries=retries
        )
        unameCmd.append(ssh_1.execute("uname")[0])
        self.debug(unameCmd)
    except Exception as e:
        self.fail("%s: SSH failed for VM with IP Address: %s" % (e, ip_addr))
    # Brief pause between successive LB probes.
    time.sleep(5)
def is_snapshot_on_nfs(apiclient, dbconn, config, zoneid, snapshotid):
    """
    Checks whether a snapshot with id (not UUID) `snapshotid` is present on the nfs storage

    @param apiclient: api client connection
    @param @dbconn: connection to the cloudstack db
    @param config: marvin configuration file
    @param zoneid: uuid of the zone on which the secondary nfs storage pool is mounted
    @param snapshotid: uuid of the snapshot
    @return: True if snapshot is found, False otherwise
    """
    # snapshot extension to be appended to the snapshot path obtained from db
    snapshot_extensions = {"vmware": ".ovf",
                           "kvm": "",
                           "xenserver": ".vhd",
                           "simulator": ""}
    # Resolve the snapshot UUID to its internal database id.
    qresultset = dbconn.execute(
        "select id from snapshots where uuid = '%s';" % str(snapshotid))
    if len(qresultset) == 0:
        raise Exception(
            "No snapshot found in cloudstack with id %s" % snapshotid)
    snapshotid = qresultset[0][0]
    qresultset = dbconn.execute(
        "select install_path,store_id from snapshot_store_ref where snapshot_id='%s' and store_role='Image';" % snapshotid)
    assert isinstance(qresultset, list), \
        "Invalid db query response for snapshot %s" % snapshotid
    if len(qresultset) == 0:
        # Snapshot does not exist
        return False
    from base import ImageStore
    # pass store_id to get the exact storage pool where snapshot is stored
    secondaryStores = ImageStore.list(apiclient,
                                      zoneid=zoneid,
                                      id=int(qresultset[0][1]))
    assert isinstance(secondaryStores, list), \
        "Not a valid response for listImageStores"
    assert len(secondaryStores) != 0, \
        "No image stores found in zone %s" % zoneid
    secondaryStore = secondaryStores[0]
    if str(secondaryStore.providername).lower() != "nfs":
        raise Exception(
            "is_snapshot_on_nfs works only against nfs secondary storage."
            " found %s" % str(secondaryStore.providername))
    hypervisor = get_hypervisor_type(apiclient)
    # append snapshot extension based on hypervisor, to the snapshot path
    snapshotPath = str(qresultset[0][0]) + \
        snapshot_extensions[str(hypervisor).lower()]
    nfsurl = secondaryStore.url
    from urllib2 import urlparse
    parse_url = urlparse.urlsplit(nfsurl, scheme='nfs')
    host, path = str(parse_url.netloc), str(parse_url.path)
    if not config.mgtSvr:
        raise Exception(
            "Your marvin configuration does not contain mgmt server credentials")
    mgtSvr, user, passwd = (config.mgtSvr[0].mgtSvrIp,
                            config.mgtSvr[0].user,
                            config.mgtSvr[0].passwd)
    try:
        ssh_client = SshClient(mgtSvr, 22, user, passwd)
        pathSeparator = ""  # used to form host:dir format
        if not host.endswith(':'):
            pathSeparator = ":"
        cmds = [
            # BUG FIX: the original command was "mkdir -p %s /mnt/tmp" with a
            # literal, unformatted %s — it created a stray directory named '%s'.
            "mkdir -p /mnt/tmp",
            "mount -t %s %s%s%s /mnt/tmp" % (
                'nfs',
                host,
                pathSeparator,
                path,
            ),
            "test -f %s && echo 'snapshot exists'" % (
                os.path.join("/mnt/tmp", snapshotPath)
            ),
        ]
        for c in cmds:
            result = ssh_client.execute(c)
        # Unmount the Sec Storage
        cmds = [
            "cd",
            "umount /mnt/tmp",
        ]
        for c in cmds:
            ssh_client.execute(c)
    except Exception as e:
        raise Exception("SSH failed for management server: %s - %s" %
                        (config.mgtSvr[0].mgtSvrIp, e))
    # `result` holds the output of the last command (the `test -f`).
    return 'snapshot exists' in result
def test_DeployVm(self):
    """
    Let's start by defining the attributes of our VM that we will be
    deploying on CloudStack. We will be assuming a single zone is available
    and is configured and all templates are Ready

    The hardcoded values are used only for brevity.
    """
    deployVmCmd = deployVirtualMachine.deployVirtualMachineCmd()
    deployVmCmd.zoneid = self.zone.uuid
    deployVmCmd.templateid = self.template.uuid  # CentOS 5.6 builtin
    deployVmCmd.serviceofferingid = self.service_offering.uuid
    deployVmResponse = self.apiClient.deployVirtualMachine(deployVmCmd)
    self.debug("VM %s was deployed in the job %s" %
               (deployVmResponse.id, deployVmResponse.jobid))
    # At this point our VM is expected to be Running. Let's find out what
    # listVirtualMachines tells us about VMs in this account
    listVmCmd = listVirtualMachines.listVirtualMachinesCmd()
    listVmCmd.id = deployVmResponse.id
    listVmResponse = self.apiClient.listVirtualMachines(listVmCmd)
    self.assertNotEqual(
        len(listVmResponse),
        0,
        "Check if the list API \
        returns a non-empty response",
    )
    vm = listVmResponse[0]
    self.assertEqual(vm.state, "Running",
                     "Check if VM has reached Running state in CS")
    hostname = vm.name
    # Set up a NAT rule so the VM is reachable from outside.
    nattedip = self.setUpNAT(vm.id)
    self.assertEqual(
        vm.id,
        deployVmResponse.id,
        "Check if the VM returned \
        is the same as the one we deployed",
    )
    self.assertEqual(
        vm.state,
        "Running",
        "Check if VM has reached \
        a state of running",
    )
    # SSH login and compare hostname
    self.debug("Attempting to SSH into %s over %s of %s" %
               (nattedip, "22", vm.name))
    ssh_client = SshClient(nattedip, "22", "root", "password")
    stdout = ssh_client.execute("hostname")
    self.assertEqual(
        hostname,
        stdout[0],
        "cloudstack VM name and hostname \
        do not match",
    )
def exec_script_on_user_vm(self, script, exec_cmd_params, expected_result, negative_test=False):
    """Run *script* inside the user VM by double-hopping through the VR.

    Builds an expect script that SSHes host -> router (link-local, port
    3922) -> VM, copies it to the host (or mgmt server for VMware), and
    retries for up to ~6 minutes until the output matches expected_result.

    :param script:          command to execute inside the user VM
    :param exec_cmd_params: extra CLI parameters for the expect script
    :param expected_result: exact (stripped) output string expected
    :param negative_test:   changes only the assertion message wording
    """
    try:
        vm_network_id = self.virtual_machine.nic[0].networkid
        vm_ipaddress = self.virtual_machine.nic[0].ipaddress
        list_routers_response = list_routers(self.apiclient,
                                             account=self.account.name,
                                             domainid=self.account.domainid,
                                             networkid=vm_network_id)
        self.assertEqual(isinstance(list_routers_response, list),
                         True,
                         "Check for list routers response return valid data")
        router = list_routers_response[0]
        # Once host or mgt server is reached, SSH to the router connected to VM
        # look for Router for Cloudstack VM network.
        if self.apiclient.hypervisor.lower() == 'vmware':
            # SSH is done via management server for Vmware
            sourceip = self.apiclient.connection.mgtSvr
        else:
            # For others, we will have to get the ipaddress of host connected to vm
            hosts = list_hosts(self.apiclient, id=router.hostid)
            self.assertEqual(isinstance(hosts, list),
                             True,
                             "Check list response returns a valid list")
            host = hosts[0]
            sourceip = host.ipaddress
        self.debug("Sleep %s seconds for network on router to be up"
                   % self.services['sleep'])
        time.sleep(self.services['sleep'])
        # Key used for the hop into the router differs per hypervisor.
        if self.apiclient.hypervisor.lower() == 'vmware':
            key_file = " -i /var/cloudstack/management/.ssh/id_rsa "
        else:
            key_file = " -i /root/.ssh/id_rsa.cloud "
        ssh_cmd = "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o LogLevel=quiet"
        # expect script: hop into the router, then ssh on to the VM and run
        # the command there, answering the VM's password prompt.
        expect_script = "#!/usr/bin/expect\n" + \
            "spawn %s %s -p 3922 root@%s\n" % (ssh_cmd, key_file, router.linklocalip) + \
            "expect \"root@%s:~#\"\n" % (router.name) + \
            "send \"%s root@%s %s; exit $?\r\"\n" % (ssh_cmd, vm_ipaddress, script) + \
            "expect \"root@%s's password: \"\n" % (vm_ipaddress) + \
            "send \"password\r\"\n" + \
            "interact\n"
        self.debug("expect_script>>\n%s<<expect_script" % expect_script)
        script_file = '/tmp/expect_script.exp'
        fd = open(script_file, 'w')
        fd.write(expect_script)
        fd.close()
        ssh = SshClient(host=sourceip, port=22, user='******',
                        passwd=self.services["host_password"])
        self.debug("SSH client to : %s obtained" % sourceip)
        ssh.scp(script_file, script_file)
        ssh.execute('chmod +x %s' % script_file)
        self.debug("%s %s" % (script_file, exec_cmd_params))
        exec_success = False
        # Timeout set to 6 minutes
        timeout = 360
        while timeout:
            self.debug('sleep %s seconds for egress rule to affect on Router.'
                       % self.services['sleep'])
            time.sleep(self.services['sleep'])
            result = ssh.execute("%s %s" % (script_file, exec_cmd_params))
            self.debug('Result is=%s' % result)
            self.debug('Expected result is=%s' % expected_result)
            if str(result).strip() == expected_result:
                exec_success = True
                break
            else:
                if result == []:
                    self.fail("Router is not accessible")
                # This means router network did not come up as yet loop back.
                if "send" in result[0]:
                    timeout -= self.services['sleep']
                else:
                    # Failed due to some other error
                    break
        # end while
        if timeout == 0:
            self.fail("Router network failed to come up after 6 minutes.")
        ssh.execute('rm -rf %s' % script_file)
        if negative_test:
            self.assertEqual(exec_success,
                             True,
                             "Script result is %s matching with %s"
                             % (result, expected_result))
        else:
            self.assertEqual(exec_success,
                             True,
                             "Script result is %s is not matching with %s"
                             % (result, expected_result))
    except Exception as e:
        self.debug('Error=%s' % e)
        raise e
def setUpClass(cls):
    """Class-level setup for vGPU Windows VM tests.

    Scans every XenServer >= 6.2.0 host over SSH, counting per-vGPU-model
    host availability for GRID K1 and K2 cards, skips the class when no
    GPU host exists, then creates the account and registers/waits for the
    Windows template.
    """
    testClient = super(TestvGPUWindowsVm, cls).getClsTestClient()
    cls.testdata = cls.testClient.getParsedTestDataConfig()
    cls.apiclient = cls.testClient.getApiClient()
    cls._cleanup = []
    hosts = list_hosts(
        cls.apiclient,
        hypervisor="XenServer"
    )
    if hosts is None:
        raise unittest.SkipTest("There are no XenServers available. GPU feature is supported only on XenServer.Check listhosts response")
    else:
        # Per-vGPU-model counters of hosts offering that model.
        cls.k140qgpuhosts=0
        cls.k120qgpuhosts=0
        cls.k100gpuhosts=0
        cls.k260qgpuhosts=0
        cls.k240qgpuhosts=0
        cls.k220qgpuhosts=0
        cls.k200gpuhosts=0
        cls.k1passthroughgpuhosts=0
        cls.k2passthroughgpuhosts=0
        k2hosts=0
        k1hosts=0
        for ghost in hosts :
            # vGPU support starts with XenServer 6.2.0.
            if ghost.hypervisorversion >= "6.2.0":
                sshClient = SshClient(host=ghost.ipaddress, port=22, user='******',passwd="host_password")
                if ghost.hypervisorversion == "6.2.0":
                    # 6.2.0 needs this specific hotfix; skip hosts without it.
                    res = sshClient.execute("xe patch-list uuid=0850b186-4d47-11e3-a720-001b2151a503")
                    if len(res) == 0:
                        continue
                # Detect which physical GRID card(s) the host carries.
                k1card= sshClient.execute("lspci | grep \"GRID K1\"")
                k2card= sshClient.execute("lspci | grep \"GRID K2\"")
                if len(k2card) !=0:
                    k2hosts=k2hosts+1
                    # Count which K2 vGPU models this host exposes.
                    k260q = sshClient.execute("xe vgpu-type-list model-name=\"GRID K260Q\"")
                    k240q = sshClient.execute("xe vgpu-type-list model-name=\"GRID K240Q\"")
                    k220q = sshClient.execute("xe vgpu-type-list model-name=\"GRID K220Q\"")
                    k200 = sshClient.execute("xe vgpu-type-list model-name=\"GRID K200\"")
                    k2passthrough = sshClient.execute("xe vgpu-type-list model-name='passthrough'")
                    if (len(k260q) == 0) and len(k240q) == 0 and len(k220q) == 0 and len(k200) == 0 and len(k2passthrough) == 0:
                        continue
                    else:
                        if len(k260q) != 0 :
                            cls.k260qgpuhosts=cls.k260qgpuhosts+1
                        if len(k240q) != 0 :
                            cls.k240qgpuhosts=cls.k240qgpuhosts+1
                        if len(k220q) != 0 :
                            cls.k220qgpuhosts=cls.k220qgpuhosts+1
                        if len(k200) != 0 :
                            cls.k200gpuhosts=cls.k200gpuhosts+1
                        if len(k2passthrough) != 0:
                            cls.k2passthroughgpuhosts=cls.k2passthroughgpuhosts+1
                if len(k1card) != 0:
                    k1hosts=k1hosts+1
                    # Count which K1 vGPU models this host exposes.
                    k100 = sshClient.execute("xe vgpu-type-list model-name=\"GRID K100\"")
                    k120q = sshClient.execute("xe vgpu-type-list model-name=\"GRID K120Q\"")
                    k140q = sshClient.execute("xe vgpu-type-list model-name=\"GRID K140Q\"")
                    k1passthrough = sshClient.execute("xe vgpu-type-list model-name='passthrough'")
                    if len(k100) == 0 and len(k120q) == 0 and len(k140q) == 0 and len(k1passthrough) == 0:
                        continue
                    else:
                        if len(k140q) != 0 :
                            cls.k140qgpuhosts=cls.k140qgpuhosts+1
                        if len(k120q) != 0 :
                            cls.k120qgpuhosts=cls.k120qgpuhosts+1
                        if len(k100) != 0 :
                            cls.k100gpuhosts=cls.k100gpuhosts+1
                        if len(k1passthrough) != 0 :
                            cls.k1passthroughgpuhosts=cls.k1passthroughgpuhosts+1
    if (k1hosts == 0) and (k2hosts == 0):
        raise unittest.SkipTest("No XenServer available with GPU Drivers installed")
    cls.zone = get_zone(cls.apiclient, cls.testClient.getZoneForTests())
    cls.domain = get_domain(cls.apiclient)
    cls.account = Account.create(
        cls.apiclient,
        cls.testdata["account"],
        domainid=cls.domain.id
    )
    cls.template = get_windows_template(cls.apiclient, cls.zone.id, ostype_desc="Windows 8 (64-bit)")
    if cls.template == FAILED:
        # No suitable template registered yet: register one ourselves.
        cls.template = Template.register(
            cls.apiclient,
            cls.testdata["vgpu"]["templateregister1"],
            hypervisor="XenServer",
            zoneid=cls.zone.id,
            domainid=cls.account.domainid,
            account=cls.account.name
        )
    # Poll until the template download completes or the budget is exhausted.
    timeout = cls.testdata["vgpu"]["timeout"]
    while True:
        time.sleep(cls.testdata["vgpu"]["sleep"])
        list_template_response = Template.list(
            cls.apiclient,
            templatefilter=cls.testdata["templatefilter"],
            id=cls.template.id
        )
        if (isinstance(list_template_response, list)) is not True:
            raise unittest.SkipTest("Check list template api response returns a valid list")
        # NOTE(review): `len(...) is None` is always False, so this guard is
        # ineffective — presumably `len(...) == 0` was intended; confirm.
        if len(list_template_response) is None :
            raise unittest.SkipTest("Check template registered is in List Templates")
        template_response = list_template_response[0]
        if template_response.isready == True:
            break
        if timeout == 0:
            raise unittest.SkipTest("Failed to download template(ID: %s)" % template_response.id)
        timeout = timeout - 1
    cls._cleanup = [
        cls.account
    ]
# Script entry: parse CLI options, restart the management server over SSH,
# then wait until the API port (8096) accepts connections.
parser.add_option("-c", "--config",
                  action="store",
                  default="xen.cfg",
                  dest="config",
                  help="the path where the server configuration is stored")
parser.add_option("-k", "--noSimulator",
                  action="store",
                  default=True,
                  dest="noSimulator",
                  help="will not build simulator if set to true")
(options, args) = parser.parse_args()
if options.config is None:
    # BUG FIX: a bare `raise` outside an except block raises a confusing
    # "No active exception to re-raise" RuntimeError; raise explicitly.
    raise ValueError("a --config file path is required")
# cscfg = configGenerator.get_setup_config(options.config)
cscfg = configGenerator.getSetupConfig(options.config)
mgmt_server = cscfg.mgtSvr[0].mgtSvrIp
ssh = SshClient(mgmt_server, 22, "root", "password")
ssh.execute("python /root/restartMgmtServer.py -p /automation/cloudstack --noSimulator %s > /var/log/cloudstack.log" % (options.noSimulator))
# Telnet wait until api port is open
tn = None
timeout = 120
while timeout > 0:
    try:
        tn = telnetlib.Telnet(mgmt_server, 8096, timeout=120)
        break
    except Exception:
        delay(5)
        timeout = timeout - 1
if tn is None:
    raise socket.error("Unable to reach API port")
def test_03_elb_delete_lb_system(self): """Test delete LB rule generated with public IP with is_system = 1 """ # Validate the following # 1. Deleting LB rule should release EIP where is_system=1 # 2. check configuration changes for EIP reflects on NS # commands to verify on NS: # * "show ip" # * "show lb vserer"-make sure that output says they are all up and # running and USNIP : ON self.debug("Fetching LB IP for account: %s" % self.account.name) ip_addrs = PublicIPAddress.list( self.api_client, associatednetworkid=self.guest_network.id, account=self.account.name, domainid=self.account.domainid, forloadbalancing=True, listall=True, ) self.assertEqual( isinstance(ip_addrs, list), True, "List Public IP address should return valid IP address for network" ) lb_ip = ip_addrs[0] self.debug("LB IP generated for account: %s is: %s" % (self.account.name, lb_ip.ipaddress)) self.debug("Deleting LB rule: %s" % self.lb_rule.id) self.lb_rule.delete(self.apiclient) time.sleep(60) self.debug("SSH into netscaler: %s" % self.services["netscaler"]["ipaddress"]) try: ssh_client = SshClient( self.services["netscaler"]["ipaddress"], 22, self.services["netscaler"]["username"], self.services["netscaler"]["password"], ) self.debug("command: show ip") res = ssh_client.execute("show ip") result = str(res) self.debug("Output: %s" % result) self.assertEqual( result.count(lb_ip.ipaddress), 0, "One IP from EIP pool should be taken and configured on NS" ) self.debug("Command:show lb vserver") res = ssh_client.execute("show lb vserver") result = str(res) self.debug("Output: %s" % result) self.assertEqual( result.count("Cloud-VirtualServer-%s-22 (%s:22) - TCP" % (lb_ip.ipaddress, lb_ip.ipaddress)), 0, "User subnet IP should be enabled for LB service", ) except Exception as e: self.fail("SSH Access failed for %s: %s" % (self.services["netscaler"]["ipaddress"], e)) return
def test_01_elb_create(self):
    """Test ELB by creating a LB rule
    """
    # Validate the following
    # 1. Deploy 2 instances
    # 2. Create LB rule to port 22 for the VMs and try to access VMs with
    #    EIP:port. Make sure that ingress rule is created to allow access
    #    with universal CIDR (0.0.0.0/0)
    # 3. For LB rule IP user_ip_address.is_system=1
    # 4. check configuration changes for EIP reflects on NS
    #    commands to verify on NS :
    #    * "show ip"
    #    * "show lb vserer"-make sure that output says they are all up
    #      and running and USNIP : ON

    # Verify listSecurity groups response
    security_groups = SecurityGroup.list(self.apiclient,
                                         account=self.account.name,
                                         domainid=self.account.domainid)
    self.assertEqual(isinstance(security_groups, list),
                     True,
                     "Check for list security groups response")
    self.assertEqual(len(security_groups),
                     1,
                     "Check List Security groups response")
    self.debug("List Security groups response: %s" % str(security_groups))
    security_group = security_groups[0]
    self.debug("Creating Ingress rule to allow SSH on default security group")
    # Open port 22 to 0.0.0.0/0 so the ELB IP is reachable over SSH.
    cmd = authorizeSecurityGroupIngress.authorizeSecurityGroupIngressCmd()
    cmd.domainid = self.account.domainid
    cmd.account = self.account.name
    cmd.securitygroupid = security_group.id
    cmd.protocol = "TCP"
    cmd.startport = 22
    cmd.endport = 22
    cmd.cidrlist = "0.0.0.0/0"
    self.apiclient.authorizeSecurityGroupIngress(cmd)
    self.debug("Fetching LB IP for account: %s" % self.account.name)
    # NOTE(review): uses self.api_client here but self.apiclient above —
    # presumably both aliases exist on the class; confirm.
    ip_addrs = PublicIPAddress.list(
        self.api_client,
        associatednetworkid=self.guest_network.id,
        account=self.account.name,
        domainid=self.account.domainid,
        forloadbalancing=True,
        listall=True,
    )
    self.assertEqual(
        isinstance(ip_addrs, list),
        True,
        "List Public IP address should return valid IP address for network"
    )
    lb_ip = ip_addrs[0]
    self.debug("LB IP generated for account: %s is: %s" %
               (self.account.name, lb_ip.ipaddress))
    # TODO: uncomment this after ssh issue is resolved: SSH into both VMs
    # through the ELB IP, run "hostname", and assert the result is the
    # hostname of one of vm_1/vm_2 (round-robin across the two instances).
    # Fetch details from user_ip_address table in database
    self.debug(
        "select is_system from user_ip_address where public_ip_address='%s';"
        % lb_ip.ipaddress)
    qresultset = self.dbclient.execute(
        "select is_system from user_ip_address where public_ip_address='%s';"
        % lb_ip.ipaddress
    )
    self.assertEqual(isinstance(qresultset, list),
                     True,
                     "Check DB query result set for valid data")
    self.assertNotEqual(len(qresultset), 0, "Check DB Query result set")
    qresult = qresultset[0]
    # System-generated LB IPs are flagged is_system=1 in the DB.
    self.assertEqual(qresult[0],
                     1,
                     "is_system value should be 1 for system generated LB rule")
    self.debug("SSH into netscaler: %s" %
               self.services["netscaler"]["ipaddress"])
    try:
        ssh_client = SshClient(
            self.services["netscaler"]["ipaddress"],
            22,
            self.services["netscaler"]["username"],
            self.services["netscaler"]["password"],
        )
        self.debug("command: show ip")
        res = ssh_client.execute("show ip")
        result = str(res)
        self.debug("Output: %s" % result)
        self.assertEqual(
            result.count(lb_ip.ipaddress),
            1,
            "One IP from EIP pool should be taken and configured on NS"
        )
        self.debug("Command:show lb vserver")
        res = ssh_client.execute("show lb vserver")
        result = str(res)
        self.debug("Output: %s" % result)
        self.assertEqual(
            result.count("Cloud-VirtualServer-%s-22 (%s:22) - TCP" %
                         (lb_ip.ipaddress, lb_ip.ipaddress)),
            1,
            "User subnet IP should be enabled for LB service",
        )
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" %
                  (self.services["netscaler"]["ipaddress"], e))
    return
def test_02_elb_acquire_and_create(self):
    """Test ELB by acquiring IP and then creating a LB rule
    """
    # Validate the following
    # 1. Deploy 2 instances
    # 2. Create LB rule to port 22 for the VMs and try to access VMs with
    #    EIP:port. Make sure that ingress rule is created to allow access
    #    with universal CIDR (0.0.0.0/0)
    # 3. For LB rule IP user_ip_address.is_system=0
    # 4. check configuration changes for EIP reflects on NS
    #    commands to verify on NS :
    #    * "show ip"
    #    * "show lb vserer" - make sure that output says they are all up
    #      and running and USNIP : ON
    self.debug("Acquiring new IP for network: %s" % self.guest_network.id)
    public_ip = PublicIPAddress.create(
        self.apiclient,
        accountid=self.account.name,
        zoneid=self.zone.id,
        domainid=self.account.domainid,
        services=self.services["virtual_machine"],
    )
    self.debug("IP address: %s is acquired by network: %s" %
               (public_ip.ipaddress, self.guest_network.id))
    self.debug("Creating LB rule for public IP: %s" % public_ip.ipaddress)
    lb_rule = LoadBalancerRule.create(
        self.apiclient,
        self.services["lbrule"],
        accountid=self.account.name,
        ipaddressid=public_ip.ipaddress.id,
        networkid=self.guest_network.id,
        # BUG FIX: was `self.account.domaind` (attribute typo) which raised
        # AttributeError before the rule could be created.
        domainid=self.account.domainid,
    )
    self.debug("Assigning VMs (%s, %s) to LB rule: %s" %
               (self.vm_1.name, self.vm_2.name, lb_rule.name))
    lb_rule.assign(self.apiclient, [self.vm_1, self.vm_2])
    # TODO: workaround: add route in the guest VM for SNIP, then re-enable
    # the SSH-based hostname verification through the acquired public IP
    # (ssh to public_ip, run "hostname", assert it matches vm_1 or vm_2).
    # Fetch details from user_ip_address table in database
    self.debug(
        "select is_system from user_ip_address where public_ip_address='%s';"
        % public_ip.ipaddress)
    qresultset = self.dbclient.execute(
        "select is_system from user_ip_address where public_ip_address='%s';"
        % public_ip.ipaddress
    )
    self.assertEqual(isinstance(qresultset, list),
                     True,
                     "Check DB query result set for valid data")
    self.assertNotEqual(len(qresultset), 0, "Check DB Query result set")
    qresult = qresultset[0]
    # A user-acquired IP must NOT be flagged as system-generated.
    self.assertEqual(qresult[0],
                     0,
                     "is_system value should be 0 for non-system generated LB rule")
    self.debug("SSH into netscaler: %s" %
               self.services["netscaler"]["ipaddress"])
    try:
        ssh_client = SshClient(
            self.services["netscaler"]["ipaddress"],
            22,
            self.services["netscaler"]["username"],
            self.services["netscaler"]["password"],
        )
        self.debug("command: show ip")
        res = ssh_client.execute("show ip")
        result = str(res)
        self.debug("Output: %s" % result)
        self.assertEqual(
            result.count(public_ip.ipaddress),
            1,
            "One IP from EIP pool should be taken and configured on NS"
        )
        self.debug("Command:show lb vserver")
        res = ssh_client.execute("show lb vserver")
        result = str(res)
        self.debug("Output: %s" % result)
        self.assertEqual(
            result.count("Cloud-VirtualServer-%s-22 (%s:22) - TCP" %
                         (public_ip.ipaddress, public_ip.ipaddress)),
            1,
            "User subnet IP should be enabled for LB service",
        )
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" %
                  (self.services["netscaler"]["ipaddress"], e))
    return
def test_09_appcookie_leastconn(self):
    """Test Create a "AppCookie" stick policy for a Lb rule with leastconn.

    Validates the following:
      1. Configure Netscaler for load balancing.
      2. Create a Network offering with LB services provided by Netscaler
         and all other services by VR.
      3. Create a new account/user.
      4. Deploy few VMs using a network from the above created Network
         offering.
      5. Create a "AppCookie" stick policy for a Lb rule with
         "leastconn" algorithm.
    """
    self.debug(
        "Creating LB rule for IP address: %s with leastconn algo" %
        self.public_ip.ipaddress.ipaddress)

    self.services["lbrule"]["alg"] = 'leastconn'
    self.services["lbrule"]["publicport"] = 80
    self.services["lbrule"]["privateport"] = 80

    lb_rule = LoadBalancerRule.create(
        self.apiclient,
        self.services["lbrule"],
        ipaddressid=self.public_ip.ipaddress.id,
        accountid=self.account.name,
        networkid=self.network.id
    )
    self.cleanup.append(lb_rule)
    self.debug("Created the load balancing rule for public IP: %s" %
               self.public_ip.ipaddress.ipaddress)

    self.debug("Assigning VM instance: %s to LB rule: %s" % (
        self.virtual_machine.name,
        lb_rule.name
    ))
    lb_rule.assign(self.apiclient, [self.virtual_machine])
    self.debug("Assigned VM instance: %s to lb rule: %s" % (
        self.virtual_machine.name,
        lb_rule.name
    ))

    # BUG FIX: original debug message said "SourceBased" even though this
    # test configures an AppCookie sticky policy.
    self.debug(
        "Configuring 'AppCookie' Sticky policy on lb rule: %s" %
        lb_rule.name)
    try:
        result = lb_rule.createSticky(
            self.apiclient,
            methodname='AppCookie',
            name='AppCookieLeastConn',
            param={"name": 20}
        )
        self.debug("Response: %s" % result)
    except Exception as e:
        self.fail("Configure sticky policy failed with exception: %s" % e)

    # Verify directly on the NetScaler that both the persistence rule and
    # the LB algorithm were pushed down.
    self.debug("SSH into Netscaler to check whether sticky policy "
               "configured properly or not?")
    self.debug("SSH into netscaler: %s" %
               self.services["netscaler"]["ipaddress"])
    try:
        ssh_client = SshClient(
            self.services["netscaler"]["ipaddress"],
            self.services["netscaler"]["port"],
            self.services["netscaler"]["username"],
            self.services["netscaler"]["password"],
        )
        cmd = "show lb vserver Cloud-VirtualServer-%s-%s" % (
            self.public_ip.ipaddress.ipaddress,
            lb_rule.publicport)
        self.debug("command: %s" % cmd)
        res = ssh_client.execute(cmd)
        result = str(res)
        self.debug("Output: %s" % result)

        self.assertEqual(
            result.count("Persistence: RULE"),
            1,
            "'AppCookie' sticky policy should be configured on NS"
        )
        self.assertEqual(
            result.count("Configured Method: LEASTCONNECTION"),
            1,
            "'leastconn' algorithm should be configured on NS"
        )
    except Exception as e:
        self.fail("SSH Access failed for %s: %s" %
                  (self.services["netscaler"]["ipaddress"], e))
    return
def is_snapshot_on_nfs(apiclient, dbconn, config, zoneid, snapshotid):
    """Checks whether a snapshot with id (not UUID) `snapshotid` is
    present on the nfs storage

    @param apiclient: api client connection
    @param dbconn: connection to the cloudstack db
    @param config: marvin configuration file
    @param zoneid: uuid of the zone on which the secondary nfs storage
                   pool is mounted
    @param snapshotid: uuid of the snapshot
    @return: True if snapshot is found, False otherwise
    """
    # Local import to avoid a circular dependency with the base module.
    from base import ImageStore

    secondaryStores = ImageStore.list(apiclient, zoneid=zoneid)

    assert isinstance(secondaryStores, list), \
        "Not a valid response for listImageStores"
    assert len(secondaryStores) != 0, \
        "No image stores found in zone %s" % zoneid

    secondaryStore = secondaryStores[0]

    if str(secondaryStore.providername).lower() != "nfs":
        raise Exception(
            "is_snapshot_on_nfs works only against nfs secondary storage."
            " found %s" % str(secondaryStore.providername))

    # Translate the snapshot UUID into the internal database id.
    qresultset = dbconn.execute(
        "select id from snapshots where uuid = '%s';" % str(snapshotid))
    if len(qresultset) == 0:
        raise Exception(
            "No snapshot found in cloudstack with id %s" % snapshotid)

    snapshotid = qresultset[0][0]
    qresultset = dbconn.execute(
        "select install_path from snapshot_store_ref where "
        "snapshot_id='%s' and store_role='Image';" % snapshotid
    )

    assert isinstance(qresultset, list), \
        "Invalid db query response for snapshot %s" % snapshotid

    if len(qresultset) == 0:
        # No Image-store entry means the snapshot never reached secondary
        # storage.
        return False

    snapshotPath = qresultset[0][0]
    nfsurl = secondaryStore.url
    # nfs://host/path is split so the share can be mounted manually below.
    from urllib2 import urlparse
    parse_url = urlparse.urlsplit(nfsurl, scheme="nfs")
    host, path = parse_url.netloc, parse_url.path

    if not config.mgtSvr:
        raise Exception(
            "Your marvin configuration does not contain mgmt server "
            "credentials")
    mgtSvr, user, passwd = (config.mgtSvr[0].mgtSvrIp,
                            config.mgtSvr[0].user,
                            config.mgtSvr[0].passwd)

    try:
        ssh_client = SshClient(mgtSvr, 22, user, passwd)
        cmds = [
            # BUG FIX: original command was "mkdir -p %s /mnt/tmp" with no
            # argument supplied for the %s placeholder, creating a literal
            # '%s' directory on the management server.
            "mkdir -p /mnt/tmp",
            "mount -t %s %s%s /mnt/tmp" % ("nfs", host, path),
            "test -f %s && echo 'snapshot exists'" % (
                os.path.join("/mnt/tmp", snapshotPath)),
        ]
        for c in cmds:
            result = ssh_client.execute(c)

        # Unmount the Sec Storage
        cmds = [
            "cd",
            "umount /mnt/tmp",
        ]
        for c in cmds:
            ssh_client.execute(c)
    except Exception as e:
        raise Exception(
            "SSH failed for management server: %s - %s" %
            (config.mgtSvr[0].mgtSvrIp, e))
    # `result` holds the output of the last mounted-path command, i.e.
    # the `test -f` existence check.
    return 'snapshot exists' in result