def _install_zstack_nodes_ha(self):
    """Configure bridged networking on every HA node, then push the ZStack
    all-in-one installer to it and run the installer against its bridge."""
    for node in self.nodes:
        # Bridge the node's NIC with its static network settings.
        setup_cmd = "/usr/local/bin/zs-network-setting -b %s %s %s %s" % (
            node.ip_, node.netmask_, node.gateway_, node.nic_)
        ssh.execute(setup_cmd, node.ip_, node.username_, node.password_)
        # Ship the installer and execute it (-o offline, -i install, -I bridge).
        ssh.scp_file(self.zstack_pkg, "/root/zstack-installer.bin",
                     node.ip_, node.username_, node.password_)
        install_cmd = "deactivate; which python; bash %s -o -i -I %s" % (
            "/root/zstack-installer.bin", node.bridge_)
        ssh.execute(install_cmd, node.ip_, node.username_, node.password_)
def setup_primarystorage_vm(vm_inv, vm_config, deploy_config):
    """Prepare a scenario VM to serve as primary storage.

    For an 'nfs' primaryStorageRef: export the PS directory via /etc/exports,
    start rpcbind/nfs and open port 2049. For a 'ceph' ref: run the
    setup_ceph_nodes.sh helper on the VM. Returns after the first matching
    storage is configured.
    """
    vm_ip = test_lib.lib_get_vm_nic_by_l3(vm_inv, vm_inv.defaultL3NetworkUuid).ip
    host = get_host(vm_config, deploy_config)
    # Normalize the ssh port attribute so int(host.port_) below is safe.
    if not hasattr(host, 'port_') or host.port_ == '22':
        host.port_ = '22'
    for primaryStorageRef in xmlobject.safe_list(vm_config.primaryStorageRef):
        print primaryStorageRef.text_
        for zone in xmlobject.safe_list(deploy_config.zones.zone):
            if primaryStorageRef.type_ == 'nfs':
                for nfsPrimaryStorage in xmlobject.safe_list(zone.primaryStorages.nfsPrimaryStorage):
                    if primaryStorageRef.text_ == nfsPrimaryStorage.name_:
                        test_util.test_logger('[vm:] %s setup nfs service.' % (vm_ip))
                        # TODO: multiple NFS PS may refer to same host's different DIR
                        # url_ is "host:/path"; keep only the path part.
                        nfsPath = nfsPrimaryStorage.url_.split(':')[1]
                        cmd = "echo '%s *(rw,sync,no_root_squash)' > /etc/exports" % (nfsPath)
                        ssh.execute(cmd, vm_ip, vm_config.imageUsername_, vm_config.imagePassword_, True, int(host.port_))
                        cmd = "mkdir -p %s && service rpcbind restart && service nfs restart" % (nfsPath)
                        ssh.execute(cmd, vm_ip, vm_config.imageUsername_, vm_config.imagePassword_, True, int(host.port_))
                        # Open the NFS port for both tcp and udp.
                        cmd = "iptables -w 20 -I INPUT -p tcp -m tcp --dport 2049 -j ACCEPT && iptables -w 20 -I INPUT -p udp -m udp --dport 2049 -j ACCEPT"
                        ssh.execute(cmd, vm_ip, vm_config.imageUsername_, vm_config.imagePassword_, True, int(host.port_))
                        return
            elif primaryStorageRef.type_ == 'ceph':
                for cephPrimaryStorage in xmlobject.safe_list(zone.primaryStorages.cephPrimaryStorage):
                    if primaryStorageRef.text_ == cephPrimaryStorage.name_:
                        test_util.test_logger('[vm:] %s setup ceph service.' % (vm_ip))
                        # NOTE(review): scp uses host.port_ (str) while execute
                        # uses int(host.port_) — confirm ssh.scp_file accepts str.
                        ssh.scp_file("%s/%s" % (os.environ.get('woodpecker_root_path'), '/tools/setup_ceph_nodes.sh'), '/tmp/setup_ceph_nodes.sh', vm_ip, vm_config.imageUsername_, vm_config.imagePassword_, port=host.port_)
                        cmd = "bash -ex /tmp/setup_ceph_nodes.sh"
                        ssh.execute(cmd, vm_ip, vm_config.imageUsername_, vm_config.imagePassword_, True, int(host.port_))
                        #nfsPath = nfsPrimaryStorage.url_.split(':')[1]
                        return
def deploy_2ha(scenarioConfig, scenarioFile):
    """Install a 2-node management-node HA pair (zsha2) on the first two
    scenario hosts; the third host serves as NTP time server.

    Idempotent: installation is skipped when `zsha2 status` already
    succeeds on the first node.
    """
    mn_ip1 = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 0).ip_
    mn_ip2 = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 1).ip_
    node3_ip = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 2).ip_
    vip = os.environ['zstackHaVip']
    # Re-bind zstack-ctl to each node's own IP and allow traffic to the VIP.
    change_ip_cmd1 = "zstack-ctl change_ip --ip=" + mn_ip1
    ssh.execute(change_ip_cmd1, mn_ip1, "root", "password", False, 22)
    iptables_cmd1 = "iptables -I INPUT -d " + vip + " -j ACCEPT"
    ssh.execute(iptables_cmd1, mn_ip1, "root", "password", False, 22)
    change_ip_cmd2 = "zstack-ctl change_ip --ip=" + mn_ip2
    ssh.execute(change_ip_cmd2, mn_ip2, "root", "password", False, 22)
    iptables_cmd2 = "iptables -I INPUT -d " + vip + " -j ACCEPT"
    ssh.execute(iptables_cmd2, mn_ip2, "root", "password", False, 22)
    # The woodpecker host serves the zsha2 / zstack-hamon binaries out of
    # /home/<its own 172.20.x.x address>/.
    woodpecker_vm_ip = shell.call("ip r | grep src | grep '^172.20' | head -1 | awk '{print $NF}'").strip()
    zsha2_path = "/home/%s/zsha2" % woodpecker_vm_ip
    ssh.scp_file(zsha2_path, "/root/zsha2", mn_ip1, "root", "password")
    ssh.execute("chmod a+x /root/zsha2", mn_ip1, "root", "password", False, 22)
    zstack_hamon_path = "/home/%s/zstack-hamon" % woodpecker_vm_ip
    ssh.scp_file(zstack_hamon_path, "/root/zstack-hamon", mn_ip1, "root", "password")
    ssh.execute("chmod a+x /root/zstack-hamon", mn_ip1, "root", "password", False, 22)
    cmd = '/root/zsha2 install-ha -nic br_zsn0 -gateway 172.20.0.1 -slave "root:password@' + mn_ip2 + '" -vip ' + vip + ' -time-server ' + node3_ip + ' -db-root-pw zstack.mysql.password -yes'
    test_util.test_logger("deploy 2ha by cmd: %s" %(cmd))
    ssh_cmd = 'sshpass -p password ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null'
    # Only install when zsha2 is not already running on node 1.
    if shell.run("%s %s zsha2 status" %(ssh_cmd, mn_ip1)) != 0:
        ret, output, stderr = ssh.execute(cmd, mn_ip1, "root", "password", False, 22)
        test_util.test_logger("cmd=%s; ret=%s; output=%s; stderr=%s" %(cmd, ret, output, stderr))
        if ret!=0:
            test_util.test_fail("deploy 2ha failed")
def deploy_ha_env(scenarioConfig, scenarioFile, deploy_config, config_json, deploy_tool, mn_img):
    """Prepare the HA environment on the first MN host: push config.json,
    create the ceph pool, import the MN image and run the HA installer."""
    prepare_config_json(scenarioConfig, scenarioFile, deploy_config, config_json)
    test_host = get_mn_host(scenarioConfig, scenarioFile)[0]
    test_host_ip = test_host.ip_
    test_host_config = sce_ops.get_scenario_config_vm(test_host.name_, scenarioConfig)
    host_password = test_host_config.imagePassword_
    mn_image_path = "/home/%s/mn.qcow2" % test_host_ip
    installer_path = "/home/%s/zs-ha" % test_host_ip
    config_path = "/home/%s/config.json" % test_host_ip
    ssh.scp_file(config_json, config_path, test_host_ip,
                 test_host_config.imageUsername_, test_host_config.imagePassword_)
    # Run the three setup steps in strict order, logging each one.
    setup_cmds = [
        "ceph osd pool create zstack 128",
        "qemu-img convert -f qcow2 -O raw %s rbd:zstack/mnvm.img" % mn_image_path,
        '%s -a -p %s -c %s' % (installer_path, host_password, config_path),
    ]
    for setup_cmd in setup_cmds:
        test_util.test_logger("[%s] %s" % (test_host_ip, setup_cmd))
        ssh.execute(setup_cmd, test_host_ip, test_host_config.imageUsername_,
                    test_host_config.imagePassword_, True, 22)
def update_21_iso(vm_ip, tmp_file, iso_21_path, upgrade_script_path):
    """Copy the 2.1 ISO and the upgrade helper script into the VM and
    upgrade the local zstack-dvd repo from the ISO (-r).

    Fails the test when the final command's exit status is non-zero.
    """
    ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    vm_username = os.environ['imageUsername']
    vm_password = os.environ['imagePassword']
    #cmd = '%s "rm -f /opt/zstack_20.iso"' % ssh_cmd
    #process_result = execute_shell_in_process(cmd, tmp_file)
    ssh.scp_file(iso_21_path, '/opt/zstack_21.iso', vm_ip, vm_username, vm_password)
    ssh.scp_file(upgrade_script_path, '/opt/zstack-upgrade', vm_ip, vm_username, vm_password)
    cmd = '%s "mkdir -p /opt/zstack-dvd"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
    cmd = '%s "bash /opt/zstack-upgrade -r /opt/zstack_21.iso"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
    #cmd = '%s "zstack-ctl stop"' % ssh_cmd
    #process_result = execute_shell_in_process(cmd, tmp_file)
    #cmd = '%s "yum -y --disablerepo=* --enablerepo=zstack-local,qemu-kvm-ev clean all"' % ssh_cmd
    #process_result = execute_shell_in_process(cmd, tmp_file)
    #cmd = '%s "yum -y clean all"' % ssh_cmd
    #process_result = execute_shell_in_process(cmd, tmp_file)
    #cmd = '%s "yum -y --disablerepo=* --enablerepo=zstack-local,qemu-kvm-ev update"' % ssh_cmd
    #process_result = execute_shell_in_process(cmd, tmp_file)
    # NOTE(review): only the result of the last executed command is checked;
    # earlier failures are silently ignored.
    if process_result != 0:
        test_util.test_fail('zstack upgrade iso failed')
    else:
        test_util.test_logger('update the 2.10 iso success')
def _install_zstack_nodes_ha(self):
    """Run the network-setting helper on each HA node, then copy the ZStack
    all-in-one installer over and run it against the node's bridge."""
    for ha_node in self.nodes:
        net_cmd = "/root/scripts/network-setting -b %s %s %s %s %s" % (
            ha_node.ip_, ha_node.netmask_, ha_node.gateway_,
            ha_node.nic_, ha_node.bridge_)
        ssh.execute(net_cmd, ha_node.ip_, ha_node.username_, ha_node.password_)
        ssh.scp_file(self.zstack_pkg, "/root/zstack-installer.bin",
                     ha_node.ip_, ha_node.username_, ha_node.password_)
        run_cmd = "bash %s -o -i -I %s" % ("/root/zstack-installer.bin", ha_node.bridge_)
        ssh.execute(run_cmd, ha_node.ip_, ha_node.username_, ha_node.password_)
def _copy_sshkey_from_node(self): node = self.nodes[0] if not node.dockerImage__: print 'Copy sshkey from mn node' #consider some zstack-server is running in vm, the server # startup speed is slow. Increase timeout to 180s. private_key_path = '%s/webapps/zstack/WEB-INF/classes/ansible/rsaKeys/id_rsa' % self.catalina_home public_key_path = '%s/webapps/zstack/WEB-INF/classes/ansible/rsaKeys/id_rsa.pub' % self.catalina_home if linux.is_ip_existing(node.ip_): cmd = 'scp %s /root/.ssh/id_rsa' % (private_key_path) shell.call(cmd) cmd = 'scp %s /root/.ssh/id_rsa.pub' % (public_key_path) shell.call(cmd) elif not linux.is_ip_existing(node.ip_): import zstackwoodpecker.test_lib as test_lib if test_lib.lib_wait_target_up(node.ip_, '22', 120): node_ip = node.ip_ else: node_ip = os.environ['zstackHaVip'] ssh.scp_file(private_key_path, "/root/.ssh/id_rsa", node_ip, node.username_, node.password_) ssh.scp_file(public_key_path, "/root/.ssh/id_rsa.pub", node_ip, node.username_, node.password_) cmd = 'scp %s /root/.ssh/id_rsa' % (private_key_path) shell.call(cmd) cmd = 'scp %s /root/.ssh/id_rsa.pub' % (public_key_path) shell.call(cmd) else: cmd = 'scp %s /root/.ssh/id_rsa' % (private_key_path) shell.call(cmd) cmd = 'scp %s /root/.ssh/id_rsa.pub' % (public_key_path) shell.call(cmd)
def _install_zstack_nodes_ha(self): for node in self.nodes: cmd = "/usr/local/bin/zs-network-setting -b %s %s %s %s" % (node.nic_, node.ip_, node.netmask_, node.gateway_) print cmd ssh.execute(cmd, node.ip_, node.username_, node.password_) ssh.scp_file(self.zstack_pkg, "/root/zstack-installer.bin", node.ip_, node.username_, node.password_) cmd = "deactivate; which python; bash %s -o -i -I %s" % ("/root/zstack-installer.bin", node.bridge_) print cmd ssh.execute(cmd, node.ip_, node.username_, node.password_)
def setup_vm_no_password(vm_inv, vm_config, deploy_config):
    """Install the pre-generated keypair from /home on the VM so later
    logins need no password, then relax strict host key checking."""
    vm_ip = test_lib.lib_get_vm_nic_by_l3(vm_inv, vm_inv.defaultL3NetworkUuid).ip
    key_files = [
        ('/home/id_rsa', '/root/.ssh/id_rsa'),
        ('/home/id_rsa.pub', '/root/.ssh/authorized_keys'),
    ]
    for src_path, dst_path in key_files:
        ssh.scp_file(src_path, dst_path, vm_ip,
                     vm_config.imageUsername_, vm_config.imagePassword_)
    post_cmds = [
        'chmod go-rwx /root/.ssh/authorized_keys /root/.ssh/id_rsa',
        "sed -i 's/.*StrictHostKeyChecking.*$/StrictHostKeyChecking no/g' /etc/ssh/ssh_config",
    ]
    for post_cmd in post_cmds:
        ssh.execute(post_cmd, vm_ip, vm_config.imageUsername_,
                    vm_config.imagePassword_, True, 22)
def prepare_etc_hosts(scenarioConfig, scenarioFile, deploy_config, config_json):
    """Append "<ip> <ip-with-dashes>" entries for every MHA server VM to the
    local /etc/hosts, then push the completed file to each of those VMs.

    Returns False when the scenario defines no MHA server VMs; otherwise
    returns None after distributing the file.

    Idiom fix: iterate the VM list directly instead of `range(len(...))`.
    """
    mha_s_vm_list = get_mha_s_vm_list_from_scenario_file(scenarioConfig, scenarioFile)
    if len(mha_s_vm_list) < 1:
        return False
    # Finish the local file first so every VM receives the full host list.
    for vm in mha_s_vm_list:
        os.system('echo %s %s >> /etc/hosts' % (vm.ip_, vm.ip_.replace('.', '-')))
    for vm in mha_s_vm_list:
        test_host_config = sce_ops.get_scenario_config_vm(vm.name_, scenarioConfig)
        ssh.scp_file('/etc/hosts', '/etc/hosts', vm.ip_,
                     test_host_config.imageUsername_, test_host_config.imagePassword_)
def prepare_etc_hosts(scenarioConfig, scenarioFile, deploy_config, config_json):
    """Append "<ip> <ip-with-dashes>" entries for every MN host to the local
    /etc/hosts, then push the completed file to each of those hosts.

    Returns False when no MN hosts exist; otherwise returns None after
    distributing the file.

    Idiom fix: iterate the host list directly instead of `range(len(...))`.
    """
    mn_host_list = get_mn_host(scenarioConfig, scenarioFile)
    if len(mn_host_list) < 1:
        return False
    # Finish the local file first so every host receives the full list.
    for mn_host in mn_host_list:
        os.system('echo %s %s >> /etc/hosts' % (mn_host.ip_, mn_host.ip_.replace('.', '-')))
    for mn_host in mn_host_list:
        test_host_config = sce_ops.get_scenario_config_vm(mn_host.name_, scenarioConfig)
        ssh.scp_file('/etc/hosts', '/etc/hosts', mn_host.ip_,
                     test_host_config.imageUsername_, test_host_config.imagePassword_)
def _install_zstack_nodes_ha(self):
    """Configure each HA node's bridged network via the helper script, then
    copy over and run the ZStack all-in-one installer."""
    installer_dst = "/root/zstack-installer.bin"
    for node in self.nodes:
        creds = (node.ip_, node.username_, node.password_)
        ssh.execute("/root/scripts/network-setting -b %s %s %s %s %s" % (
            node.ip_, node.netmask_, node.gateway_, node.nic_, node.bridge_),
            *creds)
        ssh.scp_file(self.zstack_pkg, installer_dst, *creds)
        ssh.execute("bash %s -o -i -I %s" % (installer_dst, node.bridge_), *creds)
def upgrade_by_iso(vm_ip, tmp_file, iso_path, upgrade_script_path):
    """Copy an ISO plus the upgrade helper script into the VM and run the
    script against the ISO (full upgrade, no -r flag).

    NOTE(review): none of the command exit statuses are checked here —
    failures are silently ignored; compare update_21_iso which fails the
    test on a non-zero result.
    """
    ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    vm_username = os.environ['imageUsername']
    vm_password = os.environ['imagePassword']
    # Remove any stale ISO before copying the new one.
    cmd = '%s "rm -f /opt/zstack.iso"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
    ssh.scp_file(iso_path, '/opt/zstack.iso', vm_ip, vm_username, vm_password)
    ssh.scp_file(upgrade_script_path, '/opt/zstack-upgrade', vm_ip, vm_username, vm_password)
    cmd = '%s "mkdir -p /opt/zstack-dvd"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
    cmd = '%s "bash /opt/zstack-upgrade /opt/zstack.iso"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
def setup_ceph_storages(scenario_config, scenario_file, deploy_config): ceph_storages = dict() for host in xmlobject.safe_list(scenario_config.deployerConfig.hosts.host): for vm in xmlobject.safe_list(host.vms.vm): vm_name = vm.name_ if hasattr(vm, 'backupStorageRef'): for backupStorageRef in xmlobject.safe_list(vm.backupStorageRef): print backupStorageRef.text_ if backupStorageRef.type_ == 'ceph': if ceph_storages.has_key(backupStorageRef.text_): if vm_name in ceph_storages[backupStorageRef.text_]: continue else: ceph_storages[backupStorageRef.text_].append(vm_name) else: ceph_storages[backupStorageRef.text_] = [ vm_name ] if hasattr(vm, 'primaryStorageRef'): for primaryStorageRef in xmlobject.safe_list(vm.primaryStorageRef): print primaryStorageRef.text_ for zone in xmlobject.safe_list(deploy_config.zones.zone): if primaryStorageRef.type_ == 'ceph': if ceph_storages.has_key(backupStorageRef.text_): if vm_name in ceph_storages[backupStorageRef.text_]: continue else: ceph_storages[backupStorageRef.text_].append(vm_name) else: ceph_storages[backupStorageRef.text_] = [ vm_name ] for ceph_storage in ceph_storages: test_util.test_logger('setup ceph [%s] service.' 
% (ceph_storage)) node1_name = ceph_storages[ceph_storage][0] node1_config = get_scenario_config_vm(node1_name, scenario_config) node1_ip = get_scenario_file_vm(node1_name, scenario_file).ip_ node_host = get_deploy_host(node1_config.hostRef.text_, deploy_config) if not hasattr(node_host, 'port_') or node_host.port_ == '22': node_host.port_ = '22' vm_ips = '' for ceph_node in ceph_storages[ceph_storage]: vm_nic_id = get_ceph_storages_nic_id(ceph_storage, scenario_config) vm = get_scenario_file_vm(ceph_node, scenario_file) if vm_nic_id == None: vm_ips += vm.ip_ + ' ' else: vm_ips += vm.ips.ip[vm_nic_id].ip_ + ' ' ssh.scp_file("%s/%s" % (os.environ.get('woodpecker_root_path'), '/tools/setup_ceph_nodes.sh'), '/tmp/setup_ceph_nodes.sh', node1_ip, node1_config.imageUsername_, node1_config.imagePassword_, port=int(node_host.port_)) cmd = "bash -ex /tmp/setup_ceph_nodes.sh %s" % (vm_ips) ssh.execute(cmd, node1_ip, node1_config.imageUsername_, node1_config.imagePassword_, True, int(node_host.port_))
def test():
    """Suite setup for the HA environment: deploy the scenario, create test
    vlans on every host, install the HA deployment, license and initial
    database, then tune fencing/memory settings."""
    if test_lib.scenario_config == None or test_lib.scenario_file ==None:
        test_util.test_fail('Suite Setup Fail without scenario')
    # Fresh scenario: deploy it and skip the rest of setup.
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')
    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)
    #This vlan creation is not a must, if testing is under nested virt env. But it is required on physical host without enough physcial network devices and your test execution machine is not the same one as Host machine.
    #no matter if current host is a ZStest host, we need to create 2 vlan devs for future testing connection for novlan test cases.
    linux.create_vlan_eth("eth0", 10)
    linux.create_vlan_eth("eth0", 11)
    #If test execution machine is not the same one as Host machine, deploy work is needed to separated to 2 steps(deploy_test_agent, execute_plan_without_deploy_test_agent). And it can not directly call SetupAction.run()
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = 'eth0'
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = 'eth0'
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    # Normalize single-host plans to a list.
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)
    config_json = os.environ.get('configJson')
    ha_deploy_tool = os.environ.get('zstackHaInstaller')
    mn_img = os.environ.get('mnImage')
    test_stub.deploy_ha_env(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config,config_json, ha_deploy_tool, mn_img)
    node_operations.wait_for_management_server_start(300)
    # Install the trial license on the HA VIP.
    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", os.environ.get('zstackHaVip'), 'root', 'password')
    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))
    # Loosen self-fencing so slow nested-virt hosts are not fenced early.
    if test_lib.lib_get_ha_selffencer_maxattempts() != None:
        test_lib.lib_set_ha_selffencer_maxattempts('60')
        test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_lib.lib_set_reserved_memory('8G')
    test_util.test_pass('Suite Setup Success')
def _enable_jacoco_agent(self): for node in self.nodes: woodpecker_ip = '' import commands (status, output) = commands.getstatusoutput( "ip addr show zsn0 | sed -n '3p' | awk '{print $2}' | awk -F / '{print $1}'" ) dst_file = '/var/lib/zstack/virtualenv/zstackctl/lib/python2.7/site-packages/zstackctl/ctl.py' if output.startswith('172'): woodpecker_ip = output if woodpecker_ip != '': #Separate woodpecker condition if os.path.exists('/home/%s/jacocoagent.jar' % woodpecker_ip): src_file = '/home/%s/zstack-utility/zstackctl/zstackctl/ctl.py' % woodpecker_ip fd_r = open(src_file, 'r') fd_w = open(dst_file, 'w') ctl_content = '' for line in fd_r: if line.find('with open(setenv_path') != -1: line = ' catalina_opts.append("-javaagent:/var/lib/zstack/jacocoagent.jar=output=tcpserver,address=%s,port=6300")\n%s'\ %(node.ip_, line) ctl_content += line fd_r.close() fd_w.write(ctl_content) fd_w.close() ssh.scp_file(dst_file, dst_file, node.ip_, node.username_, node.password_) print 'Inject jacoco agent into ctl.py' else: print 'Here is no jacocoagent.jar, skip to inject jacoco agent' else: #Incorporate wookpecker condition if os.path.exists('/home/%s/jacocoagent.jar' % node.ip_): src_file = '/home/%s/zstack-utility/zstackctl/zstackctl/ctl.py' % node.ip_ fd_r = open(src_file, 'r') fd_w = open(dst_file, 'w') ctl_content = '' for line in fd_r: if line.find('with open(setenv_path') != -1: line = ' catalina_opts.append("-javaagent:/home/%s/jacocoagent.jar=output=tcpserver,address=127.0.0.1,port=6300")\n%s'\ %(node.ip_, line) ctl_content += line fd_r.close() fd_w.write(ctl_content) fd_w.close() print 'Inject jacoco agent into ctl.py' else: print 'Here is no jacocoagent.jar, skip to inject jacoco agent'
def update_232_iso(vm_ip, tmp_file, iso_232_path, upgrade_script_path):
    """Push the 2.3.2 ISO and upgrade script to the VM, upgrade the local
    repo from the ISO (-r) and fail the test when that command fails."""
    remote_shell = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    vm_username = os.environ['imageUsername']
    vm_password = os.environ['imagePassword']
    ssh.scp_file(iso_232_path, '/opt/zstack_232.iso', vm_ip, vm_username, vm_password)
    ssh.scp_file(upgrade_script_path, '/opt/zstack-upgrade', vm_ip, vm_username, vm_password)
    process_result = execute_shell_in_process(
        '%s "mkdir -p /opt/zstack-dvd"' % remote_shell, tmp_file)
    process_result = execute_shell_in_process(
        '%s "bash /opt/zstack-upgrade -r /opt/zstack_232.iso"' % remote_shell, tmp_file)
    if process_result != 0:
        test_util.test_fail('zstack upgrade iso failed')
    else:
        test_util.test_logger('update the 2.3.2 iso success')
def copy_data(self, vm=None):
    """Copy the zstack-all-in-one tarball found under /home into the VM's
    /mnt, verify the transfer with md5 and extract it.

    :param vm: target VM wrapper; defaults to self.vm
    :return: self (fluent style)
    """
    vm = vm if vm else self.vm
    vm_ip = vm.get_vm().vmNics[0].ip
    test_lib.lib_wait_target_up(vm_ip, '22', timeout=600)
    cmd = "find /home -iname 'zstack-all-in-one.*'"
    # Use the first match when several tarballs exist.
    file_path = commands.getoutput(cmd).split('\n')[0]
    file_name = os.path.basename(file_path)
    dst_file = os.path.join('/mnt', file_name)
    src_file_md5 = commands.getoutput('md5sum %s' % file_path).split(' ')[0]
    ssh.scp_file(file_path, dst_file, vm_ip, 'root', 'password')
    # sync + sleep so written blocks are flushed before hashing remotely.
    (_, dst_md5, _)= ssh.execute('sync; sync; sleep 60; md5sum %s' % dst_file, vm_ip, 'root', 'password')
    dst_file_md5 = dst_md5.split(' ')[0]
    test_util.test_dsc('src_file_md5: [%s], dst_file_md5: [%s]' % (src_file_md5, dst_file_md5))
    assert dst_file_md5 == src_file_md5, 'dst_file_md5 [%s] and src_file_md5 [%s] is not match, stop test' % (dst_file_md5, src_file_md5)
    extract_cmd = 'tar xvf /mnt/zstack-all-in-one.* -C /mnt > /dev/null 2>&1'
    ssh.execute(extract_cmd, vm_ip, 'root', 'password')
    return self
def scp_in_guest_vm(self, req):
    """Agent endpoint: copy a file into a guest VM over scp and report the
    outcome as a serialized ScpInVmRsp."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = ScpInVmRsp()
    try:
        ssh.scp_file(cmd.src_file, cmd.dst_file, cmd.ip, cmd.username, cmd.password, cmd.port)
    except Exception as e:
        logger.debug('[SCP] scp %s to vm[ip:%s] failed: %s.' % \
                (cmd.src_file, cmd.ip, str(e)))
        rsp.success = False
        rsp.error = str(e)
    else:
        rsp.success = True
        rsp.output = '[SCP] Successfully scp %s to [vm:] %s %s' % \
                (cmd.src_file, cmd.ip, cmd.dst_file)
    return jsonobject.dumps(rsp)
def upgrade_zstack(vm_ip, target_file, tmp_file):
    """Copy the all-in-one installer into the VM and run it in upgrade mode
    (-u); fail the test when the installer exits non-zero."""
    remote_shell = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    vm_username = os.environ['imageUsername']
    vm_password = os.environ['imagePassword']
    ssh.scp_file(target_file, '/opt/zstack_installer', vm_ip, vm_username, vm_password)
    # Point the installer at localhost instead of the public website.
    env_var = "WEBSITE='%s'" % 'localhost'
    upgrade_cmd = '%s "%s bash %s -u"' % (remote_shell, env_var, '/opt/zstack_installer')
    process_result = execute_shell_in_process(upgrade_cmd, tmp_file)
    if process_result != 0:
        test_util.test_fail('zstack upgrade failed')
    else:
        test_util.test_logger('upgrade zstack success')
def update_iso(vm_ip, tmp_file, iso_path, upgrade_script_path):
    """Copy an ISO and the upgrade helper into the VM, upgrade the local
    zstack-dvd repo from it (-r) and refresh the yum caches.

    NOTE(review): command exit statuses are assigned but never checked —
    failures are silently ignored; compare update_21_iso.
    """
    ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    vm_username = os.environ['imageUsername']
    vm_password = os.environ['imagePassword']
    # Remove any stale ISO before copying the new one.
    cmd = '%s "rm -f /opt/zstack.iso"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
    ssh.scp_file(iso_path, '/opt/zstack.iso', vm_ip, vm_username, vm_password)
    ssh.scp_file(upgrade_script_path, '/opt/zstack-upgrade', vm_ip, vm_username, vm_password)
    cmd = '%s "mkdir -p /opt/zstack-dvd"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
    cmd = '%s "bash /opt/zstack-upgrade -r /opt/zstack.iso"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
    #cmd = '%s "zstack-ctl stop"' % ssh_cmd
    #process_result = execute_shell_in_process(cmd, tmp_file)
    # Refresh yum metadata so the upgraded repo is picked up.
    cmd = '%s "yum -y --disablerepo=* --enablerepo=zstack-local,qemu-kvm-ev clean all"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
    cmd = '%s "yum -y clean all"' % ssh_cmd
    process_result = execute_shell_in_process(cmd, tmp_file)
def restart_mn_node_with_long_timeout():
    """If the management node on the HA VIP is not running, restart it with
    an extended (3000s) node-start timeout and patch the init script so
    future restarts use the same timeout.

    Does nothing when `zstack-ctl status` already reports "Running".
    """
    mn_ip = os.environ['zstackHaVip']
    test_lib.lib_wait_target_up(mn_ip, '22', 120)
    cmd = "zstack-ctl status|grep 'MN status'|awk '{print $3}'"
    ret, stdout, stderr = ssh.execute(cmd, mn_ip, "root", "password", False, 22)
    if stdout.strip().strip('\n').lower() != "running":
        # Ship and run the helper that decides whether MN is self-starting.
        check_mn_tool_path = "%s/%s" %(os.environ.get('woodpecker_root_path'), '/tools/check_mn_start.sh')
        test_util.test_logger("check_mn_tool_path:[%s],mn_ip:[%s]" %(check_mn_tool_path, mn_ip))
        ssh.scp_file(check_mn_tool_path, "/home/check_mn_start.sh", mn_ip, "root", "password")
        cmd = "bash /home/check_mn_start.sh"
        ret1, stdout1, stderr1 = ssh.execute(cmd, mn_ip, "root", "password", False, 22)
        test_util.test_logger("check_mn_start.sh stdout1:[%s],stderr1:[%s]" %(stdout1,stderr1))
        if str(ret1) == "0" :
            # Stop, raise the thread pool size, then restart node and UI
            # with the long timeout.
            cmd = "zstack-ctl stop"
            ret, stdout, stderr = ssh.execute(cmd, mn_ip, "root", "password", True, 22)
            cmd = "zstack-ctl configure ThreadFacade.maxThreadNum=200"
            ret, stdout, stderr = ssh.execute(cmd, mn_ip, "root", "password", True, 22)
            cmd = "zstack-ctl start_node --timeout 3000"
            ret, stdout, stderr = ssh.execute(cmd, mn_ip, "root", "password", True, 22)
            cmd = "zstack-ctl start_ui"
            ret, stdout, stderr = ssh.execute(cmd, mn_ip, "root", "password", True, 22)
            #modify zstack start script
            cmd = r'sed -i "s:zstack-ctl start:zstack-ctl start_node --timeout 3000:g" /etc/init.d/zstack-server'
            test_lib.lib_execute_ssh_cmd(mn_ip, "root", "password", cmd)
            time.sleep(1)
            # Append a start_ui line right after the patched start_node line.
            cmd = r'sed -i "/zstack-ctl start_node --timeout 3000/ a\ ZSTACK_HOME=\$zstack_app zstack-ctl start_ui" /etc/init.d/zstack-server'
            test_lib.lib_execute_ssh_cmd(mn_ip, "root", "password", cmd)
        else:
            test_util.test_logger("find mn not self-started as expected, checked by /home/check_mn_start.sh")
    else:
        test_util.test_logger("find zstack MN is running.")
def deploy_2ha(scenarioConfig, scenarioFile, deployConfig):
    """Install a 2-node management-node HA pair (zsha2) on the first two
    scenario hosts. For mini deployments (miniBackupStorage present) the
    pair serves as its own time source; otherwise a third host is used.
    """
    mn_ip1 = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 0).ip_
    mn_ip2 = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 1).ip_
    # Mini deployments have only two hosts — no dedicated time server.
    if not xmlobject.has_element(deployConfig, 'backupStorages.miniBackupStorage'):
        node3_ip = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 2).ip_
    vip = os.environ['zstackHaVip']
    # Re-bind zstack-ctl to each node's own IP and allow traffic to the VIP.
    change_ip_cmd1 = "zstack-ctl change_ip --ip=" + mn_ip1
    ssh.execute(change_ip_cmd1, mn_ip1, "root", "password", False, 22)
    iptables_cmd1 = "iptables -I INPUT -d " + vip + " -j ACCEPT"
    ssh.execute(iptables_cmd1, mn_ip1, "root", "password", False, 22)
    change_ip_cmd2 = "zstack-ctl change_ip --ip=" + mn_ip2
    ssh.execute(change_ip_cmd2, mn_ip2, "root", "password", False, 22)
    iptables_cmd2 = "iptables -I INPUT -d " + vip + " -j ACCEPT"
    ssh.execute(iptables_cmd2, mn_ip2, "root", "password", False, 22)
    # The woodpecker host serves the binaries out of /home/<its own ip>/.
    woodpecker_vm_ip = shell.call("ip r | grep src | head -1 | awk '{print $NF}'").strip()
    zsha2_path = "/home/%s/zsha2" % woodpecker_vm_ip
    ssh.scp_file(zsha2_path, "/root/zsha2", mn_ip1, "root", "password")
    ssh.execute("chmod a+x /root/zsha2", mn_ip1, "root", "password", False, 22)
    zstack_hamon_path = "/home/%s/zstack-hamon" % woodpecker_vm_ip
    ssh.scp_file(zstack_hamon_path, "/root/zstack-hamon", mn_ip1, "root", "password")
    ssh.execute("chmod a+x /root/zstack-hamon", mn_ip1, "root", "password", False, 22)
    if xmlobject.has_element(deployConfig, 'backupStorages.miniBackupStorage'):
        # NOTE(review): mn_ip2 is listed twice as time-server here — looks
        # like one of them was meant to be mn_ip1; confirm against zsha2 docs.
        cmd = '/root/zsha2 install-ha -nic br_zsn0 -gateway 172.20.0.1 -slave "root:password@' + mn_ip2 + '" -vip ' + vip + ' -time-server ' + mn_ip2 + ',' + mn_ip2 + ' -db-root-pw zstack.mysql.password -yes'
    else:
        cmd = '/root/zsha2 install-ha -nic br_zsn0 -gateway 172.20.0.1 -slave "root:password@' + mn_ip2 + '" -vip ' + vip + ' -time-server ' + node3_ip + ',' + mn_ip2 + ' -db-root-pw zstack.mysql.password -yes'
    test_util.test_logger("deploy 2ha by cmd: %s" %(cmd))
    ret, output, stderr = ssh.execute(cmd, mn_ip1, "root", "password", False, 22)
    test_util.test_logger("cmd=%s; ret=%s; output=%s; stderr=%s" %(cmd, ret, output, stderr))
    if ret!=0:
        test_util.test_fail("deploy 2ha failed")
def upgrade_zstack(vm_ip, target_file, tmp_file):
    """Upgrade ZStack inside the VM via the all-in-one installer (-u -o);
    when the installer times out waiting for the management node, poll
    `zstack-ctl status` for up to another 300 seconds.

    Bug fixes vs. the original:
      * `times -= 0` never decremented the retry counter (infinite loop);
      * the success log applied `* 10` to the formatted string instead of
        the seconds value (`"..." % x * 10` repeats the message 10 times);
      * exhausting all retries now fails the test instead of silently
        returning as if the upgrade succeeded.
    """
    ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
    vm_username = os.environ['imageUsername']
    vm_password = os.environ['imagePassword']
    ssh.scp_file(target_file, '/opt/zstack_installer', vm_ip, vm_username, vm_password)
    # Point the installer at localhost instead of the public website.
    env_var = "WEBSITE='%s'" % 'localhost'
    cmd = '%s "%s bash %s -u -o"' % (ssh_cmd, env_var, '/opt/zstack_installer')
    process_result = execute_shell_in_process(cmd, tmp_file)
    if process_result == 0:
        return
    # Dump the installation log for diagnosis.
    cmd = '%s "cat /tmp/zstack_installation.log"' % ssh_cmd
    execute_shell_in_process(cmd, tmp_file)
    if 'no management-node-ready message received within' in open(tmp_file).read():
        # MN may just be slow: poll status every 10s, 30 times.
        times = 30
        cmd = '%s "zstack-ctl status"' % ssh_cmd
        while (times > 0):
            time.sleep(10)
            process_result = execute_shell_in_process(cmd, tmp_file, 10, True)
            times -= 1
            if process_result == 0:
                test_util.test_logger(
                    "management node start after extra %d seconds" % ((30 - times + 1) * 10))
                return 0
            test_util.test_logger(
                "mn node is still not started up, wait for another 10 seconds..."
            )
        test_util.test_fail('zstack upgrade failed: management node did not start')
    else:
        test_util.test_fail('zstack upgrade failed')
def test():
    """Suite setup for the HA environment: deploy the scenario, create test
    vlans on every host, pick the matching config.json variant, install the
    HA deployment, license and initial database, then tune settings."""
    if test_lib.scenario_config == None or test_lib.scenario_file == None:
        test_util.test_fail('Suite Setup Fail without scenario')
    # Fresh scenario: deploy it and skip the rest of setup.
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')
    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)
    # Deployed scenarios use the zsn0 test NIC; bare hosts use eth0.
    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"
    #This vlan creation is not a must, if testing is under nested virt env. But it is required on physical host without enough physcial network devices and your test execution machine is not the same one as Host machine.
    #no matter if current host is a ZStest host, we need to create 2 vlan devs for future testing connection for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)
    #If test execution machine is not the same one as Host machine, deploy work is needed to separated to 2 steps(deploy_test_agent, execute_plan_without_deploy_test_agent). And it can not directly call SetupAction.run()
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    # Normalize single-host plans to a list.
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)
    # Pick the HA installer config matching the current test/scenario pair.
    if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-nfs.xml"], \
            ["scenario-config-storage-separate-nfs.xml"]):
        config_json = os.environ.get('configJsonSepStor')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-nonmon-ceph.xml"], \
            ["scenario-config-storage-separate-ceph.xml"]):
        config_json = os.environ.get('configJsonNonMon')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-flat-dhcp-nfs-sep-pub-man.xml"], \
            ["scenario-config-nfs-sep-man.xml", \
            "scenario-config-nfs-sep-pub.xml"]):
        config_json = os.environ.get('configJsonSepPub')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-ceph-3-nets-sep.xml"], \
            ["scenario-config-ceph-sep-man.xml", \
            "scenario-config-ceph-sep-pub.xml", \
            "scenario-config-ceph-3-nets-sep.xml"]):
        config_json = os.environ.get('configJsonSepPub')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-fusionstor-3-nets-sep.xml"], \
            ["scenario-config-fusionstor-3-nets-sep.xml"]):
        config_json = os.environ.get('configJson3Net')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-flat-dhcp-nfs-mul-net-pubs.xml"], \
            ["scenario-config-nfs-sep-man.xml", \
            "scenario-config-nfs-sep-pub.xml"]):
        config_json = os.environ.get('configJsonAllOne')
    else:
        config_json = os.environ.get('configJson')
    ha_deploy_tool = os.environ.get('zstackHaInstaller')
    mn_img = os.environ.get('mnImage')
    test_stub.deploy_ha_env(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config, config_json, ha_deploy_tool, mn_img)
    #if os.path.basename(os.environ.get('WOODPECKER_SCENARIO_CONFIG_FILE')).strip() == "scenario-config-vpc-ceph-3-sites.xml":
    #    test_util.test_logger("@@@DEBUG->IS VPC CEPH@@@")
    #    old_mn_ip = os.environ['zstackHaVip']
    #    test_stub.auto_set_mn_ip(test_lib.scenario_file)
    #    cmd = 'sed -i "s/%s/%s/g" %s' %(old_mn_ip, os.environ['zstackHaVip'], EXTRA_SUITE_SETUP_SCRIPT)
    #    os.system(cmd)
    #node_operations.wait_for_management_server_start(600)
    test_stub.wrapper_of_wait_for_management_server_start(600, EXTRA_SUITE_SETUP_SCRIPT)
    test_util.test_logger("@@@DEBUG->suite_setup@@@ os\.environ\[\'ZSTACK_BUILT_IN_HTTP_SERVER_IP\'\]=%s; os\.environ\[\'zstackHaVip\'\]=%s" \
            %(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], os.environ['zstackHaVip']) )
    # Install the trial license on the HA VIP.
    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", os.environ.get('zstackHaVip'), 'root', 'password')
    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))
    #if test_lib.lib_get_ha_selffencer_maxattempts() != None:
    #    test_lib.lib_set_ha_selffencer_maxattempts('60')
    #    test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')
    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_lib.lib_set_reserved_memory('8G')
    # Separated-public-network setups need the PS gateway system tag.
    if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-flat-dhcp-nfs-sep-pub-man.xml"], ["scenario-config-nfs-sep-pub.xml"]) or \
            test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-ceph-3-nets-sep.xml"], ["scenario-config-ceph-sep-pub.xml"]) or \
            test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-fusionstor-3-nets-sep.xml"], ["scenario-config-fusionstor-3-nets-sep.xml"]):
        add_ps_network_gateway_sys_tag()
    test_util.test_pass('Suite Setup Success')
def setup_fusionstor_storages(scenario_config, scenario_file, deploy_config):
    """Discover fusionstor storages and run the node-setup script on each.

    Walks every <vm> under scenario_config.deployerConfig.hosts, building a
    mapping of fusionstor storage name -> ordered, de-duplicated list of VM
    names that back it (from both backupStorageRef and primaryStorageRef
    entries).  For each storage, copies tools/setup_fusionstor_nodes.sh and
    the fusionstor package to the first node and executes the script against
    all node IPs.  No-op (early return) when no fusionstor storage is
    referenced.

    :param scenario_config: parsed scenario XML (deployer view of hosts/vms)
    :param scenario_file: deployed-scenario file used to resolve VM IPs
    :param deploy_config: parsed deploy XML (used to resolve the deploy host)
    """
    fusionstor_storages = dict()

    def _record(storage_name, vm_name):
        # Register vm_name as a backing node of storage_name, keeping the
        # per-storage node list ordered and free of duplicates.
        nodes = fusionstor_storages.setdefault(storage_name, [])
        if vm_name not in nodes:
            nodes.append(vm_name)

    for host in xmlobject.safe_list(scenario_config.deployerConfig.hosts.host):
        for vm in xmlobject.safe_list(host.vms.vm):
            vm_name = vm.name_
            if hasattr(vm, 'backupStorageRef'):
                for backupStorageRef in xmlobject.safe_list(vm.backupStorageRef):
                    print(backupStorageRef.text_)
                    if backupStorageRef.type_ == 'fusionstor':
                        _record(backupStorageRef.text_, vm_name)
            if hasattr(vm, 'primaryStorageRef'):
                for primaryStorageRef in xmlobject.safe_list(vm.primaryStorageRef):
                    print(primaryStorageRef.text_)
                    if primaryStorageRef.type_ == 'fusionstor':
                        # Bug fix: the original recorded backupStorageRef.text_
                        # here (copy/paste from the branch above), which either
                        # raised NameError when the vm had no backupStorageRef
                        # or filed the node under the wrong storage.  Use the
                        # primary storage ref.  The original also looped over
                        # deploy_config.zones.zone without using the zone; with
                        # de-duplication the result is identical, so the loop
                        # is dropped.
                        _record(primaryStorageRef.text_, vm_name)

    if len(fusionstor_storages) > 0:
        test_util.test_logger('get fusionstor pkg')
        fusionstorPkg = os.environ['fusionstorPkg']
    else:
        test_util.test_logger('no fusionstor pkg return here')
        return

    for fusionstor_storage in fusionstor_storages:
        test_util.test_logger('setup fusionstor [%s] service.' % (fusionstor_storage))
        # First registered VM acts as the deployment entry point.
        node1_name = fusionstor_storages[fusionstor_storage][0]
        node1_config = get_scenario_config_vm(node1_name, scenario_config)
        node1_ip = get_scenario_file_vm(node1_name, scenario_file).ip_
        node_host = get_deploy_host(node1_config.hostRef.text_, deploy_config)
        if not hasattr(node_host, 'port_') or node_host.port_ == '22':
            node_host.port_ = '22'
        # Space-separated IP list of all storage nodes, using the storage's
        # dedicated NIC when one is configured.
        vm_ips = ''
        for fusionstor_node in fusionstor_storages[fusionstor_storage]:
            vm_nic_id = get_fusionstor_storages_nic_id(fusionstor_storage, scenario_config)
            vm = get_scenario_file_vm(fusionstor_node, scenario_file)
            if vm_nic_id == None:
                vm_ips += vm.ip_ + ' '
            else:
                vm_ips += vm.ips.ip[vm_nic_id].ip_ + ' '
        ssh.scp_file("%s/%s" % (os.environ.get('woodpecker_root_path'), '/tools/setup_fusionstor_nodes.sh'), '/tmp/setup_fusionstor_nodes.sh', node1_ip, node1_config.imageUsername_, node1_config.imagePassword_, port=int(node_host.port_))
        ssh.scp_file(fusionstorPkg, fusionstorPkg, node1_ip, node1_config.imageUsername_, node1_config.imagePassword_, port=int(node_host.port_))
        cmd = "bash -ex /tmp/setup_fusionstor_nodes.sh %s %s" % ((fusionstorPkg), (vm_ips))
        try:
            ssh.execute(cmd, node1_ip, node1_config.imageUsername_, node1_config.imagePassword_, True, int(node_host.port_))
        except Exception as e:
            print(str(e))
            # Single retry: the first setup attempt is apparently flaky
            # (original code retried unconditionally on any failure).
            ssh.execute(cmd, node1_ip, node1_config.imageUsername_, node1_config.imagePassword_, True, int(node_host.port_))
def scp_file_to_vm(vm_inv, src_file, target_file):
    """Copy a local file onto a test VM over SCP.

    :param vm_inv: VM inventory; its first nic's IP is used as the target.
    :param src_file: local source path
    :param target_file: destination path on the VM
    """
    target_ip = vm_inv.vmNics[0].ip
    ssh.scp_file(src_file, target_file, target_ip,
                 test_lib.lib_get_vm_username(vm_inv),
                 test_lib.lib_get_vm_password(vm_inv))
def scp_file_to_vm(vm_ip, src_file, target_file):
    """Copy a local file to the VM at *vm_ip* over SCP.

    Credentials come from the imageUsername/imagePassword environment
    variables (KeyError if either is unset).
    """
    username = os.environ['imageUsername']
    password = os.environ['imagePassword']
    ssh.scp_file(src_file, target_file, vm_ip, username, password)
def test():
    """Suite setup for the two-management-node HA scenario: deploy scenario
    VMs, prepare vlan devices on all hosts, install the 2-MN HA environment
    and seed the initial database."""
    # A scenario config/file pair is mandatory for this suite.
    if test_lib.scenario_config == None or test_lib.scenario_file ==None:
        test_util.test_fail('Suite Setup Fail without scenario')
    # First run (scenario file not yet on disk): deploy the scenario VMs,
    # then skip — the remaining setup runs on a later invocation.
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')
    # Optional teardown of a previously deployed scenario.
    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)
    # Inside an already-deployed scenario the test NIC is zsn0; otherwise eth0.
    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"
    # This vlan creation is not a must if testing is under a nested virt env,
    # but it is required on a physical host without enough physical network
    # devices when the test execution machine differs from the Host machine.
    # No matter whether the current host is a ZStest host, create 2 vlan devs
    # for future testing connections for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)
    # If the test execution machine is not the Host machine, deployment must be
    # separated into 2 steps (deploy_test_agent, then
    # execute_plan_without_deploy_test_agent) and cannot call SetupAction.run()
    # directly.
    test_lib.setup_plan.deploy_test_agent()
    # Create the same two vlan devices (tags 10 and 11) on every test host
    # through the test agent.
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    # Single-host plans return a bare object rather than a list; normalize.
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)
    # Install the two-management-node HA environment.
    test_stub.deploy_2ha(test_lib.all_scenario_config, test_lib.scenario_file)
    # Scenario hosts 0 and 1 are the management nodes; host 2 is a compute
    # host whose vlan config is restored after deployment.
    mn_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 0).ip_
    mn_ip2 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 1).ip_
    host_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 2).ip_
    test_stub.recover_vlan_in_host(host_ip1, test_lib.all_scenario_config, test_lib.deploy_config)
    # Wait up to 600s for the management server, letting the extra setup
    # script intervene if present.
    test_stub.wrapper_of_wait_for_management_server_start(600, EXTRA_SUITE_SETUP_SCRIPT)
    test_util.test_logger("@@@DEBUG->suite_setup@@@ os\.environ\[\'ZSTACK_BUILT_IN_HTTP_SERVER_IP\'\]=%s; os\.environ\[\'zstackHaVip\'\]=%s" \
            %(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], os.environ['zstackHaVip']) )
    # Push the test license onto both management nodes.
    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip1, 'root', 'password')
    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip2, 'root', 'password')
    # Run the optional per-suite extra setup script against each MN.
    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip1))
        os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip2))
    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
    # Per-host extra setup hook.
    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))
    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    #test_lib.lib_set_reserved_memory('1G')
    test_util.test_pass('Suite Setup Success')
def scp_file_to_vm(vm_inv, src_file, target_file):
    """SCP *src_file* from the test machine to *target_file* on the VM
    described by inventory *vm_inv* (first nic IP, inventory credentials)."""
    user = test_lib.lib_get_vm_username(vm_inv)
    passwd = test_lib.lib_get_vm_password(vm_inv)
    ssh.scp_file(src_file, target_file, vm_inv.vmNics[0].ip, user, passwd)
def deploy_ha_env(scenarioConfig, scenarioFile, deploy_config, config_json, deploy_tool, mn_img):
    """Install the MN-HA environment on the scenario's management host.

    Prepares/copies the HA config, the MN VM image and the HA installer onto
    the first MN host, seeds the MN image into the backing storage
    (ceph / nfs / fusionstor, per the scenario's MN-HA storage type) and runs
    the installer.

    :param scenarioConfig: parsed scenario XML
    :param scenarioFile: deployed-scenario file (resolves host IPs)
    :param deploy_config: parsed deploy XML
    :param config_json: local path of the HA deployer's JSON config
    :param deploy_tool: local path of the zs-ha installer binary
    :param mn_img: local path of the MN VM qcow2 image
    """
    prepare_config_json(scenarioConfig, scenarioFile, deploy_config, config_json)
    mn_ha_storage_type = sce_ops.get_mn_ha_storage_type(scenarioConfig, scenarioFile, deploy_config)
    # For ceph, node names in the config must carry the ceph- prefix.
    if mn_ha_storage_type == 'ceph':
        os.system('sed -i s/node/ceph-/g %s' %(config_json))
    # First MN host is the installation entry point.
    test_host = get_mn_host(scenarioConfig,scenarioFile)[0]
    test_host_ip = test_host.ip_
    test_host_config = sce_ops.get_scenario_config_vm(test_host.name_, scenarioConfig)
    host_password = test_host_config.imagePassword_
    # Remote staging paths, namespaced by the host IP.
    mn_image_path = "/home/%s/mn.qcow2" % test_host_ip
    installer_path = "/home/%s/zs-ha" % test_host_ip
    config_path = "/home/%s/config.json" % test_host_ip
    ssh.scp_file(config_json, config_path, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_)
    ssh.scp_file(mn_img, mn_image_path, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_)
    ssh.scp_file(deploy_tool, installer_path, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_)
    cmd0 = "chmod a+x %s" % (installer_path)
    test_util.test_logger("[%s] %s" % (test_host_ip, cmd0))
    ssh.execute(cmd0, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
    if mn_ha_storage_type == 'ceph':
        # In the separate-storage ceph layout the ceph mon lives on scenario
        # host 0 rather than on the MN host itself; restage the image there
        # and make sure qemu-img is available.
        if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-ceph-3-nets-sep.xml"], ["scenario-config-storage-separate-ceph.xml"]):
            ceph_node_ip = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 0).ip_
            mn_image_path = "/home/%s/mn.qcow2" % ceph_node_ip
            ssh.scp_file(mn_img, mn_image_path, ceph_node_ip, test_host_config.imageUsername_, test_host_config.imagePassword_)
            cmd0="yum install -y --disablerepo=* --enablerepo=zstack-local qemu-img"
            test_util.test_logger("[%s] %s" % (ceph_node_ip, cmd0))
            ssh.execute(cmd0, ceph_node_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
        else:
            ceph_node_ip = test_host_ip
        # Create the pool and import the MN image as an RBD volume.
        cmd1="ceph osd pool create zstack 128"
        test_util.test_logger("[%s] %s" % (ceph_node_ip, cmd1))
        ssh.execute(cmd1, ceph_node_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
        cmd2="qemu-img convert -f qcow2 -O raw %s rbd:zstack/mnvm.img" % mn_image_path
        test_util.test_logger("[%s] %s" % (ceph_node_ip, cmd2))
        # Image conversion can be very slow; allow up to 2 hours.
        if test_lib.lib_execute_ssh_cmd(ceph_node_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, cmd2, timeout=7200 ) == False:
            test_util.test_fail("fail to run cmd: %s on %s" %(cmd2, ceph_node_ip))
    elif mn_ha_storage_type == 'nfs':
        prepare_etc_hosts(scenarioConfig, scenarioFile, deploy_config, config_json)
        # NFS just needs the image copied into the shared /storage export.
        cmd1 = "cp %s /storage/mnvm.img" % (mn_image_path)
        test_util.test_logger("[%s] %s" % (test_host_ip, cmd1))
        ssh.execute(cmd1, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
    elif mn_ha_storage_type == 'fusionstor':
        # Create the pool, import the image, then localize it on node 0.
        cmd1 = "lichbd pool create zstack -p nbd"
        test_util.test_logger("[%s] %s" % (test_host_ip, cmd1))
        ssh.execute(cmd1, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
        cmd2 = "lichbd vol import %s zstack/mnvm.img -p nbd" %(mn_image_path)
        test_util.test_logger("[%s] %s" % (test_host_ip, cmd2))
        ssh.execute(cmd2, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
        cmd3 = "lich.inspect --localize /default/zstack/mnvm.img 0"
        test_util.test_logger("[%s] %s" % (test_host_ip, cmd3))
        ssh.execute(cmd3, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
    # Finally run the HA installer (NOTE: cmd3 is deliberately reassigned here,
    # shadowing the fusionstor command above).
    cmd3='%s install -p %s -c %s' % (installer_path, host_password, config_path)
    test_util.test_logger("[%s] %s" % (test_host_ip, cmd3))
    #ssh.execute(cmd3, test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, True, 22)
    # Installation may take a long time; allow up to 1 hour.
    if test_lib.lib_execute_ssh_cmd(test_host_ip, test_host_config.imageUsername_, test_host_config.imagePassword_, cmd3, timeout=3600 ) == False:
        test_util.test_fail("fail to run cmd: %s on %s" %(cmd3, test_host_ip))
def test():
    """Suite setup for the two-management-node HA scenario (variant that
    skips compute-host vlan recovery for mini backup-storage deployments and
    patches /etc/hosts for the 3.1.1 upgrade config)."""
    # A scenario config/file pair is mandatory for this suite.
    if test_lib.scenario_config == None or test_lib.scenario_file ==None:
        test_util.test_fail('Suite Setup Fail without scenario')
    # First run (scenario file not yet on disk): deploy the scenario VMs,
    # then skip — the remaining setup runs on a later invocation.
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')
    # Optional teardown of a previously deployed scenario.
    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)
    # Inside an already-deployed scenario the test NIC is zsn0; otherwise eth0.
    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"
    # This vlan creation is not a must if testing is under a nested virt env,
    # but it is required on a physical host without enough physical network
    # devices when the test execution machine differs from the Host machine.
    # No matter whether the current host is a ZStest host, create 2 vlan devs
    # for future testing connections for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)
    # If the test execution machine is not the Host machine, deployment must be
    # separated into 2 steps (deploy_test_agent, then
    # execute_plan_without_deploy_test_agent) and cannot call SetupAction.run()
    # directly.
    test_lib.setup_plan.deploy_test_agent()
    # Create the same two vlan devices (tags 10 and 11) on every test host
    # through the test agent.
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    # Single-host plans return a bare object rather than a list; normalize.
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)
    # Install the two-management-node HA environment.
    test_stub.deploy_2ha(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
    # Scenario hosts 0 and 1 are the management nodes.
    mn_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 0).ip_
    mn_ip2 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 1).ip_
    # Mini backup-storage deployments have no third (compute) host, so only
    # recover its vlan config when that element is absent.
    if not xmlobject.has_element(test_lib.deploy_config, 'backupStorages.miniBackupStorage'):
        host_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 2).ip_
        test_stub.recover_vlan_in_host(host_ip1, test_lib.all_scenario_config, test_lib.deploy_config)
    # Wait up to 600s for the management server, letting the extra setup
    # script intervene if present.
    test_stub.wrapper_of_wait_for_management_server_start(600, EXTRA_SUITE_SETUP_SCRIPT)
    test_util.test_logger("@@@DEBUG->suite_setup@@@ os\.environ\[\'ZSTACK_BUILT_IN_HTTP_SERVER_IP\'\]=%s; os\.environ\[\'zstackHaVip\'\]=%s" \
            %(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], os.environ['zstackHaVip']) )
    # Push the test license onto both management nodes.
    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip1, 'root', 'password')
    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip2, 'root', 'password')
    # Run the optional per-suite extra setup script against each MN.
    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip1))
        os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip2))
    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)
    # Per-host extra setup hook.
    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))
    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    #test_lib.lib_set_reserved_memory('1G')
    # The 3.1.1 upgrade config needs the rsync repo host pinned in /etc/hosts
    # on both management nodes.
    if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-local-ps.xml"], ["scenario-config-upgrade-3.1.1.xml"]):
        cmd = r"sed -i '$a\172.20.198.8 rsync.repo.zstack.io' /etc/hosts"
        ssh.execute(cmd, mn_ip1, "root", "password", False, 22)
        ssh.execute(cmd, mn_ip2, "root", "password", False, 22)
    test_util.test_pass('Suite Setup Success')
def test(): if os.environ.get('ZSTACK_SIMULATOR') == "yes": os.system('pkill -f ./test_rest_server.py') process = subprocess.Popen("./test_rest_server.py", cwd=os.environ.get('woodpecker_root_path')+'/dailytest/', universal_newlines=True, preexec_fn=os.setsid) test_lib.setup_plan.execute_plan_without_deploy_test_agent() deploy_operations.deploy_simulator_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file) deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file) agent_url = CP_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); src_path = entity_body_json["srcPath"].split('/')[3].split('@')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+src_path).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = CREATE_SNAPSHOT_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); volume_uuid = entity_body_json["volumeUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+volume_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = 
slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = UPLOAD_IMAGESTORE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); src_path = entity_body_json["srcPath"].split('/')[3].split('@')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+src_path).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = NFS_CREATE_TEMPLATE_FROM_VOLUME_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); src_path = entity_body_json["rootVolumePath"].split('vol-')[1].split('/')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+src_path).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) 
deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = SMP_CREATE_TEMPLATE_FROM_VOLUME_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); src_path = entity_body_json["volumePath"].split('vol-')[1].split('/')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+src_path).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = NFS_COMMIT_TO_IMAGESTORE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = SMP_COMMIT_BITS_TO_IMAGESTORE_PATH script = ''' { entity -> slurper = new 
groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = NFS_UPLOAD_TO_IMAGESTORE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = SMP_UPLOAD_BITS_TO_IMAGESTORE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC 
= get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = LOCAL_CREATE_TEMPLATE_FROM_VOLUME script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); volume_uuid = entity_body_json["volumePath"].split('vol-')[1].split('/')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+volume_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = LOCAL_COMMIT_TO_IMAGESTORE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json 
= slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = LOCAL_UPLOAD_TO_IMAGESTORE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = KVM_TAKE_VOLUME_SNAPSHOT_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); volume_uuid = entity_body_json["volumeUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+volume_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) 
deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = LOCAL_UPLOAD_BIT_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); src_path = entity_body_json["primaryStorageInstallPath"].split('image-')[1].split('/')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+src_path).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = NFS_UPLOAD_TO_SFTP_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); src_path = entity_body_json["primaryStorageInstallPath"].split('image-')[1].split('/')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+src_path).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = SMP_UPLOAD_BITS_TO_SFTP_BACKUPSTORAGE_PATH script = ''' { entity -> slurper = new 
groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); src_path = entity_body_json["primaryStorageInstallPath"].split('image-')[1].split('/')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+src_path).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = SBLK_CREATE_TEMPLATE_FROM_VOLUME_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); volume_uuid = entity_body_json["volumePath"].split('/')[3] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+volume_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = SBLK_COMMIT_BITS_TO_IMAGESTORE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new 
URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = SBLK_UPLOAD_BITS_TO_IMAGESTORE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = CEPH_DOWNLOAD_IMAGE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); 
reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = IMAGESTORE_IMPORT script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageuuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = SFTP_DOWNLOAD_IMAGE_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); image_uuid = entity_body_json["imageUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+image_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % 
(agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = KVM_MIGRATE_VM_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); vm_uuid = entity_body_json["vmUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+vm_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = GET_MD5_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); volume_uuid = entity_body_json["volumeUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+volume_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = CHECK_MD5_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); 
volume_uuid = entity_body_json["volumeUuid"] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+volume_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) agent_url = COPY_TO_REMOTE_BITS_PATH script = ''' { entity -> slurper = new groovy.json.JsonSlurper(); entity_body_json = slurper.parseText(entity.body); volume_uuid = entity_body_json["paths"][0].split('vol-')[1].split('/')[0] def get = new URL("http://127.0.0.1:8888/test/api/v1.0/store/"+volume_uuid).openConnection(); get.setRequestMethod("GET"); def getRC = get.getResponseCode(); if (!getRC.equals(200)) { return; //throw new Exception("shuang") }; reply = get.getInputStream().getText(); reply_json = slurper.parseText(reply); try { item = reply_json['result'] item_json = slurper.parseText(item); action = item_json['%s'] } catch(Exception ex) { return } if (action == 1) { sleep((24*60*60-60)*1000) } else if (action == 2) { sleep(360*1000) } } ''' % (agent_url) deploy_operations.remove_simulator_agent_script(agent_url) deploy_operations.deploy_simulator_agent_script(agent_url, script) deploy_operations.install_mini_server() else: if test_lib.scenario_config == None or test_lib.scenario_file ==None: test_util.test_fail('Suite Setup Fail without scenario') if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file): scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, 
test_lib.deploy_config) test_util.test_skip('Suite Setup Success') if test_lib.scenario_config != None and test_lib.scenario_destroy != None: scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy) nic_name = "eth0" if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file): nic_name = "zsn0" #This vlan creation is not a must, if testing is under nested virt env. But it is required on physical host without enough physcial network devices and your test execution machine is not the same one as Host machine. #no matter if current host is a ZStest host, we need to create 2 vlan devs for future testing connection for novlan test cases. linux.create_vlan_eth(nic_name, 10) linux.create_vlan_eth(nic_name, 11) #If test execution machine is not the same one as Host machine, deploy work is needed to separated to 2 steps(deploy_test_agent, execute_plan_without_deploy_test_agent). And it can not directly call SetupAction.run() test_lib.setup_plan.deploy_test_agent() cmd = host_plugin.CreateVlanDeviceCmd() cmd.ethname = nic_name cmd.vlan = 10 cmd2 = host_plugin.CreateVlanDeviceCmd() cmd2.ethname = nic_name cmd2.vlan = 11 testHosts = test_lib.lib_get_all_hosts_from_plan() if type(testHosts) != type([]): testHosts = [testHosts] for host in testHosts: http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd) http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2) test_stub.deploy_2ha(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config) mn_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 0).ip_ mn_ip2 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 1).ip_ if not xmlobject.has_element(test_lib.deploy_config, 'backupStorages.miniBackupStorage'): host_ip1 = 
test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 2).ip_ test_stub.recover_vlan_in_host(host_ip1, test_lib.all_scenario_config, test_lib.deploy_config) test_stub.wrapper_of_wait_for_management_server_start(600, EXTRA_SUITE_SETUP_SCRIPT) test_util.test_logger("@@@DEBUG->suite_setup@@@ os\.environ\[\'ZSTACK_BUILT_IN_HTTP_SERVER_IP\'\]=%s; os\.environ\[\'zstackHaVip\'\]=%s" \ %(os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], os.environ['zstackHaVip']) ) ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip1, 'root', 'password') ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip2, 'root', 'password') if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT): os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip1)) os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip2)) deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file) for host in testHosts: os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_)) test_lib.lib_set_primary_storage_imagecache_gc_interval(1) #test_lib.lib_set_reserved_memory('1G') if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-local-ps.xml"], ["scenario-config-upgrade-3.1.1.xml"]): cmd = r"sed -i '$a\172.20.198.8 rsync.repo.zstack.io' /etc/hosts" ssh.execute(cmd, mn_ip1, "root", "password", False, 22) ssh.execute(cmd, mn_ip2, "root", "password", False, 22) test_util.test_pass('Suite Setup Success')