def test():
    # This vlan creation is not a must if testing is under a nested virt env. But it is
    # required on a physical host without enough physical network devices, when the test
    # execution machine is not the same one as the Host machine.
    #linux.create_vlan_eth("eth0", 10, "10.0.0.200", "255.255.255.0")
    #linux.create_vlan_eth("eth0", 11, "10.0.1.200", "255.255.255.0")
    # No matter if the current host is a ZStest host, we need to create 2 vlan devices for
    # future testing connections for novlan test cases.
    linux.create_vlan_eth("eth0", 10)
    linux.create_vlan_eth("eth0", 11)

    # If the test execution machine is not the same one as the Host machine, the deploy work
    # needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent),
    # and it can not directly call SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    hosts = test_lib.lib_get_all_hosts_from_plan()
    if type(hosts) != type([]):
        hosts = [hosts]
    for host in hosts:
        cmd.ethname = 'eth0'
        cmd.vlan = 10
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        cmd.vlan = 11
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)

    test_lib.setup_plan.execute_plan_without_deploy_test_agent()
    deploy_operations.deploy_initial_database(test_lib.deploy_config)

    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)

    test_util.test_pass('Suite Setup Success')
def test():
    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"

    # This vlan creation is not a must if testing is under a nested virt env. But it is
    # required on a physical host without enough physical network devices, when the test
    # execution machine is not the same one as the Host machine.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)

    # If the test execution machine is not the same one as the Host machine, the deploy work
    # needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent),
    # and it can not directly call SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)

    test_lib.setup_plan.execute_plan_without_deploy_test_agent()
    deploy_operations.deploy_initial_database(test_lib.deploy_config)

    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)

    test_util.test_pass('Suite Setup Success')
def testName(self):
    server = sftpbackupstorage.SftpBackupStorageAgent()
    server.http_server.register_sync_uri('/testcallback', self.callback)
    server.http_server.start_in_thread()
    time.sleep(2)

    cmd = sftpbackupstorage.ConnectCmd()
    cmd.storagePath = "/tmp"
    #url = sftpbackupstorage._build_url_for_test([sftpbackupstorage.SftpBackupStorageAgent.CONNECT_PATH])
    url = 'http://localhost:7171%s' % sftpbackupstorage.SftpBackupStorageAgent.CONNECT_PATH
    print url
    rsp = http.json_dump_post(url, cmd)

    cmd = sftpbackupstorage.DownloadCmd()
    cmd.accountUuid = uuidhelper.uuid()
    cmd.bits = 64
    cmd.description = "Test"
    cmd.format = sftpbackupstorage.SftpBackupStorageAgent.IMAGE_TEMPLATE
    cmd.guestOsType = "rpm"
    cmd.hypervisorType = "KVM"
    cmd.imageUuid = uuidhelper.uuid()
    cmd.name = "test"
    cmd.timeout = 60
    cmd.url = "http://yum.puppetlabs.com/el/6/products/i386/puppetlabs-release-6-6.noarch.rpm"
    cmd.urlScheme = "http"
    url = 'http://localhost:7171%s' % sftpbackupstorage.SftpBackupStorageAgent.DOWNLOAD_IMAGE_PATH
    print url
    rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    print "post back"
    time.sleep(20)
    server.http_server.stop()
def test():
    # Clear ECS instances remaining in Aliyun
    hybrid.add_datacenter_iz()
    hybrid.tear_down()

    clean_util.cleanup_all_vms_violently()
    clean_util.cleanup_none_vm_volumes_violently()
    clean_util.umount_all_primary_storages_violently()
    clean_util.cleanup_backup_storage()
    #linux.remove_vlan_eth("eth0", 10)
    #linux.remove_vlan_eth("eth0", 11)
    cmd = host_plugin.DeleteVlanDeviceCmd()
    cmd.vlan_ethname = 'eth0.10'
    hosts = test_lib.lib_get_all_hosts_from_plan()
    if type(hosts) != type([]):
        hosts = [hosts]
    for host in hosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)

    cmd.vlan_ethname = 'eth0.11'
    for host in hosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)

    test_lib.setup_plan.stop_node()
    test_lib.lib_cleanup_host_ip_dict()
    test_util.test_pass('Hybrid Teardown Success')
def report(self):
    cmd = ProgressReportCmd()
    cmd.serverUuid = Report.serverUuid
    cmd.processType = self.processType
    cmd.progress = self.progress
    cmd.resourceUuid = self.resourceUuid
    logger.debug("url: %s, progress: %s, header: %s", Report.url, cmd.progress, self.header)
    http.json_dump_post(Report.url, cmd, self.header)
def report_to_management_node():
    cmd = ReportPsStatusCmd()
    cmd.psUuids = ps_uuids
    cmd.hostUuid = host_uuid
    cmd.psStatus = ps_status
    cmd.reason = reason
    logger.debug('primary storage[psList:%s] has new connection status[%s], report it to %s' % (ps_uuids, ps_status, url))
    http.json_dump_post(url, cmd, {'commandpath': '/kvm/reportstoragestatus'})
def report_to_management_node():
    cmd = ReportSelfFencerCmd()
    cmd.psUuids = ps_uuids
    cmd.hostUuid = host_uuid
    cmd.vmUuidsString = vm_uuids_string
    cmd.reason = "primary storage[uuids:%s] on host[uuid:%s] heartbeat fail, self fencer has been triggered" % (ps_uuids, host_uuid)
    logger.debug('host[uuid:%s] primary storage[psList:%s], triggered self fencer, report it to %s' % (host_uuid, ps_uuids, url))
    http.json_dump_post(url, cmd, {'commandpath': '/kvm/reportselffencer'})
def _rm_folder_contents_violently(host_ip, path):
    cmd = host_plugin.HostShellCmd()
    cmd.command = "rm -rf %s/*" % path
    try:
        http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), cmd)
    except Exception as e:
        err = linux.get_exception_stacktrace()
        test_util.test_logger("Fail to delete contents in folder: %s in Host: %s" % (path, host_ip))
        test_util.test_logger(err)
        return
    test_util.test_logger("Successfully deleted contents in folder: %s in Host: %s" % (path, host_ip))
def _umount_folder_violently(host_ip, path):
    cmd = host_plugin.HostShellCmd()
    cmd.command = "umount %s" % path
    try:
        http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), cmd)
    except Exception as e:
        err = linux.get_exception_stacktrace()
        test_util.test_logger("Fail to umount folder: %s in Host: %s" % (path, host_ip))
        test_util.test_logger(err)
        return
    test_util.test_logger("Umounted folder: %s in Host: %s" % (path, host_ip))
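# A minimal usage sketch (not part of the original source; it assumes the two helpers above
# live in the same module). It tears down a mounted test directory on a host by unmounting
# it first and then clearing the now-local mount point. Host IP and path are placeholders.
def _cleanup_mounted_folder_violently(host_ip, path):
    # Umount first so the rm only touches the local mount point, not the remote storage.
    _umount_folder_violently(host_ip, path)
    _rm_folder_contents_violently(host_ip, path)

#_cleanup_mounted_folder_violently('10.0.0.2', '/mnt/test_primary_storage')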
def test():
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')

    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)

    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"

    # This vlan creation is not a must if testing is under a nested virt env. But it is
    # required on a physical host without enough physical network devices, when the test
    # execution machine is not the same one as the Host machine.
    # No matter if the current host is a ZStest host, we need to create 2 vlan devices for
    # future testing connections for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)

    # If the test execution machine is not the same one as the Host machine, the deploy work
    # needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent),
    # and it can not directly call SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)

    test_lib.setup_plan.execute_plan_without_deploy_test_agent()

    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        mn_ips = deploy_operations.get_nodes_from_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
            os.system("bash %s '%s'" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ips))
    elif os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % (EXTRA_SUITE_SETUP_SCRIPT))

    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)

    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))

    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    if test_lib.ver_ge_zstack_2_0(mn_ip):
        test_lib.lib_set_allow_live_migration_local_storage('true')

    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_lib.ensure_recover_script_l2_correct()

    if test_lib.lib_is_storage_network_separate():
        add_ps_network_gateway_sys_tag()

    test_util.test_pass('Suite Setup Success')
def test():
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')

    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)

    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"

    linux.create_vlan_eth(nic_name, 1010)
    linux.create_vlan_eth(nic_name, 1011)

    # This vlan creation is not a must if testing is under a nested virt env. But it is
    # required on a physical host without enough physical network devices, when the test
    # execution machine is not the same one as the Host machine.
    #linux.create_vlan_eth("eth0", 10, "10.0.0.200", "255.255.255.0")
    #linux.create_vlan_eth("eth0", 11, "10.0.1.200", "255.255.255.0")
    # No matter if the current host is a ZStest host, we need to create 2 vlan devices for
    # future testing connections for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)

    # If the test execution machine is not the same one as the Host machine, the deploy work
    # needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent),
    # and it can not directly call SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    hosts = test_lib.lib_get_all_hosts_from_plan()
    if type(hosts) != type([]):
        hosts = [hosts]
    for host in hosts:
        cmd.ethname = nic_name
        cmd.vlan = 10
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        cmd.vlan = 11
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)

    test_lib.setup_plan.execute_plan_without_deploy_test_agent()

    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        mn_ips = deploy_operations.get_nodes_from_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
            os.system("bash %s '%s'" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ips))
    elif os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % (EXTRA_SUITE_SETUP_SCRIPT))

    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)

    for host in hosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))

    delete_policy = test_lib.lib_set_delete_policy('vm', 'Direct')
    delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
    delete_policy = test_lib.lib_set_delete_policy('image', 'Direct')

    #if test_lib.lib_get_ha_selffencer_maxattempts() != None:
    #    test_lib.lib_set_ha_selffencer_maxattempts('60')
    #    test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')

    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_util.test_pass('Suite Setup Success')
def testName(self):
    cmd = dnsmasq.AddDhcpEntryCmd()

    nic = DhcpInfo()
    nic.ip = "10.0.0.10"
    nic.mac = "50:e5:49:c9:65:a3"
    nic.gateway = "10.0.0.1"
    nic.netmask = "255.255.255.0"
    nic.hostname = 'host1'
    nic.dns = ['8.8.8.8', '75.75.75.75']

    nic1 = DhcpInfo()
    nic1.ip = "10.0.0.13"
    nic1.mac = "50:e5:49:c9:65:b0"
    nic1.netmask = "255.255.255.0"
    nic1.hostname = 'host2'
    nic1.dns = ['8.8.8.8', '75.75.75.75']

    nic2 = DhcpInfo()
    nic2.ip = "10.0.0.12"
    nic2.mac = "50:e5:49:c9:65:b1"
    nic2.netmask = "255.255.255.0"
    nic2.hostname = 'host3'

    cmd.dhcpEntries = [nic, nic1, nic2]
    cmd.rebuild = True
    rsp = http.json_dump_post('http://localhost:7272/adddhcp', cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    time.sleep(10)
    self.service.stop()
def check(self):
    super(zstack_kvm_volume_file_checker, self).check()
    volume = self.test_obj.volume
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
        return self.judge(False)

    host = test_lib.lib_get_volume_host(volume)
    if not host:
        test_util.test_logger('Check result: can not find a Host in the same Zone as [volume uuid: ] %s. Can not check volume file existence' % volume.uuid)
        return self.judge(False)

    cmd = host_plugin.HostShellCmd()
    file_exist = "file_exist"
    cmd.command = '[ -f %s ] && echo %s' % (volume_installPath, file_exist)
    rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, host_plugin.HOST_SHELL_CMD_PATH), cmd)
    rsp = jsonobject.loads(rspstr)
    output = jsonobject.dumps(rsp.stdout)
    if file_exist in output:
        test_util.test_logger('Check result: [volume file: ] %s exists on [host name:] %s .' % (volume.uuid, host.name))
        return self.judge(True)
    else:
        test_util.test_logger('Check result: [volume file: ] %s does not exist on [host name:] %s .' % (volume.uuid, host.name))
        return self.judge(False)
def _wait_echo(target_ip):
    try:
        rspstr = http.json_dump_post(testagent.build_http_path(target_ip, host_plugin.ECHO_PATH))
    except:
        print('zstack-testagent has not started up, will try again ...')
        return False
    return True
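# A minimal polling sketch (not part of the original source): keep probing the test agent
# echo path with _wait_echo() until it answers or a timeout expires. The timeout and
# interval values are illustrative assumptions.
def _wait_for_test_agent(target_ip, timeout=120, interval=2):
    import time
    deadline = time.time() + timeout
    while time.time() < deadline:
        if _wait_echo(target_ip):
            return True
        time.sleep(interval)
    return False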
def async_call_wait_for_complete(self, apicmd, exception_on_error=True, interval=500, fail_soon=False):
    self._check_not_none_field(apicmd)
    timeout = apicmd.timeout
    if not timeout:
        timeout = 1800000

    cmd = {apicmd.FULL_NAME: apicmd}
    logger.debug("async call[url: %s, request: %s]" % (self.api_url, jsonobject.dumps(cmd)))
    jstr = http.json_dump_post(self.api_url, cmd, fail_soon=fail_soon)
    rsp = jsonobject.loads(jstr)
    if rsp.state == 'Done':
        logger.debug("async call[url: %s, response: %s]" % (self.api_url, rsp.result))
        reply = jsonobject.loads(rsp.result)
        (name, event) = (reply.__dict__.items()[0])
        if exception_on_error and not event.success:
            raise ApiError('API call[%s] failed because %s' % (name, self._error_code_to_string(event.error)))
        return name, event

    # Poll for the result every `interval` milliseconds until Done or timeout.
    curr = 0
    finterval = float(float(interval) / float(1000))
    ret_uuid = rsp.uuid
    while rsp.state != 'Done' and curr < timeout:
        time.sleep(finterval)
        rsp = self._get_response(ret_uuid)
        curr += interval

    if curr >= timeout:
        raise ApiError('API call[%s] timeout after %dms' % (apicmd.FULL_NAME, curr))

    logger.debug("async call[url: %s, response: %s] after %dms" % (self.api_url, rsp.result, curr))
    reply = jsonobject.loads(rsp.result)
    (name, event) = (reply.__dict__.items()[0])
    if exception_on_error and not event.success:
        raise ApiError('API call[%s] failed because %s' % (name, self._error_code_to_string(event.error)))
    return name, event
def check(self):
    super(zstack_kvm_volume_attach_checker, self).check()
    volume = self.test_obj.volume
    if not volume.vmInstanceUuid:
        test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid. It is not attached to any vm.' % volume.uuid)
        return self.judge(False)

    if not self.test_obj.target_vm:
        test_util.test_logger('Check result: test [volume:] %s does NOT have a vmInstance record in the test structure. Can not do further checking.' % volume.uuid)
        return self.judge(False)

    vm = self.test_obj.target_vm.vm
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check if volume is attached to vm.' % volume.uuid)
        return self.judge(False)

    host = test_lib.lib_get_vm_host(vm)
    cmd = vm_plugin.VmStatusCmd()
    cmd.vm_uuids = [vm.uuid]
    rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS), cmd)
    rsp = jsonobject.loads(rspstr)
    output = jsonobject.dumps(rsp.vm_status[vm.uuid])
    if volume_installPath in output:
        test_util.test_logger('Check result: [volume file:] %s is found in [vm:] %s on [host:] %s .' % (volume.uuid, vm.uuid, host.managementIp))
        return self.judge(True)
    else:
        test_util.test_logger('Check result: [volume file:] %s is not found in [vm:] %s on [host:] %s .' % (volume.uuid, vm.uuid, host.managementIp))
        return self.judge(False)
def test_mount_failure(self):
    cmd = nfs_primarystorage_plugin.MountCmd()
    cmd.url = 'this_is_a_wrong_path'
    cmd.mountPath = os.path.join('/mnt', uuidhelper.uuid())
    callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.MOUNT_PATH])
    ret = http.json_dump_post(callurl, cmd)
    rsp = jsonobject.loads(ret)
    self.assertFalse(rsp.success, rsp.error)
def test_check_physical_network_interface(self):
    url = kvmagent._build_url_for_test([network_plugin.CHECK_PHYSICAL_NETWORK_INTERFACE_PATH])
    logger.debug('calling %s' % url)
    cmd = network_plugin.CheckPhysicalNetworkInterfaceCmd()
    cmd.interfaceNames = ['p5p1']
    ret = http.json_dump_post(url, body=cmd)
    rsp = jsonobject.loads(ret)
    self.assertTrue(rsp.success)
def testName(self):
    self.mount()
    cmd = nfs_primarystorage_plugin.GetCapacityCmd()
    url = kvmagent._build_url_for_test([nfs_primarystorage_plugin.GET_CAPACITY_PATH])
    rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    time.sleep(5)
    self.service.stop()
    linux.umount_by_url(self.NFS_URL)
def testName(self):
    url = kvmagent._build_url_for_test([network_plugin.KVM_REALIZE_L2NOVLAN_NETWORK_PATH])
    logger.debug('calling %s' % url)
    cmd = network_plugin.CreateBridgeCmd()
    cmd.physicalInterfaceName = 'eth0'
    cmd.bridgeName = 'br_eth0'
    rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    time.sleep(2)
    self.service.stop()
def mount(self):
    cmd = nfs_primarystorage_plugin.MountCmd()
    cmd.url = self.NFS_URL
    cmd.mountPath = os.path.join('/mnt', uuidhelper.uuid())
    callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.MOUNT_PATH])
    ret = http.json_dump_post(callurl, cmd)
    rsp = jsonobject.loads(ret)
    self.assertTrue(rsp.success, rsp.error)
    self.assertTrue(linux.is_mounted(cmd.url, cmd.mountPath))
def test_unmount_always_success(self):
    cmd = nfs_primarystorage_plugin.UnmountCmd()
    cmd.url = NFS_URL
    cmd.mountPath = '/not_mounted_path'
    callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.UNMOUNT_PATH])
    ret = http.json_dump_post(callurl, cmd)
    rsp = jsonobject.loads(ret)
    self.assertTrue(rsp.success, rsp.error)
    self.assertFalse(linux.is_mounted(path=cmd.mountPath))
def test_hostfact(self):
    url = kvmagent._build_url_for_test([host_plugin.HOSTFACT_PATH])
    cmd = HostFactCmd()
    ret = http.json_dump_post(url, body=cmd)
    rsp = jsonobject.loads(ret)
    self.assertTrue(rsp.success)
    self.assertEqual(host_plugin._get_cpu_num(), rsp.cpuNum)
    self.assertEqual(host_plugin._get_cpu_speed(), rsp.cpuSpeed)
    self.assertEqual(host_plugin._get_total_memory(), rsp.totalMemory)
def testName(self):
    url = kvmagent._build_url_for_test([virtualrouter_plugin.VirtualRouterPlugin.VR_KVM_CREATE_BOOTSTRAP_ISO_PATH])
    info = virtualrouter_plugin.BootstrapIsoInfo()
    info.managementNicGateway = "192.168.1.1"
    info.managementNicIp = "192.168.1.10"
    info.managementNicMac = "50:E5:49:C9:65:A3"
    info.managementNicNetmask = "255.255.255.0"
    cmd = virtualrouter_plugin.CreateVritualRouterBootstrapIsoCmd()
    cmd.isoInfo = info
    cmd.isoPath = '/tmp/vr.iso'
    rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    time.sleep(2)

    cmd = virtualrouter_plugin.DeleteVirtualRouterBootstrapIsoCmd()
    cmd.isoPath = '/tmp/vr.iso'
    url = kvmagent._build_url_for_test([virtualrouter_plugin.VirtualRouterPlugin.VR_KVM_DELETE_BOOTSTRAP_ISO_PATH])
    rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    time.sleep(2)
    self.service.stop()
def callback(self, req):
    rsp = jsonobject.loads(req[http.REQUEST_BODY])
    print jsonobject.dumps(rsp)

    cmd = vm_plugin.RebootVmCmd()
    cmd.uuid = self.uuid
    cmd.timeout = 30
    url = kvmagent._build_url_for_test([vm_plugin.KVM_REBOOT_VM_PATH])
    rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL2})
def testName(self):
    cmd = dns.SetDnsCmd()
    info = dns.DnsInfo()
    info.dnsAddress = '72.72.72.72'
    cmd.dns = [info]
    rsp = http.json_dump_post('http://localhost:7272/setdns', cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    time.sleep(10)
    self.service.stop()
def test():
    clean_util.cleanup_all_vms_violently()
    clean_util.cleanup_none_vm_volumes_violently()
    clean_util.umount_all_primary_storages_violently()
    clean_util.cleanup_backup_storage()
    test_lib.setup_plan.stop_node()

    cmd = host_plugin.DeleteVlanDeviceCmd()
    hosts = test_lib.lib_get_all_hosts_from_plan()
    if type(hosts) != type([]):
        hosts = [hosts]
    for host in hosts:
        cmd.vlan_ethname = 'eth0.10'
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)
        cmd.vlan_ethname = 'eth0.11'
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.DELETE_VLAN_DEVICE_PATH), cmd)

    test_lib.lib_cleanup_host_ip_dict()
    test_util.test_pass('Multicluster Teardown Success')
def testName(self):
    cmd = snat.SetSNATCmd()
    info = snat.SnatInfo()
    info.publicIp = '192.168.0.199'
    info.nicMac = '52:54:00:03:FA:54'
    cmd.natInfo = [info]
    rsp = http.json_dump_post('http://localhost:7272/setsnat', cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    time.sleep(10)
    self.service.stop()
def check(self):
    super(zstack_kvm_volume_attach_checker, self).check()
    volume = self.test_obj.volume
    if not volume.vmInstanceUuid:
        test_util.test_logger('Check result: [volume:] %s does NOT have vmInstanceUuid. It is not attached to any vm.' % volume.uuid)
        return self.judge(False)

    if not self.test_obj.target_vm:
        test_util.test_logger('Check result: test [volume:] %s does NOT have a vmInstance record in the test structure. Can not do further checking.' % volume.uuid)
        return self.judge(False)

    vm = self.test_obj.target_vm.vm
    volume_installPath = volume.installPath
    if not volume_installPath:
        test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check if volume is attached to vm.' % volume.uuid)
        return self.judge(False)

    host = test_lib.lib_get_vm_host(vm)
    cmd = vm_plugin.VmStatusCmd()
    cmd.vm_uuids = [vm.uuid]
    rspstr = http.json_dump_post(testagent.build_http_path(host.managementIp, vm_plugin.VM_BLK_STATUS), cmd)
    rsp = jsonobject.loads(rspstr)
    output = jsonobject.dumps(rsp.vm_status[vm.uuid])

    # Translate the storage-specific installPath into the device path that shows up in the VM's
    # block device status.
    if volume_installPath.startswith('iscsi'):
        volume_installPath = volume_installPath.split(';')[0].split('/iqn')[1]
        volume_installPath = 'iqn%s' % volume_installPath
        volume_installPath = volume_installPath[:-2]
    elif volume_installPath.startswith('ceph'):
        volume_installPath = volume_installPath.split('ceph://')[1]
    elif volume_installPath.startswith('fusionstor'):
        volume_installPath = volume_installPath.split('fusionstor://')[1]
    elif volume_installPath.startswith('sharedblock'):
        volume_installPath = "/dev/" + volume_installPath.split('sharedblock://')[1]
    elif volume_installPath.startswith('mini'):
        _cmd = "drbdsetup show %s | grep device | awk -F';' '{print $1}' | awk '{print $3}'" % volume.uuid
        result = test_lib.lib_execute_ssh_cmd(host.managementIp, host.username, host.password, _cmd, 180)
        volume_installPath = '/dev/drbd' + result.strip()
    elif volume_installPath.startswith('ebs'):
        ps_uuid = volume.primaryStorageUuid
        ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid)
        url = ps.url.replace('ocean/api', 'dev/name')
        vol_id = volume_installPath.split(';')[1].split('volumeId=')[-1]
        req = urllib2.Request(url, headers={'Volumeid': vol_id})
        volume_installPath = '/dev/' + urllib2.urlopen(req).read().split('"')[-2]

    if volume_installPath in output:
        test_util.test_logger('Check result: [volume:] %s [file:] %s is found in [vm:] %s on [host:] %s .' % (volume.uuid, volume_installPath, vm.uuid, host.managementIp))
        return self.judge(True)
    else:
        test_util.test_logger('Check result: [volume:] %s [file:] %s is not found in [vm:] %s on [host:] %s .' % (volume.uuid, volume_installPath, vm.uuid, host.managementIp))
        return self.judge(False)
def testName(self):
    self.mount()
    cmd = nfs_primarystorage_plugin.CreateRootVolumeFromTemplateCmd()
    cmd.installUrl = '/tmp/test1.qcow2'
    cmd.templatePathInCache = "/tmp/test.qcow2"
    cmd.timeout = 30
    url = kvmagent._build_url_for_test([nfs_primarystorage_plugin.CREATE_VOLUME_FROM_TEMPLATE_PATH])
    rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID: uuidhelper.uuid(), http.CALLBACK_URI: self.CALLBACK_URL})
    time.sleep(10)
    self.service.stop()
    linux.umount_by_url(self.NFS_URL)
def test():
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')

    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)

    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"

    # This vlan creation is not a must if testing is under a nested virt env. But it is
    # required on a physical host without enough physical network devices, when the test
    # execution machine is not the same one as the Host machine.
    # No matter if the current host is a ZStest host, we need to create 2 vlan devices for
    # future testing connections for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)

    # If the test execution machine is not the same one as the Host machine, the deploy work
    # needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent),
    # and it can not directly call SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)

    test_lib.setup_plan.execute_plan_without_deploy_test_agent()

    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        mn_ips = deploy_operations.get_nodes_from_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
            os.system("bash %s '%s' %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ips, 'disaster-recovery'))
    elif os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s '' '%s'" % (EXTRA_SUITE_SETUP_SCRIPT, 'disaster-recovery'))

    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        host_ips = scenario_operations.dump_scenario_file_ips(test_lib.scenario_file)
    else:
        host_ips = testHosts

    #for host in host_ips:
    #    if host.managementIp_ != mn_ip:
    #        cmd = "echo 'export LANG=\"zh_CN.GB18030\"' >> /etc/profile && sudo ls /root && source /etc/profile"
    #        os.system('sshpass -p password ssh root@%s "%s"' % (host.managementIp_, cmd))

    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)

    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))

    mn_ip = res_ops.query_resource(res_ops.MANAGEMENT_NODE)[0].hostName
    if test_lib.ver_ge_zstack_2_0(mn_ip):
        test_lib.lib_set_allow_live_migration_local_storage('true')

    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_lib.ensure_recover_script_l2_correct()

    if test_lib.lib_is_storage_network_separate():
        add_ps_network_gateway_sys_tag()

    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        scenario_operations.replace_env_params_if_scenario()
    else:
        pass

    test_util.test_pass('Suite Setup Success')
def test_hostfact(self):
    url = kvmagent._build_url_for_test([host_plugin.HOSTFACT_PATH])
    cmd = HostFactCmd()
    ret = http.json_dump_post(url, body=cmd)
    rsp = jsonobject.loads(ret)
def test():
    if test_lib.scenario_config == None or test_lib.scenario_file == None:
        test_util.test_fail('Suite Setup Fail without scenario')

    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')

    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)

    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"

    # This vlan creation is not a must if testing is under a nested virt env. But it is
    # required on a physical host without enough physical network devices, when the test
    # execution machine is not the same one as the Host machine.
    # No matter if the current host is a ZStest host, we need to create 2 vlan devices for
    # future testing connections for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)

    # If the test execution machine is not the same one as the Host machine, the deploy work
    # needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent),
    # and it can not directly call SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)

    if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-nfs.xml"],
                                       ["scenario-config-storage-separate-nfs.xml"]):
        config_json = os.environ.get('configJsonSepStor')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-nonmon-ceph.xml"],
                                         ["scenario-config-storage-separate-ceph.xml"]):
        config_json = os.environ.get('configJsonNonMon')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-flat-dhcp-nfs-sep-pub-man.xml"],
                                         ["scenario-config-nfs-sep-man.xml",
                                          "scenario-config-nfs-sep-pub.xml"]):
        config_json = os.environ.get('configJsonSepPub')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-ceph-3-nets-sep.xml"],
                                         ["scenario-config-ceph-sep-man.xml",
                                          "scenario-config-ceph-sep-pub.xml",
                                          "scenario-config-ceph-3-nets-sep.xml"]):
        config_json = os.environ.get('configJsonSepPub')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-fusionstor-3-nets-sep.xml"],
                                         ["scenario-config-fusionstor-3-nets-sep.xml"]):
        config_json = os.environ.get('configJson3Net')
    elif test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-flat-dhcp-nfs-mul-net-pubs.xml"],
                                         ["scenario-config-nfs-sep-man.xml",
                                          "scenario-config-nfs-sep-pub.xml"]):
        config_json = os.environ.get('configJsonAllOne')
    else:
        config_json = os.environ.get('configJson')

    ha_deploy_tool = os.environ.get('zstackHaInstaller')
    mn_img = os.environ.get('mnImage')
    test_stub.deploy_ha_env(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config, config_json, ha_deploy_tool, mn_img)

    #if os.path.basename(os.environ.get('WOODPECKER_SCENARIO_CONFIG_FILE')).strip() == "scenario-config-vpc-ceph-3-sites.xml":
    #    test_util.test_logger("@@@DEBUG->IS VPC CEPH@@@")
    #    old_mn_ip = os.environ['zstackHaVip']
    #    test_stub.auto_set_mn_ip(test_lib.scenario_file)
    #    cmd = 'sed -i "s/%s/%s/g" %s' % (old_mn_ip, os.environ['zstackHaVip'], EXTRA_SUITE_SETUP_SCRIPT)
    #    os.system(cmd)

    #node_operations.wait_for_management_server_start(600)
    test_stub.wrapper_of_wait_for_management_server_start(600, EXTRA_SUITE_SETUP_SCRIPT)

    test_util.test_logger("@@@DEBUG->suite_setup@@@ os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']=%s; os.environ['zstackHaVip']=%s"
                          % (os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], os.environ['zstackHaVip']))

    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", os.environ.get('zstackHaVip'), 'root', 'password')

    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)

    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)

    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))

    #if test_lib.lib_get_ha_selffencer_maxattempts() != None:
    #    test_lib.lib_set_ha_selffencer_maxattempts('60')
    #    test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')

    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_lib.lib_set_reserved_memory('8G')

    if test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-flat-dhcp-nfs-sep-pub-man.xml"], ["scenario-config-nfs-sep-pub.xml"]) or \
       test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-ceph-3-nets-sep.xml"], ["scenario-config-ceph-sep-pub.xml"]) or \
       test_lib.lib_cur_cfg_is_a_and_b(["test-config-vyos-fusionstor-3-nets-sep.xml"], ["scenario-config-fusionstor-3-nets-sep.xml"]):
        add_ps_network_gateway_sys_tag()

    test_util.test_pass('Suite Setup Success')
def test_connect(self):
    url = kvmagent._build_url_for_test([host_plugin.CONNECT_PATH])
    logger.debug('calling %s' % url)
    ret = http.json_dump_post(url, body=ConnectCmd())
    rsp = jsonobject.loads(ret)
    self.assertTrue(rsp.success)
def test_sync_uri2(self):
    data = {"value": "hello"}
    rsp = http.json_dump_post("http://localhost:7070/returnsame/", data)
    self.assertEqual("hello", rsp)
def _delete_files(host_ip, path):
    cmd = host_plugin.HostShellCmd()
    cmd.command = "rm -rf %s*" % path
    test_util.test_logger("Delete files: %s in Host: %s" % (path, host_ip))
    http.json_dump_post(testagent.build_http_path(host_ip, host_plugin.HOST_SHELL_CMD_PATH), cmd)
def _destroy_vm_violently(host_ip, uuid):
    cmd = vm_plugin.DeleteVmCmd()
    cmd.vm_uuids = [uuid]
    http.json_dump_post(testagent.build_http_path(host_ip, vm_plugin.DELETE_VM_PATH), cmd)
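# A minimal cleanup sketch (not part of the original source; it assumes _delete_files() and
# _destroy_vm_violently() live in the same module). It force-destroys a VM through the test
# agent, then removes its leftover files on the host. The arguments are placeholders.
def _purge_vm_violently(host_ip, vm_uuid, vm_file_path):
    _destroy_vm_violently(host_ip, vm_uuid)
    _delete_files(host_ip, vm_file_path)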
def test():
    if test_lib.scenario_config == None or test_lib.scenario_file == None:
        test_util.test_fail('Suite Setup Fail without scenario')

    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')

    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)

    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"

    # This vlan creation is not a must if testing is under a nested virt env. But it is
    # required on a physical host without enough physical network devices, when the test
    # execution machine is not the same one as the Host machine.
    # No matter if the current host is a ZStest host, we need to create 2 vlan devices for
    # future testing connections for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)

    # If the test execution machine is not the same one as the Host machine, the deploy work
    # needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent),
    # and it can not directly call SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10
    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11
    testHosts = test_lib.lib_get_all_hosts_from_plan()
    if type(testHosts) != type([]):
        testHosts = [testHosts]
    for host in testHosts:
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)

    test_stub.deploy_2ha(test_lib.all_scenario_config, test_lib.scenario_file)
    mn_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 0).ip_
    mn_ip2 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 1).ip_
    host_ip1 = test_stub.get_host_by_index_in_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, 2).ip_
    test_stub.recover_vlan_in_host(host_ip1, test_lib.all_scenario_config, test_lib.deploy_config)
    test_stub.wrapper_of_wait_for_management_server_start(600, EXTRA_SUITE_SETUP_SCRIPT)

    test_util.test_logger("@@@DEBUG->suite_setup@@@ os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP']=%s; os.environ['zstackHaVip']=%s"
                          % (os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'], os.environ['zstackHaVip']))

    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip1, 'root', 'password')
    ssh.scp_file("/home/license-10host-10days-hp.txt", "/home/license-10host-10days-hp.txt", mn_ip2, 'root', 'password')

    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip1))
        os.system("bash %s %s" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ip2))

    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)

    for host in testHosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))

    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    #test_lib.lib_set_reserved_memory('1G')
    test_util.test_pass('Suite Setup Success')
def test():
    if test_lib.scenario_config != None and test_lib.scenario_file != None and not os.path.exists(test_lib.scenario_file):
        scenario_operations.deploy_scenario(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        test_util.test_skip('Suite Setup Success')

    if test_lib.scenario_config != None and test_lib.scenario_destroy != None:
        scenario_operations.destroy_scenario(test_lib.all_scenario_config, test_lib.scenario_destroy)

    nic_name = "eth0"
    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        nic_name = "zsn0"

    linux.create_vlan_eth(nic_name, 1010)
    linux.create_vlan_eth(nic_name, 1011)

    # This vlan creation is not a must if testing is under a nested virt env. But it is
    # required on a physical host without enough physical network devices, when the test
    # execution machine is not the same one as the Host machine.
    #linux.create_vlan_eth("eth0", 10, "10.0.0.200", "255.255.255.0")
    #linux.create_vlan_eth("eth0", 11, "10.0.1.200", "255.255.255.0")
    # No matter if the current host is a ZStest host, we need to create 2 vlan devices for
    # future testing connections for novlan test cases.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)

    # If the test execution machine is not the same one as the Host machine, the deploy work
    # needs to be separated into 2 steps (deploy_test_agent, execute_plan_without_deploy_test_agent),
    # and it can not directly call SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()
    cmd = host_plugin.CreateVlanDeviceCmd()
    hosts = test_lib.lib_get_all_hosts_from_plan()
    if type(hosts) != type([]):
        hosts = [hosts]
    for host in hosts:
        cmd.ethname = nic_name
        cmd.vlan = 10
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        cmd.vlan = 11
        http.json_dump_post(testagent.build_http_path(host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)

    test_lib.setup_plan.execute_plan_without_deploy_test_agent()

    if test_lib.scenario_config != None and test_lib.scenario_file != None and os.path.exists(test_lib.scenario_file):
        mn_ips = deploy_operations.get_nodes_from_scenario_file(test_lib.all_scenario_config, test_lib.scenario_file, test_lib.deploy_config)
        if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
            os.system("bash %s '%s'" % (EXTRA_SUITE_SETUP_SCRIPT, mn_ips))
    elif os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % (EXTRA_SUITE_SETUP_SCRIPT))

    deploy_operations.deploy_initial_database(test_lib.deploy_config, test_lib.all_scenario_config, test_lib.scenario_file)

    for host in hosts:
        os.system("bash %s %s" % (EXTRA_HOST_SETUP_SCRIPT, host.managementIp_))

    delete_policy = test_lib.lib_set_delete_policy('vm', 'Direct')
    delete_policy = test_lib.lib_set_delete_policy('volume', 'Direct')
    delete_policy = test_lib.lib_set_delete_policy('image', 'Direct')

    #if test_lib.lib_get_ha_selffencer_maxattempts() != None:
    #    test_lib.lib_set_ha_selffencer_maxattempts('60')
    #    test_lib.lib_set_ha_selffencer_storagechecker_timeout('60')

    test_lib.lib_set_primary_storage_imagecache_gc_interval(1)
    test_util.test_pass('Suite Setup Success')