def test():
    """Round-trip test for the kvm global config vm.cacheMode.

    Changes the value to 'writeback' while a basic VM exists, verifies the
    change, restores the previous value, and verifies the restore.
    """
    global test_obj_dict
    target_value = "writeback"

    # A live VM is kept around so the cache-mode setting applies to something.
    vm = test_stub.create_basic_vm()
    test_obj_dict.add_vm(vm)
    vm.check()

    # change_global_config returns the value that was in effect before the
    # change; keep it so the default can be restored afterwards.
    pre_value = conf_ops.change_global_config("kvm", "vm.cacheMode", target_value)

    vm_cacheMode = conf_ops.get_global_config_value("kvm", "vm.cacheMode")
    if vm_cacheMode != target_value:
        test_util.test_fail(
            'change value of vm.cacheMode failed. target value is %s, real value is %s' % (target_value, vm_cacheMode))

    #set back to defualt
    conf_ops.change_global_config("kvm", "vm.cacheMode", pre_value)
    vm_cacheMode = conf_ops.get_global_config_value("kvm", "vm.cacheMode")
    if vm_cacheMode != pre_value:
        test_util.test_fail('Reset vm.cacheMode Value to Default Fail.')

    vm.destroy()
    test_util.test_pass('vm.cacheMode Change Pass.')
def test():
    """Verify two-factor authentication (twofa) global config.

    Enables twofa, checks that a freshly created auth secret is 'NewCreated',
    logs in with a generated security code, confirms the secret transitions
    to 'Logined', then disables twofa and confirms login works without a code.
    """
    password = '******'
    session_uuid = acc_ops.login_as_admin()

    #Enable twofa and check login
    twofa_enabled = conf_ops.get_global_config_value('twofa', 'twofa.enable')
    if twofa_enabled == 'false':
        conf_ops.change_global_config('twofa', 'twofa.enable', 'true')

    twofa = acc_ops.get_twofa_auth_secret('admin', password, session_uuid=session_uuid)
    secret = twofa.secret
    twofa_status = twofa.status
    # A secret that has never been used for login must be 'NewCreated'.
    # FIX: failure messages previously misspelled "status" as "statue".
    if twofa_status != 'NewCreated':
        test_util.test_fail("The twofa auth secret status should be 'NewCreated' but it's %s" % twofa_status)

    security_code = test_stub.get_security_code(secret)
    session1_uuid = acc_ops.login_by_account('admin', password, system_tags=['twofatoken::%s' % security_code])
    if session1_uuid != None:
        test_util.test_logger("Enable twofa and login with security code passed")

    # After a successful code-based login the secret must report 'Logined'.
    twofa_status = acc_ops.get_twofa_auth_secret('admin', password, session_uuid=session_uuid).status
    if twofa_status != 'Logined':
        test_util.test_fail("The twofa auth secret status should be 'Logined' but it's %s" % twofa_status)

    #Disable twofa and check login again
    conf_ops.change_global_config('twofa', 'twofa.enable', 'false', session_uuid=session_uuid)
    session2_uuid = acc_ops.login_as_admin()
    if session2_uuid != None:
        # test_pass terminates the test, so the test_fail below is only
        # reached when the plain login did not return a session.
        test_util.test_pass("Disable twofa and login without security code passed")
    test_util.test_fail("Fail to login without security code after twofa disabled")
def test():
    """Create a snapshot on a stopped VM's root volume, then exercise
    destroy/recover/start/expunge on the VM and clean everything up.

    Skipped when the vm deletionPolicy is 'Direct', since recover() needs
    a delayed deletion policy.
    """
    delete_policy = conf_ops.get_global_config_value('vm', 'deletionPolicy')
    if delete_policy == 'Direct':
        test_util.test_skip('vm delete_policy is Direct, skip test.')
        return

    test_util.test_dsc('Create original vm')
    vm = test_stub.create_vm(vm_name='basic-test-vm')
    test_obj_dict.add_vm(vm)
    root_volume_uuid = test_lib.lib_get_root_volume_uuid(vm.get_vm())

    test_util.test_dsc('Stop vm before create snapshot.')
    vm.stop()

    test_util.test_dsc('create snapshot and check')
    snapshots = test_obj_dict.get_volume_snapshot(root_volume_uuid)
    snapshots.set_utility_vm(vm)
    snapshots.create_snapshot('create_root_snapshot1')

    test_util.test_dsc('start vm')
    vm.destroy()
    vm.recover()
    vm.start()

    # Final teardown: remove the VM for good and clean tracked objects.
    vm.destroy()
    vm.expunge()
    test_obj_dict.rm_vm(vm)
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Do snapshot ops on VM root volume with VM ops successfully')
def test():
    """Check that local-storage capacity is identical before a VM is created
    and after it is destroyed and expunged on the same host.

    Requires the 'Delay' vm deletionPolicy; skips otherwise, and also skips
    when no Enabled/Connected host or primary storage is available.
    """
    test_util.test_dsc('Test storage capacity when using expunge vm')
    if conf_ops.get_global_config_value('vm', 'deletionPolicy') != 'Delay':
        test_util.test_skip('vm delete_policy is not Delay, skip test.')
        return

    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True

    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit=1)
    if not ps:
        test_util.test_skip('No Enabled/Connected primary storage was found, skip test.')
        return True

    host = host[0]
    ps = ps[0]
    host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap = host_res.availableCapacity

    vm = test_stub.create_vm(vm_name='basic-test-vm', host_uuid=host.uuid)
    test_obj_dict.add_vm(vm)
    time.sleep(1)
    vm.destroy()
    vm.expunge()

    host_res2 = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    # BUG FIX: read the post-expunge capacity from host_res2. The original
    # code re-read host_res.availableCapacity, so the two values were always
    # equal and the check could never fail.
    avail_cap2 = host_res2.availableCapacity
    if avail_cap != avail_cap2:
        test_util.test_fail('PS capacity is not same after create/expunge vm on host: %s. Capacity before create vm: %s, after expunge vm: %s ' % (host.uuid, avail_cap, avail_cap2))
    test_util.test_pass('Expunge VM Test Success')
def test():
    """Exercise HA-related global configs on a SharedMountPoint-backed VM.

    Steps:
      1. Set HA level 'NeverStop' on a stopped VM and expect HA to start it.
      2. Expect an exception when setting host.check.successRatio to -1.
      3. Lower neverStopVm.scan.interval, disable HA, stop the VM, and
         verify HA no longer restarts it.
    """
    global vm
    global origin_value
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore == None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)
    cond = res_ops.gen_query_conditions('type', '=', 'SharedMountPoint')
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(pss) == 0:
        # NOTE(review): ps_type is not defined in this function; this line
        # would raise NameError if ever reached — confirm against the
        # module-level globals.
        test_util.test_skip('Required %s ps to test' % (ps_type))
    ps_uuid = pss[0].uuid
    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    vm.stop()
    # With HA level NeverStop, the management node should restart the
    # stopped VM on its own.
    ha_ops.set_vm_instance_ha_level(vm.get_vm().uuid, 'NeverStop')
    cond = res_ops.gen_query_conditions('uuid', '=', vm.get_vm().uuid)
    # Poll up to 5 seconds for HA to bring the VM back to Running.
    for i in range(5):
        time.sleep(1)
        try:
            if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
                break
        except:
            test_util.test_logger('Retry until VM change to running')
    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state != vm_header.RUNNING:
        test_util.test_fail('set HA after stopped VM test fail')
    # An invalid ratio (-1) must be rejected by the config API.
    no_exception = True
    try:
        config_ops.change_global_config('ha', 'host.check.successRatio', -1)
        no_exception = True
    except:
        test_util.test_logger('Expected exception')
        no_exception = False
    if no_exception:
        test_util.test_fail('Expect exception while there is none')
    # Shorten the NeverStop scan interval so the negative check below only
    # has to wait ~30s; keep the original value for restoration elsewhere.
    origin_value = config_ops.change_global_config('ha', 'neverStopVm.scan.interval', '30')
    config_ops.change_global_config('ha', 'enable', 'false')
    vm.stop()
    cond = res_ops.gen_query_conditions('uuid', '=', vm.get_vm().uuid)
    # With HA disabled, the VM must stay Stopped for a full scan interval;
    # break early if the state ever leaves Stopped.
    for i in range(int(config_ops.get_global_config_value('ha', 'neverStopVm.scan.interval'))):
        time.sleep(1)
        try:
            if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state != vm_header.STOPPED:
                break
        except:
            test_util.test_logger('Retry until VM change to running')
    if res_ops.query_resource(res_ops.VM_INSTANCE, cond)[0].state == vm_header.RUNNING:
        test_util.test_fail('disable HA after stopped VM test fail')
    test_util.test_pass('set HA global config pass')
def test():
    """Destroy and expunge a VM; requires a non-Direct deletion policy."""
    policy = conf_ops.get_global_config_value('vm', 'deletionPolicy')
    if policy == 'Direct':
        test_util.test_skip('vm delete_policy is Direct, skip test.')
        return

    vm = test_stub.create_vm(vm_name='basic-test-vm')
    test_obj_dict.add_vm(vm)
    time.sleep(1)

    vm.destroy()
    vm.expunge()
    test_util.test_pass('Expunge VM Test Success')
def test():
    """Create a VM, destroy it, then expunge it for good.

    Skipped under the 'Direct' deletion policy, where destroy already
    removes the VM immediately.
    """
    if conf_ops.get_global_config_value('vm', 'deletionPolicy') == 'Direct':
        test_util.test_skip('vm delete_policy is Direct, skip test.')
        return

    new_vm = test_stub.create_vm(vm_name='basic-test-vm')
    test_obj_dict.add_vm(new_vm)
    time.sleep(1)
    new_vm.destroy()
    new_vm.expunge()
    test_util.test_pass('Expunge VM Test Success')
def test():
    """Turn off the cleanTraffic (ip spoofing) global config, remembering
    the original value so a later cleanup can restore it.
    """
    global test_obj_dict
    global Port
    global ct_original

    # close ip_spoofing; keep the original value, used to restore the original state
    current = con_ops.get_global_config_value('vm', 'cleanTraffic')
    ct_original = 'false' if current == 'false' else 'true'
    if ct_original == 'true':
        con_ops.change_global_config('vm', 'cleanTraffic', 'false')
def test():
    """IP-spoofing test with cleanTraffic enabled.

    Forces the cleanTraffic config on, then runs a script inside a guest VM
    that changes the guest IP and records whether spoofed traffic is blocked
    (result '1' in /home/ip_spoofing_result means blocked as expected).
    """
    global test_obj_dict
    global test_file_src
    global test_file_des
    global ct_original
    # Remember the original cleanTraffic value and make sure it is enabled
    # for the duration of the test.
    if con_ops.get_global_config_value('vm', 'cleanTraffic') == 'false':
        ct_original = 'false'
        con_ops.change_global_config('vm', 'cleanTraffic', 'true')
    else:
        ct_original = 'true'
    vm = test_stub.create_basic_vm()
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_inv = vm.get_vm()
    vm_ip = vm_inv.vmNics[0].ip
    # Pick a spoof target IP that differs from the VM's real IP.
    new_vm_ip = '172.20.1.1'
    if new_vm_ip == vm_ip:
        new_vm_ip = '172.20.1.2'
    test_util.test_dsc("Prepare Test File")
    # Instantiate the test script template by substituting the management
    # node IP, the VM's real IP, and the spoof IP.
    cmd = "cp %s %s" % (test_file_src, test_file_des)
    os.system(cmd)
    # NOTE(review): node_ip is not defined in this function — presumably a
    # module-level global; verify against the surrounding file.
    cmd = "sed -i \"s/TemplateNodeIP/%s/g\" %s" % (node_ip, test_file_des)
    os.system(cmd)
    cmd = "sed -i \"s/TemplateOriginalIP/%s/g\" %s" % (vm_ip, test_file_des)
    os.system(cmd)
    cmd = "sed -i \"s/TemplateTestIP/%s/g\" %s" % (new_vm_ip, test_file_des)
    os.system(cmd)
    target_file = "/home/change_ip_test.sh"
    test_stub.scp_file_to_vm(vm_inv, test_file_des, target_file)
    cmd = "chmod +x %s" % target_file
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180)
    # Run the script inside the guest, then give it a minute to finish.
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', target_file, 180)
    time.sleep(60)
    cmd = "cat /home/ip_spoofing_result"
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180)
    # The script writes "1" when spoofed traffic was blocked.
    if rsp[0] != "1":
        test_util.test_fail(rsp)
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    # Restore the original cleanTraffic value and remove the temp script.
    con_ops.change_global_config('vm', 'cleanTraffic', ct_original)
    os.system('rm -f %s' % test_file_des)
    test_util.test_pass('IP Spoofing Test Success')
def test():
    """IP-spoofing test with cleanTraffic enabled (duplicate variant).

    Forces the cleanTraffic config on, runs a guest-side script that changes
    the guest IP, and expects /home/ip_spoofing_result to contain '1',
    meaning the spoofed traffic was blocked.
    """
    global test_obj_dict
    global test_file_src
    global test_file_des
    global ct_original
    # Remember the original cleanTraffic value; enable it if currently off.
    if con_ops.get_global_config_value('vm', 'cleanTraffic') == 'false':
        ct_original = 'false'
        con_ops.change_global_config('vm', 'cleanTraffic', 'true')
    else:
        ct_original = 'true'
    vm = test_stub.create_basic_vm()
    test_obj_dict.add_vm(vm)
    vm.check()
    vm_inv = vm.get_vm()
    vm_ip = vm_inv.vmNics[0].ip
    # Spoof target IP must differ from the VM's real IP.
    new_vm_ip = '172.20.1.1'
    if new_vm_ip == vm_ip:
        new_vm_ip = '172.20.1.2'
    test_util.test_dsc("Prepare Test File")
    # Fill in the script template with node IP, real IP, and spoof IP.
    cmd = "cp %s %s" % (test_file_src, test_file_des)
    os.system(cmd)
    # NOTE(review): node_ip is not defined locally — assumed module global;
    # confirm against the surrounding file.
    cmd = "sed -i \"s/TemplateNodeIP/%s/g\" %s" % (node_ip, test_file_des)
    os.system(cmd)
    cmd = "sed -i \"s/TemplateOriginalIP/%s/g\" %s" % (vm_ip, test_file_des)
    os.system(cmd)
    cmd = "sed -i \"s/TemplateTestIP/%s/g\" %s" % (new_vm_ip, test_file_des)
    os.system(cmd)
    target_file = "/home/change_ip_test.sh"
    test_stub.scp_file_to_vm(vm_inv, test_file_des, target_file)
    cmd = "chmod +x %s" % target_file
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180)
    # Execute the script in the guest, then allow time for it to complete.
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', target_file, 180)
    time.sleep(60)
    cmd = "cat /home/ip_spoofing_result"
    rsp = test_lib.lib_execute_ssh_cmd(vm_ip, 'root', 'password', cmd, 180)
    # "1" indicates the spoofed traffic was blocked as expected.
    if rsp[0] != "1":
        test_util.test_fail(rsp)
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    # Restore the original config value and remove the generated script.
    con_ops.change_global_config('vm', 'cleanTraffic', ct_original)
    os.system('rm -f %s' % test_file_des)
    test_util.test_pass('IP Spoofing Test Success')
def test():
    """Verify vm.cacheMode can be changed to 'writeback' and restored.

    Keeps a basic VM alive while toggling the kvm global config, checking
    the config value after both the change and the restore.
    """
    global test_obj_dict
    target_value = "writeback"

    test_vm = test_stub.create_basic_vm()
    test_obj_dict.add_vm(test_vm)
    test_vm.check()

    # Previous value is returned by change_global_config; used to restore.
    pre_value = conf_ops.change_global_config("kvm", "vm.cacheMode", target_value)
    current = conf_ops.get_global_config_value("kvm", "vm.cacheMode")
    if current != target_value:
        test_util.test_fail('change value of vm.cacheMode failed. target value is %s, real value is %s' %(target_value, current))

    #set back to defualt
    conf_ops.change_global_config("kvm", "vm.cacheMode", pre_value)
    current = conf_ops.get_global_config_value("kvm", "vm.cacheMode")
    if current != pre_value:
        test_util.test_fail('Reset vm.cacheMode Value to Default Fail.')

    test_vm.destroy()
    test_util.test_pass('vm.cacheMode Change Pass.')
def check(self):
    '''
    Will check snapshot tree correctness

    To be noticed. The tree depth changing will impact the snapshots
    who have been created. So if the snapshots are created before
    incrementalSnapshot.maxNum is changed. The checker results will
    be untrustable.
    '''
    import json
    import zstacklib.utils.jsonobject as jsonobject
    super(zstack_kvm_snapshot_tree_checker, self).check()
    snapshots = self.test_obj.get_snapshot_list()
    # Nothing to verify until at least one snapshot exists.
    if not self.test_obj.get_snapshot_head():
        test_util.test_logger('Snapshot is not created, skipped checking')
        return self.judge(self.exp_result)
    utility_vm = self.test_obj.get_utility_vm()
    vm_inv = utility_vm.get_vm()
    volume_obj = self.test_obj.get_target_volume()
    volume_uuid = volume_obj.get_volume().uuid
    # A deleted volume (or a destroyed VM owning a Root volume) has no
    # queryable snapshot tree, so fall back to the expected result.
    if volume_obj.get_state() == vl_header.DELETED or \
            (volume_obj.get_volume().type == 'Root' and \
            volume_obj.get_target_vm().get_state() == vm_header.DESTROYED):
        test_util.test_logger(
            'Checker result: target volume is deleted, can not get get and check snapshot tree status'
        )
        return self.judge(self.exp_result)
    vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid)
    # Allowed incremental depth comes from the global config; the tree may
    # be one level deeper than maxNum (the root counts as an extra level).
    tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', \
            'incrementalSnapshot.maxNum')
    for vol_tree in vol_trees:
        # Round-trip through JSON to get plain dicts out of the jsonobject.
        tree = json.loads(jsonobject.dumps(vol_tree))['tree']
        tree_max_depth = find_tree_max_depth(tree)
        if tree_max_depth > (int(tree_allowed_depth) + 1):
            test_util.test_logger(\
'Checker result: volume: %s snapshot tree: %s depth checking failure. The max \
allowed depth is : %s. But we get: %s' % (volume_uuid, tree['inventory'].uuid, \
            tree_allowed_depth, str(tree_max_depth - 1)))
            return self.judge(False)
    test_util.test_logger(\
'Checker result: volume: %s snapshot tree depth checking pass. The max allowed \
depth is : %s. The real snapshot max depth is: %s' % \
            (volume_uuid, tree_allowed_depth, str(tree_max_depth - 1)))
    return self.judge(True)
def check(self):
    '''
    Will check snapshot tree correctness

    To be noticed. The tree depth changing will impact the snapshots
    who have been created. So if the snapshots are created before
    incrementalSnapshot.maxNum is changed. The checker results will
    be untrustable.
    '''
    import json
    import zstacklib.utils.jsonobject as jsonobject
    super(zstack_kvm_snapshot_tree_checker, self).check()
    snapshots = self.test_obj.get_snapshot_list()
    # No snapshot has been created yet — nothing to check.
    if not self.test_obj.get_snapshot_head():
        test_util.test_logger('Snapshot is not created, skipped checking')
        return self.judge(self.exp_result)
    utility_vm = self.test_obj.get_utility_vm()
    vm_inv = utility_vm.get_vm()
    volume_obj = self.test_obj.get_target_volume()
    volume_uuid = volume_obj.get_volume().uuid
    # Deleted volume / destroyed VM with a Root volume: the tree cannot be
    # queried, so return the expected result unchanged.
    if volume_obj.get_state() == vl_header.DELETED or \
            (volume_obj.get_volume().type == 'Root' and \
            volume_obj.get_target_vm().get_state() == vm_header.DESTROYED):
        test_util.test_logger('Checker result: target volume is deleted, can not get get and check snapshot tree status')
        return self.judge(self.exp_result)
    vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid)
    # maxNum limits the incremental chain; the tree itself may be maxNum+1
    # levels deep because the root node counts as one level.
    tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', \
            'incrementalSnapshot.maxNum')
    for vol_tree in vol_trees:
        # Convert the jsonobject into plain dicts for traversal.
        tree = json.loads(jsonobject.dumps(vol_tree))['tree']
        tree_max_depth = find_tree_max_depth(tree)
        if tree_max_depth > (int(tree_allowed_depth) + 1):
            test_util.test_logger(\
'Checker result: volume: %s snapshot tree: %s depth checking failure. The max \
allowed depth is : %s. But we get: %s' % (volume_uuid, tree['inventory'].uuid, \
            tree_allowed_depth, str(tree_max_depth - 1)))
            return self.judge(False)
    test_util.test_logger(\
'Checker result: volume: %s snapshot tree depth checking pass. The max allowed \
depth is : %s. The real snapshot max depth is: %s' % \
            (volume_uuid, tree_allowed_depth, str(tree_max_depth - 1)))
    return self.judge(True)
def test():
    """Assert local-storage available capacity is restored after a VM is
    created, destroyed, and expunged on a chosen host.

    Needs the 'Delay' deletionPolicy so expunge is a distinct step; skips
    when no Enabled/Connected host or primary storage exists.
    """
    test_util.test_dsc('Test storage capacity when using expunge vm')
    if conf_ops.get_global_config_value('vm', 'deletionPolicy') != 'Delay':
        test_util.test_skip('vm delete_policy is not Delay, skip test.')
        return

    zone_uuid = res_ops.query_resource(res_ops.ZONE)[0].uuid
    cond = res_ops.gen_query_conditions('state', '=', 'Enabled')
    cond = res_ops.gen_query_conditions('status', '=', 'Connected', cond)
    host = res_ops.query_resource_with_num(res_ops.HOST, cond, limit=1)
    if not host:
        test_util.test_skip('No Enabled/Connected host was found, skip test.')
        return True

    ps = res_ops.query_resource_with_num(res_ops.PRIMARY_STORAGE, cond, limit=1)
    if not ps:
        test_util.test_skip(
            'No Enabled/Connected primary storage was found, skip test.')
        return True

    host = host[0]
    ps = ps[0]
    host_res = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    avail_cap = host_res.availableCapacity

    vm = test_stub.create_vm(vm_name='basic-test-vm', host_uuid=host.uuid)
    test_obj_dict.add_vm(vm)
    time.sleep(1)
    vm.destroy()
    vm.expunge()

    host_res2 = vol_ops.get_local_storage_capacity(host.uuid, ps.uuid)[0]
    # BUG FIX: the post-expunge capacity must come from host_res2; the
    # original read host_res again, making the assertion vacuous.
    avail_cap2 = host_res2.availableCapacity
    if avail_cap != avail_cap2:
        test_util.test_fail(
            'PS capacity is not same after create/expunge vm on host: %s. Capacity before create vm: %s, after expunge vm: %s ' % (host.uuid, avail_cap, avail_cap2))
    test_util.test_pass('Expunge VM Test Success')
def test():
    """Flavor-driven VM migration test that times the migration while an
    agent fault (configured via a local stub HTTP store) is in effect.

    The CASE_FLAVOR environment variable selects the primary-storage type,
    the faulted agent URL, and the agent action/behavior.
    """
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    global agent_url
    global vm
    global live_migration
    ps_type = flavor['ps']
    # Local storage needs liveMigrationWithStorage enabled; remember the
    # old value (module global) so it can be restored by cleanup code.
    if ps_type == "Local":
        live_migration = config_ops.get_global_config_value('localStoragePrimaryStorage', 'liveMigrationWithStorage.allow')
        config_ops.change_global_config('localStoragePrimaryStorage', 'liveMigrationWithStorage.allow', 'true')
    imagestore = test_lib.lib_get_image_store_backup_storage()
    if imagestore == None:
        test_util.test_skip('Required imagestore to test')
    image_uuid = test_stub.get_image_by_bs(imagestore.uuid)
    cond = res_ops.gen_query_conditions('type', '=', ps_type)
    pss = res_ops.query_resource(res_ops.PRIMARY_STORAGE, cond)
    if len(pss) == 0:
        test_util.test_skip('Required %s ps to test' % (ps_type))
    ps_uuid = pss[0].uuid
    vm = test_stub.create_vm(image_uuid=image_uuid, ps_uuid=ps_uuid)
    agent_url = flavor['agent_url']
    agent_action = flavor['agent_action']
    # agent_time is the expected minimum duration (ms) implied by the fault.
    # NOTE(review): agent_time is unbound if agent_action is neither 1 nor 2
    # — presumably the flavors only use those two values; confirm.
    if agent_action == 1:
        agent_time = (24*60*60-60)*1000
    elif agent_action == 2:
        agent_time = 360 * 1000
    # Register the fault for this VM with the local stub store service.
    rsp = dep_ops.json_post("http://127.0.0.1:8888/test/api/v1.0/store/create", simplejson.dumps({"key": vm.get_vm().uuid, "value": '{"%s":%s}' % (agent_url, agent_action)}))
    candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm.get_vm().uuid)
    start = time.time()
    if candidate_hosts != None and test_lib.lib_check_vm_live_migration_cap(vm.get_vm()):
        vm_ops.migrate_vm(vm.get_vm().uuid, candidate_hosts.inventories[0].uuid)
    else:
        test_util.test_skip('Required migratable host to test')
    end = time.time()
    # The migration must take at least half of the configured fault window;
    # a shorter run means the fault was not actually exercised.
    if end - start < agent_time / 2 / 1000:
        test_util.test_fail('execution time too short %s' % (end - start))
def _config_sp_depth(self):
    """Return the global incrementalSnapshot.maxNum value (the configured
    maximum incremental snapshot depth)."""
    return conf_ops.get_global_config_value("volumeSnapshot", 'incrementalSnapshot.maxNum')
def running_vm_operations(vm, bss):
    """Exercise a battery of operations against a running VM.

    Covers: NUMA-gated instance-offering update, HA level set/delete,
    stop/start/reboot/pause/resume/cold-stop, cloning, (live) migration,
    QGA-gated password change, root-volume snapshot create/use/delete,
    shared common operations, and finally destroy/recover/expunge.

    :param vm: test VM wrapper object (provides get_vm(), destroy(), expunge())
    :param bss: backup storages, forwarded to common_operations()
    """
    numa = config_ops.get_global_config_value('vm', 'numa')
    live_migration = config_ops.get_global_config_value('localStoragePrimaryStorage', 'liveMigrationWithStorage.allow')
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    vm_uuid = vm.get_vm().uuid
    #change vm's instanceoffering
    if numa == 'false':
        try:
            # BUG FIX: was 'vm_uuic' (a NameError); the bare except then
            # swallowed the NameError, so the negative check passed for the
            # wrong reason. Use the real vm_uuid so the API itself rejects
            # the update while NUMA is disabled.
            vm_ops.update_vm(vm_uuid, 2, 2048*1024*1024)
            test_util.test_fail('Test Fail.Cannot change instanceoffering of running vm when NUMA is false')
        except:
            # Enable NUMA, reboot, then the update must succeed.
            config_ops.change_global_config('vm', 'numa', 'true')
            vm_ops.reboot_vm(vm_uuid)
            vm_ops.update_vm(vm_uuid, 2, 2048*1024*1024)
    #Change vm's status;set ha level/stop/del ha level/reboot/pause/resume/force stop
    ha_ops.set_vm_instance_ha_level(vm_uuid, 'NeverStop')
    vm_ops.stop_vm(vm_uuid)
    ha_ops.del_vm_instance_ha_level(vm_uuid)
    vm_ops.stop_vm(vm_uuid)
    vm_ops.start_vm(vm_uuid)
    vm_ops.reboot_vm(vm_uuid)
    vm_ops.suspend_vm(vm_uuid)
    vm_ops.resume_vm(vm_uuid)
    vm_ops.stop_vm(vm_uuid, 'cold')
    vm_ops.start_vm(vm_uuid)
    #clone vms
    vm_ops.clone_vm(vm_uuid, ['vm-1', 'vm-2', 'vm-3'], 'InstantStart')
    #migrate
    candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm_uuid)
    migrate_host_uuids = []
    if candidate_hosts == None:
        pass
    else:
        for host in candidate_hosts.inventories:
            migrate_host_uuids.append(host.uuid)
    if ps.type == 'LocalStorage':
        if live_migration == 'false':
            try:
                # Expected to fail while live migration with storage is off.
                vm_ops.migrate_vm(vm_uuid, migrate_host_uuids[0])
                test_util.test_fail('Test Fail.Cannot migrate localstorage vm when liveMigrationWithStorage is false.'
                                    )
            except:
                config_ops.change_global_config('localStoragePrimaryStorage', 'liveMigrationWithStorage.allow', 'true')
        else:
            vm_ops.migrate_vm(vm_uuid, migrate_host_uuids[0])
            test_util.test_logger('migrate vm success')
    else:
        vm_ops.migrate_vm(vm_uuid, migrate_host_uuids[0])
        test_util.test_logger('migrate vm success')
    #change vm's password(qga)
    try:
        # Must fail until the QEMU guest agent is enabled on the VM.
        vm_ops.change_vm_password(vm_uuid, 'root', 'testpassword')
        test_util.test_fail('Test Fail.Cannot change vm password when qga is disabled.')
    except:
        vm_ops.set_vm_qga_enable(vm_uuid)
        vm_ops.change_vm_password(vm_uuid, 'root', 'testpassword')
        vm_ops.set_vm_qga_disable(vm_uuid)
    #snapshot operations
    sp_option = test_util.SnapshotOption()
    vm_root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    root_volume_uuid = vm_root_volume_inv.uuid
    test_util.test_logger('rootvolumerunning:%s' % root_volume_uuid)
    sp_option.set_volume_uuid(root_volume_uuid)
    sp = vol_ops.create_snapshot(sp_option)
    # Reverting to a snapshot requires the VM to be stopped.
    vm_ops.stop_vm(vm_uuid)
    vol_ops.use_snapshot(sp.uuid)
    vm_ops.start_vm(vm_uuid)
    vol_ops.delete_snapshot(sp.uuid)
    common_operations(vm, bss, 'running')
    vm_ops.destroy_vm(vm_uuid)
    vm_ops.recover_vm(vm_uuid)
    vm_ops.start_vm(vm_uuid)
    vm.destroy()
    vm.expunge()
def running_vm_operations(vm, bss):
    """Run the running-VM operation suite (formatted twin of the sibling).

    Exercises instance-offering update under the NUMA config, HA level
    set/delete, lifecycle ops, cloning, migration, QGA-gated password
    change, root-volume snapshot ops, shared common operations, and the
    destroy/recover/expunge cycle.

    :param vm: test VM wrapper object (provides get_vm(), destroy(), expunge())
    :param bss: backup storages, forwarded to common_operations()
    """
    numa = config_ops.get_global_config_value('vm', 'numa')
    live_migration = config_ops.get_global_config_value(
        'localStoragePrimaryStorage', 'liveMigrationWithStorage.allow')
    ps = test_lib.lib_get_primary_storage_by_vm(vm.get_vm())
    vm_uuid = vm.get_vm().uuid
    #change vm's instanceoffering
    if numa == 'false':
        try:
            # BUG FIX: was 'vm_uuic' (NameError) — the bare except swallowed
            # it, so this negative check never exercised the real API.
            vm_ops.update_vm(vm_uuid, 2, 2048 * 1024 * 1024)
            test_util.test_fail(
                'Test Fail.Cannot change instanceoffering of running vm when NUMA is false'
            )
        except:
            config_ops.change_global_config('vm', 'numa', 'true')
            vm_ops.reboot_vm(vm_uuid)
            vm_ops.update_vm(vm_uuid, 2, 2048 * 1024 * 1024)
    #Change vm's status;set ha level/stop/del ha level/reboot/pause/resume/force stop
    # FIX: HA level value is 'NeverStop' (was lowercase 'neverstop',
    # inconsistent with the sibling implementation of this function).
    ha_ops.set_vm_instance_ha_level(vm_uuid, 'NeverStop')
    vm_ops.stop_vm(vm_uuid)
    ha_ops.del_vm_instance_ha_level(vm_uuid)
    vm_ops.stop_vm(vm_uuid)
    vm_ops.start_vm(vm_uuid)
    vm_ops.reboot_vm(vm_uuid)
    vm_ops.suspend_vm(vm_uuid)
    vm_ops.resume_vm(vm_uuid)
    vm_ops.stop_vm(vm_uuid, 'cold')
    vm_ops.start_vm(vm_uuid)
    #clone vms
    vm_ops.clone_vm(vm_uuid, ['vm-1', 'vm-2', 'vm-3'], 'InstantStart')
    #migrate
    candidate_hosts = vm_ops.get_vm_migration_candidate_hosts(vm_uuid)
    migrate_host_uuids = []
    if candidate_hosts == None:
        pass
    else:
        for host in candidate_hosts.inventories:
            migrate_host_uuids.append(host.uuid)
    if ps.type == 'LocalStorage':
        if live_migration == 'false':
            try:
                # Expected to fail while live migration with storage is off.
                vm_ops.migrate_vm(vm_uuid, migrate_host_uuids[0])
                test_util.test_fail(
                    'Test Fail.Cannot migrate localstorage vm when liveMigrationWithStorage is false.'
                )
            except:
                config_ops.change_global_config(
                    'localStoragePrimaryStorage',
                    'liveMigrationWithStorage.allow', 'true')
        else:
            vm_ops.migrate_vm(vm_uuid, migrate_host_uuids[0])
            test_util.test_logger('migrate vm success')
    else:
        vm_ops.migrate_vm(vm_uuid, migrate_host_uuids[0])
        test_util.test_logger('migrate vm success')
    #change vm's password(qga)
    try:
        # Must fail until the QEMU guest agent is enabled on the VM.
        vm_ops.change_vm_password(vm_uuid, 'root', 'testpassword')
        test_util.test_fail(
            'Test Fail.Cannot change vm password when qga is disabled.')
    except:
        vm_ops.set_vm_qga_enable(vm_uuid)
        vm_ops.change_vm_password(vm_uuid, 'root', 'testpassword')
        vm_ops.set_vm_qga_disable(vm_uuid)
    #snapshot operations
    sp_option = test_util.SnapshotOption()
    vm_root_volume_inv = test_lib.lib_get_root_volume(vm.get_vm())
    root_volume_uuid = vm_root_volume_inv.uuid
    test_util.test_logger('rootvolumerunning:%s' % root_volume_uuid)
    sp_option.set_volume_uuid(root_volume_uuid)
    sp = vol_ops.create_snapshot(sp_option)
    # Reverting to a snapshot requires the VM to be stopped.
    vm_ops.stop_vm(vm_uuid)
    vol_ops.use_snapshot(sp.uuid)
    vm_ops.start_vm(vm_uuid)
    vol_ops.delete_snapshot(sp.uuid)
    common_operations(vm, bss, 'running')
    vm_ops.destroy_vm(vm_uuid)
    vm_ops.recover_vm(vm_uuid)
    vm_ops.start_vm(vm_uuid)
    vm.destroy()
    vm.expunge()
def check(self): ''' Will check snapshot tree correctness To be noticed. The tree depth changing will impact the snapshots who have been created. So if the snapshots are created before incrementalSnapshot.maxNum is changed. The checker results will be untrustable. ''' import json import zstacklib.utils.jsonobject as jsonobject sp_tree_actual = [] sp_tree_zs = [] super(zstack_kvm_snapshot_tree_checker, self).check() snapshots = self.test_obj.get_snapshot_list() if not self.test_obj.get_snapshot_head(): test_util.test_logger('Snapshot is not created, skipped checking') return self.judge(self.exp_result) #utility_vm = self.test_obj.get_utility_vm() #vm_inv = utility_vm.get_vm() volume_obj = self.test_obj.get_target_volume() volume_uuid = volume_obj.get_volume().uuid #volume_installPath = volume_obj.get_volume().installPath #if not volume_installPath: # test_util.test_logger('Check result: [installPath] is Null for [volume uuid: ] %s. Can not check volume file existence' % volume.uuid) # return self.judge(self.exp_result) if volume_obj.get_state() == vl_header.DELETED: test_util.test_logger('Checker result: target volume is deleted, can not get get and check snapshot tree status') return self.judge(self.exp_result) if volume_obj.get_target_vm(): if volume_obj.get_volume().type == 'Root' and volume_obj.get_target_vm().get_state() == vm_header.DESTROYED: test_util.test_logger('Checker result: target volume is deleted, can not get get and check snapshot tree status') return self.judge(self.exp_result) ps_uuid = volume_obj.get_volume().primaryStorageUuid ps = test_lib.lib_get_primary_storage_by_uuid(ps_uuid) # Only Ceph has raw image format for non-Root volume if ps.type == inventory.CEPH_PRIMARY_STORAGE_TYPE: for snapshot in snapshots: backing_list = [] backing_file = '' sp_covered = 0 activate_host = '' devPath = snapshot.get_snapshot().primaryStorageInstallPath.encode('utf-8').split("ceph://")[1] volumePath = 
snapshot.get_snapshot().primaryStorageInstallPath.encode('utf-8').split("ceph://")[1].split("@")[0] for i in sp_tree_actual: if devPath in i: test_util.test_logger('%s already in sp list %s' % (devPath, backing_list)) sp_covered = 1 if sp_covered == 1: continue else: test_util.test_logger('%s not in current sp list, start checking its backing chain' % (devPath)) backing_list.append(devPath) cmd_info = "rbd info %s" % devPath for host in test_lib.lib_find_hosts_by_ps_uuid(ps_uuid): result = test_lib.lib_execute_ssh_cmd(host.managementIp, 'root', 'password', cmd_info) if result: activate_host = host.managementIp break if not activate_host: test_util.test_logger('No activate host found for %s' % (snapshot)) return self.judge(self.exp_result) while True: cmd_info = "rbd info %s" % devPath result = test_lib.lib_execute_ssh_cmd(activate_host, 'root', 'password', cmd_info) if result: tmp_list = get_snaps_for_raw_by_ip(volumePath, activate_host) else: test_util.test_logger('No activate host found for %s' % (snapshot)) return self.judge(self.exp_result) if tmp_list: for i in tmp_list: i = i.replace("\n", "") if i == snapshot.get_snapshot().primaryStorageInstallPath.split("ceph://")[1].split("@")[1]: test_util.test_logger('%s is found for volume %s' % (devPath, volumePath)) sp_covered = 1 elif not tmp_list: test_util.test_logger('No snapshots found for volume %s' % (volumePath)) return self.judge(False) #backing_file = backing_file.replace("\n", "") if sp_covered == 1: break else: test_util.test_logger('%s is not found for volume %s' % (devPath, volumePath)) return self.judge(False) sp_covered = 0 #backing_list = list(reversed(backing_list)) if not sp_tree_actual: test_util.test_logger('current sp list is empty, add %s into it' % (backing_list)) sp_tree_actual.append(backing_list) continue for i in sp_tree_actual: if backing_list == i: sp_covered = 1 if sp_covered == 1: test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, 
sp_tree_actual)) continue else: test_util.test_logger('%s not in current sp list %s, start comparing detailed list items' % (backing_list, sp_tree_actual)) for i in sp_tree_actual: count = min(len(backing_list), len(i)) - 1 tmp_count = 0 while tmp_count <= count: if backing_list[tmp_count] == i[tmp_count]: tmp_count += 1 sp_covered = 1 continue elif backing_list[tmp_count] != i[tmp_count]: sp_covered = 0 break if sp_covered == 0: if i == sp_tree_actual[-1]: test_util.test_logger('%s not in current sp list %s, add it into sp list' % (backing_list, sp_tree_actual)) sp_tree_actual.append(backing_list) break elif sp_covered == 1 and len(backing_list) > len(i): test_util.test_logger('%s is the superset of the list %s in current sp list %s, update current sp list' % (backing_list, i, sp_tree_actual)) sp_tree_actual.remove(i) sp_tree_actual.append(backing_list) break elif sp_covered == 1 and len(backing_list) <= len(i): test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual)) break test_util.test_logger('sp_tree_actual is %s' % (sp_tree_actual)) vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid) tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', \ 'incrementalSnapshot.maxNum') for vol_tree in vol_trees: tree = json.loads(jsonobject.dumps(vol_tree))['tree'] for leaf_node in get_leaf_nodes(tree): backing_list = [] backing_file = '' current_node = '' backing_file = leaf_node['inventory']['primaryStorageInstallPath'].split("ceph://")[1] backing_list.append(backing_file.encode('utf-8')) current_node = leaf_node while True: parent_node = get_parent_node(tree, current_node) if not parent_node: break backing_file = parent_node['inventory']['primaryStorageInstallPath'].split("ceph://")[1] backing_list.append(backing_file.encode('utf-8')) if parent_node.has_key('parentUuid'): current_node = parent_node continue else: break backing_list = list(reversed(backing_list)) 
sp_tree_zs.append(backing_list) test_util.test_logger('sp_tree_zs is %s' % (sp_tree_zs)) test_util.test_logger('compare the 2 sp lists - %s and %s' % (sp_tree_actual, sp_tree_zs)) sp_covered = 0 if len(sp_tree_actual) != len(sp_tree_zs): test_util.test_logger('%s is not same length as %s' % (sp_tree_actual, sp_tree_zs)) return self.judge(False) for i in sp_tree_actual: if i in sp_tree_zs: sp_covered = 1 test_util.test_logger('%s is in zs sp list %s' % (i, sp_tree_zs)) if i == sp_tree_actual[-1]: test_util.test_logger('all the items in %s are in zs sp list %s' % (sp_tree_actual, sp_tree_zs)) continue elif i not in sp_tree_zs: sp_covered = 0 test_util.test_logger('%s is not in zs sp list %s' % (i, sp_tree_zs)) return self.judge(False) elif ps.type == 'SharedBlock': for snapshot in snapshots: backing_list = [] backing_file = '' sp_covered = 0 devPath = "/dev/" + snapshot.get_snapshot().primaryStorageInstallPath.encode('utf-8').split("sharedblock://")[1] for i in sp_tree_actual: if devPath in i: test_util.test_logger('%s already in sp list %s' % (devPath, backing_list)) sp_covered = 1 if sp_covered == 1: continue else: test_util.test_logger('%s not in current sp list, start checking its backing chain' % (devPath)) backing_list.append(devPath) while True: activate_host = '' image_cached = 0 cmd_info = "lvs --nolocking --noheadings %s | awk '{print $3}'" % devPath cmd_activate = "lvchange -a y %s" % devPath cmd_unactivate = "lvchange -a n %s" % devPath for host in test_lib.lib_find_hosts_by_ps_uuid(ps_uuid): result = test_lib.lib_execute_ssh_cmd(host.managementIp, 'root', 'password', cmd_info) if "-a-" in result or "-ao-" in result: activate_host = host.managementIp backing_file = get_qcow_backing_file_by_ip(devPath, activate_host) break if not activate_host: activate_host = test_lib.lib_find_hosts_by_ps_uuid(ps_uuid)[0].managementIp test_lib.lib_execute_ssh_cmd(activate_host, 'root', 'password', cmd_activate) backing_file = get_qcow_backing_file_by_ip(devPath, 
activate_host) test_lib.lib_execute_ssh_cmd(activate_host, 'root', 'password', cmd_unactivate) backing_file = backing_file.replace("\n", "") if volume_obj.get_volume().type == 'Root': for image in test_lib.lib_get_not_vr_images(): if image.uuid == backing_file.split("/")[3]: test_util.test_logger('%s is against the Root volume and %s is the last snapshot and its backing file %s is image cache' % (snapshot, devPath, backing_file)) image_cached = 1 if image_cached == 1: break if backing_file: if len(backing_file.split("/")[3]) == 40: test_util.test_logger('%s is against the Data volume and %s is the last snapshot and its backing file %s is image cache from bs' % (snapshot, devPath, backing_file)) break else: backing_list.append(backing_file) devPath = backing_file else: break #if not backing_file: # if volume_obj.get_volume().type == 'Root': # test_util.test_logger('%s is against the Root volume, need to pop up the image cache %s' % (snapshot, devPath)) # backing_list.pop() # break #else: # backing_list.append(backing_file) # devPath = backing_file backing_list = list(reversed(backing_list)) if not sp_tree_actual: test_util.test_logger('current sp list is empty, add %s into it' % (backing_list)) sp_tree_actual.append(backing_list) continue for i in sp_tree_actual: if backing_list == i: sp_covered = 1 if sp_covered == 1: test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual)) continue else: test_util.test_logger('%s not in current sp list %s, start comparing detailed list items' % (backing_list, sp_tree_actual)) for i in sp_tree_actual: count = min(len(backing_list), len(i)) - 1 tmp_count = 0 while tmp_count <= count: if backing_list[tmp_count] == i[tmp_count]: tmp_count += 1 sp_covered = 1 continue elif backing_list[tmp_count] != i[tmp_count]: sp_covered = 0 break if sp_covered == 0: if i == sp_tree_actual[-1]: test_util.test_logger('%s not in current sp list %s, add it into sp list' % (backing_list, 
sp_tree_actual)) sp_tree_actual.append(backing_list) break elif sp_covered == 1 and len(backing_list) > len(i): test_util.test_logger('%s is the superset of the list %s in current sp list %s, update current sp list' % (backing_list, i, sp_tree_actual)) sp_tree_actual.remove(i) sp_tree_actual.append(backing_list) break elif sp_covered == 1 and len(backing_list) <= len(i): test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual)) break test_util.test_logger('sp_tree_actual is %s' % (sp_tree_actual)) vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid) tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', \ 'incrementalSnapshot.maxNum') for vol_tree in vol_trees: tree = json.loads(jsonobject.dumps(vol_tree))['tree'] for leaf_node in get_leaf_nodes(tree): backing_list = [] backing_file = '' current_node = '' backing_file = "/dev/" + leaf_node['inventory']['primaryStorageInstallPath'].split("sharedblock://")[1] backing_list.append(backing_file.encode('utf-8')) current_node = leaf_node while True: parent_node = get_parent_node(tree, current_node) if not parent_node: break backing_file = "/dev/" + parent_node['inventory']['primaryStorageInstallPath'].split("sharedblock://")[1] backing_list.append(backing_file.encode('utf-8')) if parent_node.has_key('parentUuid'): current_node = parent_node continue else: break backing_list = list(reversed(backing_list)) sp_tree_zs.append(backing_list) test_util.test_logger('sp_tree_zs is %s' % (sp_tree_zs)) test_util.test_logger('compare the 2 sp lists - %s and %s' % (sp_tree_actual, sp_tree_zs)) sp_covered = 0 if len(sp_tree_actual) != len(sp_tree_zs): test_util.test_logger('%s is not same length as %s' % (sp_tree_actual, sp_tree_zs)) return self.judge(False) for i in sp_tree_actual: if i in sp_tree_zs: sp_covered = 1 test_util.test_logger('%s is in zs sp list %s' % (i, sp_tree_zs)) if i == sp_tree_actual[-1]: test_util.test_logger('all the items in %s 
are in zs sp list %s' % (sp_tree_actual, sp_tree_zs)) continue elif i not in sp_tree_zs: sp_covered = 0 test_util.test_logger('%s is not in zs sp list %s' % (i, sp_tree_zs)) return self.judge(False) tree_max_depth = find_tree_max_depth(tree) if tree_max_depth > (int(tree_allowed_depth) + 1): test_util.test_logger(\ 'Checker result: volume: %s snapshot tree: %s depth checking failure. The max \ allowed depth is : %s. But we get: %s' % (volume_uuid, tree['inventory'].uuid, \ tree_allowed_depth, str(tree_max_depth - 1))) return self.judge(False) test_util.test_logger(\ 'Checker result: volume: %s snapshot tree depth checking pass. The max allowed \ depth is : %s. The real snapshot max depth is: %s' % \ (volume_uuid, tree_allowed_depth, str(tree_max_depth - 1))) elif ps.type == "LocalStorage": for snapshot in snapshots: backing_list = [] backing_file = '' sp_covered = 0 activate_host = '' devPath = snapshot.get_snapshot().primaryStorageInstallPath.encode('utf-8') for i in sp_tree_actual: if devPath in i: test_util.test_logger('%s already in sp list %s' % (devPath, backing_list)) sp_covered = 1 if sp_covered == '1': continue else: test_util.test_logger('%s not in current sp list, start checking its backing chain' % (devPath)) backing_list.append(devPath) cmd_info = "ls %s" % devPath for host in test_lib.lib_find_hosts_by_ps_uuid(ps_uuid): result = test_lib.lib_execute_ssh_cmd(host.managementIp, 'root', 'password', cmd_info) if result: activate_host = host.managementIp break if not activate_host: test_util.test_logger('No activate host found for %s' % (snapshot)) return self.judge(self.exp_result) while True: cmd_info = "ls %s" % devPath image_cache = 0 result = test_lib.lib_execute_ssh_cmd(activate_host, 'root', 'password', cmd_info) if result: backing_file = get_qcow_backing_file_by_ip(devPath, activate_host) else: test_util.test_logger('No activate host found for %s' % (snapshot)) return self.judge(self.exp_result) backing_file = backing_file.replace("\n", "") if 
volume_obj.get_volume().type == 'Root': for image in test_lib.lib_get_not_vr_images(): if image.uuid in backing_file.split("/")[-1]: test_util.test_logger('%s is against the Root volume and %s is the last snapshot and its backing file %s is image cache' % (snapshot, devPath, backing_file)) image_cached = 1 if image_cached == 1: break if backing_file: if len(backing_file.split("/")[-1].split(".")[0]) == 40: test_util.test_logger('%s is against the Data volume and %s is the last snapshot and its backing file %s is image cache from bs' % (snapshot, devPath, backing_file)) break else: backing_list.append(backing_file) devPath = backing_file else: break #if not backing_file: # if volume_obj.get_volume().type == 'Root': # test_util.test_logger('%s is against the Root volume, need to pop up the image cache %s' % (snapshot, devPath)) # backing_list.pop() # break #else: # backing_list.append(backing_file) # devPath = backing_file backing_list = list(reversed(backing_list)) if not sp_tree_actual: test_util.test_logger('current sp list is empty, add %s into it' % (backing_list)) sp_tree_actual.append(backing_list) continue for i in sp_tree_actual: if backing_list == i: sp_covered = 1 if sp_covered == '1': test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual)) continue else: test_util.test_logger('%s not in current sp list %s, start comparing detailed list items' % (backing_list, sp_tree_actual)) for i in sp_tree_actual: count = min(len(backing_list), len(i)) - 1 tmp_count = 0 while tmp_count <= count: if backing_list[tmp_count] == i[tmp_count]: tmp_count += 1 sp_covered = 1 continue elif backing_list[tmp_count] != i[tmp_count]: sp_covered = 0 break if sp_covered == 0: if i == sp_tree_actual[-1]: test_util.test_logger('%s not in current sp list %s, add it into sp list' % (backing_list, sp_tree_actual)) sp_tree_actual.append(backing_list) break elif sp_covered == 1 and len(backing_list) > len(i): 
test_util.test_logger('%s is the superset of the list %s in current sp list %s, update current sp list' % (backing_list, i, sp_tree_actual)) sp_tree_actual.remove(i) sp_tree_actual.append(backing_list) break elif sp_covered == 1 and len(backing_list) <= len(i): test_util.test_logger('%s already in current sp list %s, no need to add it anymore' % (backing_list, sp_tree_actual)) break test_util.test_logger('sp_tree_actual is %s' % (sp_tree_actual)) vol_trees = test_lib.lib_get_volume_snapshot_tree(volume_uuid) tree_allowed_depth = cfg_ops.get_global_config_value('volumeSnapshot', \ 'incrementalSnapshot.maxNum') for vol_tree in vol_trees: tree = json.loads(jsonobject.dumps(vol_tree))['tree'] for leaf_node in get_leaf_nodes(tree): backing_list = [] backing_file = '' current_node = '' backing_file = leaf_node['inventory']['primaryStorageInstallPath'] backing_list.append(backing_file.encode('utf-8')) current_node = leaf_node while True: parent_node = get_parent_node(tree, current_node) if not parent_node: break backing_file = parent_node['inventory']['primaryStorageInstallPath'] backing_list.append(backing_file.encode('utf-8')) if parent_node.has_key('parentUuid'): current_node = parent_node continue else: break backing_list = list(reversed(backing_list)) sp_tree_zs.append(backing_list) test_util.test_logger('sp_tree_zs is %s' % (sp_tree_zs)) test_util.test_logger('compare the 2 sp lists - %s and %s' % (sp_tree_actual, sp_tree_zs)) sp_covered = 0 if len(sp_tree_actual) != len(sp_tree_zs): test_util.test_logger('%s is not same length as %s' % (sp_tree_actual, sp_tree_zs)) return self.judge(False) for i in sp_tree_actual: if i in sp_tree_zs: sp_covered = 1 test_util.test_logger('%s is in zs sp list %s' % (i, sp_tree_zs)) if i == sp_tree_actual[-1]: test_util.test_logger('all the items in %s are in zs sp list %s' % (sp_tree_actual, sp_tree_zs)) continue elif i not in sp_tree_zs: sp_covered = 0 test_util.test_logger('%s is not in zs sp list %s' % (i, sp_tree_zs)) return 
self.judge(False) return self.judge(True)