def _create_checking_file(self):
    """Create this snapshot's checking-point file on the target volume.

    Runs a shell script inside the utility VM that mounts the target
    disk's first partition, touches ``self.checking_point`` under the
    checking-point folder, then unmounts.  A filesystem is first created
    on the volume when this snapshot has neither parent nor children
    (i.e. the volume is assumed untouched).
    """
    #make fs for volume, if it doesn't exist
    if not self.parent and not self.child_list:
        test_lib.lib_mkfs_for_volume(self.target_volume.get_volume().uuid, \
                self.utility_vm.get_vm())
    import tempfile
    with tempfile.NamedTemporaryFile() as script:
        # NOTE(review): device discovery parses `fdisk -l` and takes the
        # second-to-last "Disk" line — fragile if the utility VM's disk
        # layout differs; a sibling implementation uses
        # `ls --file-type /dev` instead.  The trailing `1` appended to
        # the device path selects the first partition.
        script.write('''
device=`fdisk -l|grep Disk|tail -2|head -1|awk '{print $2}'|awk -F: '{print $1}'`1
mkdir -p %s
mount $device %s
mkdir -p %s
touch %s/%s
umount %s
''' % (test_lib.WOODPECKER_MOUNT_POINT, \
        test_lib.WOODPECKER_MOUNT_POINT, \
        checking_point_folder, checking_point_folder, \
        self.checking_point, test_lib.WOODPECKER_MOUNT_POINT))
        # flush before the remote side reads the file by name
        script.flush()
        test_lib.lib_execute_shell_script_in_vm(self.utility_vm.get_vm(),
                script.name)
    # checking_point_folder is presumably a module-level constant defined
    # elsewhere in this file — TODO confirm.
    if self.parent:
        test_util.test_logger('[snapshot:] %s checking file: %s is created. Its [parent:] %s' % \
                (self.snapshot_option.get_name(), \
                self.checking_point, self.parent.get_snapshot().uuid))
    else:
        test_util.test_logger('[snapshot:] %s checking file: %s is created.' % (self.snapshot_option.get_name(), self.checking_point))
def test():
    """Verify the guest's path MTU follows the L3 network's MTU setting.

    Creates a VM on the public L3 network, checks the in-guest pmtu
    against the network's current MTU, then lowers the MTU to 1200,
    reboots the VM and re-checks.
    """
    global default_l3_mtu
    test_util.test_dsc('Create test vm and check. VR has DNS SNAT EIP PF and DHCP services')
    l3_name = os.environ.get('l3PublicNetworkName')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # remembered globally so error_cleanup()/env teardown can restore it
    # — TODO confirm against this module's cleanup hooks.
    default_l3_mtu = net_ops.get_l3_mtu(l3_net_uuid)
    vm = test_stub.create_vlan_vm(l3_name)
    test_obj_dict.add_vm(vm)
    vm.check()
    # tracepath reports the discovered path MTU; grep makes the script's
    # exit status the pass/fail signal.
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('tracepath -n yyk.net | tail -1 | grep "pmtu %s"' % (default_l3_mtu))
    script_file.close()
    # NOTE(review): if test_fail raises, os.unlink is skipped and the
    # temp script leaks — harmless for a failed test run.
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to check mtu in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)
    # shrink the MTU and make sure the guest picks it up after reboot
    net_ops.set_l3_mtu(l3_net_uuid, 1200)
    vm.reboot()
    vm.check()
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('tracepath -n yyk.net | tail -1 | grep "pmtu 1200"')
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to check mtu in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)
    vm.destroy()
    test_util.test_pass('Create VirtualRouter VM DNS DHCP SANT EIP PF Test Success')
def _create_checking_file(self):
    """Create this snapshot's checking-point file on the target volume.

    Runs a shell script inside the utility VM that mounts the target
    disk's first partition, touches ``self.checking_point`` under the
    checking-point folder, then unmounts.  A filesystem is first created
    on the volume when this snapshot has neither parent nor children
    (i.e. the volume is assumed untouched).
    """
    #make fs for volume, if it doesn't exist
    if not self.parent and not self.child_list:
        test_lib.lib_mkfs_for_volume(self.target_volume.get_volume().uuid, \
                self.utility_vm.get_vm())
    import tempfile
    with tempfile.NamedTemporaryFile() as script:
        # Device discovery: newest unpartitioned disk entry under /dev
        # (the most recently attached disk); the trailing `1` selects
        # its first partition.
        script.write('''
device=/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`1
mkdir -p %s
mount $device %s
mkdir -p %s
touch %s/%s
umount %s
''' % (test_lib.WOODPECKER_MOUNT_POINT, \
        test_lib.WOODPECKER_MOUNT_POINT, \
        checking_point_folder, checking_point_folder, \
        self.checking_point, test_lib.WOODPECKER_MOUNT_POINT))
        # flush before the remote side reads the file by name
        script.flush()
        test_lib.lib_execute_shell_script_in_vm(self.utility_vm.get_vm(),
                script.name)
    # checking_point_folder is presumably a module-level constant defined
    # elsewhere in this file — TODO confirm.
    if self.parent:
        test_util.test_logger('[snapshot:] %s checking file: %s is created. Its [parent:] %s' % \
                (self.snapshot_option.get_name(), \
                self.checking_point, self.parent.get_snapshot().uuid))
    else:
        test_util.test_logger('[snapshot:] %s checking file: %s is created.'% (self.snapshot_option.get_name(), self.checking_point))
def mount_disk_in_vm(self):
    """Mount the most recently attached disk's first partition at /mnt
    inside this object's VM.

    The mount is performed by pushing a small shell script into the VM.
    Returns ``self`` so the call can be chained.
    """
    import tempfile
    import os
    script_file = tempfile.NamedTemporaryFile(delete=False)
    # Device discovery: newest unpartitioned disk entry under /dev;
    # `${device}1` is its first partition.
    script_file.write('''device="/dev/`ls -ltr --file-type /dev | awk '$4~/disk/ {print $NF}' | grep -v '[[:digit:]]'| sort | tail -1`" \n mount ${device}1 /mnt''')
    script_file.close()
    # NOTE(review): the execution result is not checked here — a failed
    # mount is silent; siblings (attach_mount_volume) fail the test.
    test_lib.lib_execute_shell_script_in_vm(self.vm.vm, script_file.name)
    # Fix: delete=False files are never removed automatically — unlink
    # explicitly to avoid leaking a temp file per call.
    os.unlink(script_file.name)
    return self
def test():
    """Create several VMs in parallel, make a temp image from each, and
    check creation progress concurrently.

    One worker thread per VM runs create_temp_image; a paired checker
    thread runs check_create_temp_image_progress.  Requires an
    ImageStore backup storage.
    """
    global vms
    global images
    global threads
    global checker_threads
    global origin_interval
    global bs_type
    test_util.test_dsc('Create test vm and check')
    # one shared script: write 100MB of data so template creation has
    # measurable progress
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('dd if=/dev/zero of=/home/dd bs=1M count=100')
    script_file.close()
    for i in range(0, threads_num):
        vms[i] = test_stub.create_vlan_vm()
        vms[i].check()
        backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vms[i].vm)
        if backup_storage_list[0].type != 'ImageStoreBackupStorage':
            test_util.test_skip("Requires imagestore BS to test, skip testing")
        if not test_lib.lib_execute_shell_script_in_vm(vms[i].get_vm(), script_file.name):
            test_util.test_fail("fail to create data in [vm:] %s" % (vms[i].get_vm().uuid))
        test_obj_dict.add_vm(vms[i])
        vms[i].stop()
    # NOTE(review): skipped if test_skip/test_fail raises inside the
    # loop above — the temp script would leak in that case.
    os.unlink(script_file.name)
    for i in range(0, threads_num):
        threads[i] = threading.Thread(target=create_temp_image, args=(i, ))
        threads[i].start()
    for i in range(0, threads_num):
        checker_threads[i] = threading.Thread(target=check_create_temp_image_progress, args=(i, ))
        checker_threads[i].start()
    for i in range(0, threads_num):
        checker_threads[i].join()
        threads[i].join()
        images[i].check()
        vms[i].destroy()
        images[i].delete()
    # NOTE(review): unlike the sibling variant of this test, the checker
    # threads' results are not validated here — a failed progress check
    # would go unnoticed.  TODO confirm intentional.
    if bs_type == 'Ceph':
        # Ceph image cache cleanup needs time before config restore
        time.sleep(60)
    if bs_type == 'Ceph':
        conf_ops.change_global_config('ceph', 'imageCache.cleanup.interval', origin_interval)
    test_util.test_pass('Create Image Template Test Success')
def test():
    """Create several VMs in parallel, make a temp image from each, and
    check creation progress concurrently, validating checker results.

    One worker thread per VM runs create_temp_image; a paired checker
    thread runs check_create_temp_image_progress and records its result
    in checker_results.  Requires an ImageStore backup storage.
    """
    global vms
    global images
    global threads
    global checker_threads
    global origin_interval
    global bs_type
    test_util.test_dsc('Create test vm and check')
    # one shared script: write 100MB of data so template creation has
    # measurable progress
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('dd if=/dev/zero of=/home/dd bs=1M count=100')
    script_file.close()
    for i in range(0, threads_num):
        vms[i] = test_stub.create_vlan_vm()
        vms[i].check()
        backup_storage_list = test_lib.lib_get_backup_storage_list_by_vm(vms[i].vm)
        if backup_storage_list[0].type != 'ImageStoreBackupStorage':
            test_util.test_skip("Requires imagestore BS to test, skip testing")
        if not test_lib.lib_execute_shell_script_in_vm(vms[i].get_vm(), script_file.name):
            test_util.test_fail("fail to create data in [vm:] %s" % (vms[i].get_vm().uuid))
        test_obj_dict.add_vm(vms[i])
        vms[i].stop()
    os.unlink(script_file.name)
    for i in range(0, threads_num):
        threads[i] = threading.Thread(target=create_temp_image, args=(i, ))
        threads[i].start()
    for i in range(0, threads_num):
        checker_threads[i] = threading.Thread(target=check_create_temp_image_progress, args=(i, ))
        checker_threads[i].start()
    for i in range(0, threads_num):
        checker_threads[i].join()
        threads[i].join()
        images[i].check()
        vms[i].destroy()
        images[i].delete()
    for i in range(0, threads_num):
        # Fix: compare with `is None` (identity), not `== None` — the
        # idiomatic and reliable None check (PEP 8).
        if checker_results[i] is None:
            test_util.test_fail("Image checker thread %s fail" % (i))
    if bs_type == 'Ceph':
        # Ceph image cache cleanup needs time before config restore
        time.sleep(60)
    if bs_type == 'Ceph':
        conf_ops.change_global_config('ceph', 'imageCache.cleanup.interval', origin_interval)
    test_util.test_pass('Create Image Template Test Success')
def test():
    """Verify the guest's path MTU follows the L3 network's MTU setting.

    Creates a VM on the public L3 network, checks the in-guest pmtu
    against the network's current MTU, then lowers the MTU to 1200,
    reboots the VM and re-checks.
    """
    global default_l3_mtu
    test_util.test_dsc(
        'Create test vm and check. VR has DNS SNAT EIP PF and DHCP services')
    l3_name = os.environ.get('l3PublicNetworkName')
    l3_net_uuid = test_lib.lib_get_l3_by_name(l3_name).uuid
    # remembered globally so teardown can restore it — TODO confirm
    # against this module's cleanup hooks.
    default_l3_mtu = net_ops.get_l3_mtu(l3_net_uuid)
    vm = test_stub.create_vlan_vm(l3_name)
    test_obj_dict.add_vm(vm)
    vm.check()
    # tracepath reports the discovered path MTU; grep makes the script's
    # exit status the pass/fail signal.
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('tracepath -n yyk.net | tail -1 | grep "pmtu %s"' % (default_l3_mtu))
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to check mtu in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)
    # shrink the MTU and make sure the guest picks it up after reboot
    net_ops.set_l3_mtu(l3_net_uuid, 1200)
    vm.reboot()
    vm.check()
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('tracepath -n yyk.net | tail -1 | grep "pmtu 1200"')
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to check mtu in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)
    vm.destroy()
    test_util.test_pass(
        'Create VirtualRouter VM DNS DHCP SANT EIP PF Test Success')
def attach_mount_volume(volume, vm, mount_point):
    """Attach `volume` to `vm` and mount its first partition at `mount_point`.

    Fails the test if the in-VM mount script does not succeed.

    :param volume: test volume object (must expose attach/get_volume)
    :param vm: test vm object (must expose get_vm)
    :param mount_point: absolute path inside the guest to mount at
    """
    volume.attach(vm)
    import tempfile
    script_file = tempfile.NamedTemporaryFile(delete=False)
    # Device discovery: newest unpartitioned disk entry under /dev (the
    # just-attached volume); `${device}1` is its first partition.
    script_file.write('''
mkdir -p %s
device="/dev/`ls -ltr --file-type /dev | awk '$4~/disk/ {print $NF}' | grep -v '[[:digit:]]' | tail -1`"
mount ${device}1 %s
''' % (mount_point, mount_point))
    script_file.close()
    vm_inv = vm.get_vm()
    # Fix: unlink in `finally` — previously the temp script leaked when
    # test_fail raised on the failure path.
    try:
        if not test_lib.lib_execute_shell_script_in_vm(vm_inv, script_file.name):
            test_util.test_fail("mount operation failed in [volume:] %s in [vm:] %s" % (volume.get_volume().uuid, vm_inv.uuid))
    finally:
        os.unlink(script_file.name)
def attach_mount_volume(volume, vm, mount_point):
    """Attach `volume` to `vm`, then mount its first partition at
    `mount_point` by running a small shell script inside the guest.

    Fails the test if the in-VM mount script does not succeed.
    """
    volume.attach(vm)
    import tempfile
    # Build the guest-side commands first: create the mount point, find
    # the newest unpartitioned disk under /dev, mount its partition 1.
    shell_cmds = '''
mkdir -p %s
device="/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`"
mount ${device}1 %s
''' % (mount_point, mount_point)
    tmp_script = tempfile.NamedTemporaryFile(delete=False)
    tmp_script.write(shell_cmds)
    tmp_script.close()
    vm_inv = vm.get_vm()
    mounted_ok = test_lib.lib_execute_shell_script_in_vm(vm_inv, tmp_script.name)
    if not mounted_ok:
        test_util.test_fail("mount operation failed in [volume:] %s in [vm:] %s" % (volume.get_volume().uuid, vm_inv.uuid))
    os.unlink(tmp_script.name)
def _remove_checking_file(self):
    """Remove the checking-point folder from the target volume.

    Mounts the target disk's first partition inside the utility VM and
    deletes the checking-point folder.  A failure is logged, not raised:
    the volume may legitimately have no partition yet.
    """
    import tempfile
    with tempfile.NamedTemporaryFile() as script:
        # `|| exit 1` after mount makes the script fail fast when the
        # volume has no mountable partition.
        script.write('''
device=/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`1
mkdir -p %s
mount $device %s || exit 1
/bin/rm -rf %s
umount %s
''' % (test_lib.WOODPECKER_MOUNT_POINT, \
        test_lib.WOODPECKER_MOUNT_POINT, \
        checking_point_folder, \
        test_lib.WOODPECKER_MOUNT_POINT))
        # flush before the remote side reads the file by name
        script.flush()
        if not test_lib.lib_execute_shell_script_in_vm(\
                self.utility_vm.get_vm(), script.name):
            # best-effort cleanup: an unpartitioned volume is expected
            test_util.test_logger('cleanup checking point failed. It might be because there is not any partition in the target volume. It is harmless.')
def test():
    """EIP lifecycle test: attach an EIP/VIP to a VM, detach the nic's
    L3, re-attach, and verify connectivity at each step.
    """
    test_util.test_dsc('Create test vm with EIP and check.')
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    pri_l3_name = os.environ.get('l3VlanNetworkName1')
    pri_l3_uuid = test_lib.lib_get_l3_by_name(pri_l3_name).uuid
    pub_l3_name = os.environ.get('l3PublicNetworkName')
    pub_l3_uuid = test_lib.lib_get_l3_by_name(pub_l3_name).uuid
    vm_nic = vm.vm.vmNics[0]
    vm_nic_uuid = vm_nic.uuid
    vip = test_stub.create_vip('create_eip_test', pub_l3_uuid)
    test_obj_dict.add_vip(vip)
    eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid,
            vnic_uuid=vm_nic_uuid, vm_obj=vm)
    vip.attach_eip(eip)
    vm.check()
    # VIP must be reachable from outside once the EIP is attached
    if not test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to be able to ping vip while it fail')
    # verify outbound connectivity from inside the guest
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('ping -c 4 223.5.5.5')
    script_file.close()
    # NOTE(review): the message says "tracepath" but the script pings —
    # likely copy/paste from the MTU test.
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to tracepath in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)
    # Background watchdog in the guest: re-run dhclient whenever the
    # default route disappears (keeps the nic usable across detach /
    # attach).  Executed with timeout=2 so it's left running; the raised
    # timeout is expected and ignored.
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('''
while [ 1 -eq 1 ]; do
    route -n | grep 0.0.0.0
    if [ $? -ne 0 ]; then
        pkill dhclient
        dhclient
    fi
    sleep 40
done
''')
    script_file.close()
    try:
        test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name, timeout=2)
    except:
        test_util.test_logger('ignore')
    os.unlink(script_file.name)
    # detach the nic's L3: the VIP must stop answering
    net_ops.detach_l3(vm_nic_uuid)
    if test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to not be able to ping vip while it success')
    #vm.stop()
    net_ops.attach_l3(pri_l3_uuid, vm.get_vm().uuid)
    #vm.start()
    vm.check()
    # the re-attached nic is a new object — refresh uuid before
    # re-binding the EIP
    vm_nic = vm.vm.vmNics[0]
    vm_nic_uuid = vm_nic.uuid
    net_ops.attach_eip(eip.get_eip().uuid, vm_nic_uuid)
    vm.check()
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('ping -c 4 223.5.5.5')
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to tracepath in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)
    if not test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to be able to ping vip while it fail')
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    # after the VM is gone the VIP must stop answering again
    if test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail(
            'not expected to be able to ping vip while it succeed')
    eip.delete()
    vip.delete()
    test_obj_dict.rm_vip(vip)
    test_util.test_pass('Create EIP for VM Success')
def test():
    """Create an image template from a stopped VM and verify that its
    task progress is well-formed: appears within ~10s, stays in [0,100],
    and never decreases; the image must end up Ready.
    """
    global vm1
    global origin_interval
    global bs_type
    global image
    test_util.test_dsc('Create test vm and check')
    vm1 = test_stub.create_vlan_vm()
    vm1.check()
    # write 300MB of data so template creation has measurable progress
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('dd if=/dev/zero of=/home/dd bs=1M count=300')
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm1.get_vm(), script_file.name):
        test_util.test_fail("fail to create data in [vm:] %s" % (vm1.get_vm().uuid))
    os.unlink(script_file.name)
    test_obj_dict.add_vm(vm1)
    vm1.stop()
    # image creation runs in a worker thread so progress can be polled
    # from here
    thread = threading.Thread(target=create_temp_image, args=())
    thread.start()
    # poll up to ~10s for the image to enter Creating
    for i in range(0, 100):
        time.sleep(0.1)
        image_cond = res_ops.gen_query_conditions("status", '=', "Creating")
        image_cond = res_ops.gen_query_conditions("name", '=', "test_create_image_template_progress", image_cond)
        image_query = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
                None, fields=['uuid'])
        if len(image_query) > 0:
            break
    if len(image_query) <= 0:
        test_util.test_fail("image is not in creating after 10 seconds")
    # poll until the task-progress record exists; get_task_progress
    # raises while it is not ready yet — the bare except is the retry
    # signal here.
    for i in range(0, 100):
        try:
            progress = res_ops.get_task_progress(image_query[0].uuid)
            break
        except:
            test_util.test_logger('task progress still not ready')
            time.sleep(0.1)
    if int(progress.progress) < 0 or int(progress.progress) > 100:
        test_util.test_fail("Progress of task should be between 0 and 100, while it actually is %s" % (progress.progress))
    # progress must be monotonically non-decreasing; get_task_progress
    # raising is taken as "task finished" and ends the loop.
    for i in range(0, 3600):
        try:
            last_progress = progress
            progress = res_ops.get_task_progress(image_query[0].uuid)
            if int(progress.progress) < int(last_progress.progress):
                test_util.test_fail("Progress (%s) of task is smaller than last time (%s)" % (progress.progress, last_progress.progress))
        except:
            break
    image_cond = res_ops.gen_query_conditions("uuid", '=', image_query[0].uuid)
    image_query2 = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
            None, fields=['status'])
    if image_query2[0].status != "Ready":
        test_util.test_fail("Image should be ready when no progress anymore")
    thread.join()
    image.check()
    vm1.destroy()
    image.delete()
    if bs_type == 'Ceph':
        # Ceph needs time to clean its image cache before re-checking
        time.sleep(60)
    # re-check image state after deletion — TODO confirm whether this
    # was meant to run only for Ceph; the mangled source is ambiguous.
    image.check()
    if bs_type == 'Ceph':
        conf_ops.change_global_config('ceph', 'imageCache.cleanup.interval',
                origin_interval)
    test_util.test_pass('Create Image Template Test Success')
def check(self):
    '''
    Will use snapshot:createDataVolumeFromSnapshot function to do checking.

    Creates a data volume from the current snapshot, attaches it to the
    utility VM, lists the checking-point folder from inside the guest,
    and compares the listing with the expected checking points.
    '''
    super(zstack_kvm_snapshot_checker, self).check()
    target_volume = self.test_obj.get_target_volume()
    # Root volumes can't be checked this way — skip.
    if target_volume.get_volume().type == 'Root':
        test_util.test_logger(
            'Checking Result: skip snapshot checking, since target volume: %s is Root volme'
            % target_volume.get_volume().uuid)
        return self.judge(self.exp_result)
    #snapshots = self.test_obj.get_snapshot_list()
    sp = self.test_obj.get_current_snapshot()
    if not sp:
        test_util.test_logger(
            'Checker result: no available current snapshot to be checked')
        return self.judge(self.exp_result)
    utility_vm = self.test_obj.get_utility_vm()
    vm_inv = utility_vm.get_vm()
    result = True
    #only need to test latest current snapshot, since previouse snapshot
    #operations should be checked already and assumed won't be changed.
    #If there is not true, change following 2 lines to next line:
    #for sp in snapshots.get_snapshot_list():
    if sp.get_state() == sp_header.DELETED:
        #continue
        test_util.test_logger(
            'Checking Result: snapshot status is Deleted, it should not be tested')
        return self.judge(self.exp_result)
    #calculate checking point
    checking_points_list = self.test_obj.get_checking_points(sp)
    volume_obj = sp.create_data_volume()
    volume_obj.attach(utility_vm)
    import tempfile
    with tempfile.NamedTemporaryFile() as script:
        # Mount the just-attached volume's first partition and `ls` the
        # checking-point folder; the `ls` output is the script's result.
        script.write('''
device=/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`1
mkdir -p %s >/dev/null
mount $device %s >/dev/null
mkdir -p %s >/dev/null
checking_result=''
ls %s
umount %s >/dev/null
''' % (test_lib.WOODPECKER_MOUNT_POINT, \
        test_lib.WOODPECKER_MOUNT_POINT, \
        zstack_sp_header.checking_point_folder, \
        zstack_sp_header.checking_point_folder, \
        test_lib.WOODPECKER_MOUNT_POINT))
        script.flush()
        rsp = test_lib.lib_execute_shell_script_in_vm(vm_inv, \
                script.name)
    # temp volume is only needed for the listing — clean it up
    volume_obj.detach()
    volume_obj.delete()
    if rsp:
        # Set-difference by double bookkeeping: items left in
        # temp_checking_list are unexpected extras, items left in
        # temp_exp_list are missing checking points.
        result_list = rsp.result.split()
        temp_checking_list = list(result_list)
        temp_exp_list = list(checking_points_list)
        for item in result_list:
            if item in checking_points_list:
                temp_checking_list.remove(item)
                temp_exp_list.remove(item)
        if len(temp_exp_list) == 0:
            if len(temp_checking_list) == 0:
                test_util.test_logger(
                    'Checker result: snapshot: %s integrity checking pass'
                    % sp.get_snapshot().uuid)
            else:
                test_util.test_logger(
                    'Checker result: snapshot: %s integrity checking fail, there are something more than expected : %s'
                    % (sp.get_snapshot().uuid, temp_checking_list))
                zstack_sp_header.print_snapshot_chain_checking_point(
                    zstack_sp_header.get_all_ancestry(sp))
                result = False
        else:
            if len(temp_checking_list) == 0:
                test_util.test_logger(
                    'Checker result: snapshot: %s integrity checking fail, there are something less than expected: %s'
                    % (sp.get_snapshot().uuid, temp_exp_list))
                zstack_sp_header.print_snapshot_chain_checking_point(
                    zstack_sp_header.get_all_ancestry(sp))
                result = False
            else:
                test_util.test_logger(
                    'Checker result: snapshot: %s integrity checking fail, there are something more than expected : %s and there are something less than expected: %s '
                    % (sp.get_snapshot().uuid, temp_checking_list, temp_exp_list))
                zstack_sp_header.print_snapshot_chain_checking_point(
                    zstack_sp_header.get_all_ancestry(sp))
                result = False
    else:
        test_util.test_logger(
            'Checker result: check snapshot: %s failed with checking script.'
            % sp.get_snapshot().uuid)
        zstack_sp_header.print_snapshot_chain_checking_point(
            zstack_sp_header.get_all_ancestry(sp))
        result = False
    return self.judge(result)
def check(self):
    '''
    Will use snapshot:createDataVolumeFromSnapshot function to do checking.

    Creates a data volume from the current snapshot, attaches it to the
    utility VM, lists the checking-point folder from inside the guest,
    and compares the listing with the expected checking points.
    '''
    super(zstack_kvm_snapshot_checker, self).check()
    target_volume = self.test_obj.get_target_volume()
    # Root volumes can't be checked this way — skip.
    if target_volume.get_volume().type == 'Root':
        test_util.test_logger('Checking Result: skip snapshot checking, since target volume: %s is Root volme' % target_volume.get_volume().uuid)
        return self.judge(self.exp_result)
    #snapshots = self.test_obj.get_snapshot_list()
    sp = self.test_obj.get_current_snapshot()
    if not sp:
        test_util.test_logger('Checker result: no available current snapshot to be checked')
        return self.judge(self.exp_result)
    utility_vm = self.test_obj.get_utility_vm()
    vm_inv = utility_vm.get_vm()
    result = True
    #only need to test latest current snapshot, since previouse snapshot
    #operations should be checked already and assumed won't be changed.
    #If there is not true, change following 2 lines to next line:
    #for sp in snapshots.get_snapshot_list():
    if sp.get_state() == sp_header.DELETED:
        #continue
        test_util.test_logger('Checking Result: snapshot status is Deleted, it should not be tested')
        return self.judge(self.exp_result)
    #calculate checking point
    checking_points_list = self.test_obj.get_checking_points(sp)
    volume_obj = sp.create_data_volume()
    volume_obj.attach(utility_vm)
    import tempfile
    with tempfile.NamedTemporaryFile() as script:
        # Mount the just-attached volume's first partition and `ls` the
        # checking-point folder; the `ls` output is the script's result.
        script.write('''
device=/dev/`ls -ltr --file-type /dev | grep disk | awk '{print $NF}' | grep -v '[[:digit:]]' | tail -1`1
mkdir -p %s >/dev/null
mount $device %s >/dev/null
mkdir -p %s >/dev/null
checking_result=''
ls %s
umount %s >/dev/null
''' % (test_lib.WOODPECKER_MOUNT_POINT, \
        test_lib.WOODPECKER_MOUNT_POINT, \
        zstack_sp_header.checking_point_folder, \
        zstack_sp_header.checking_point_folder, \
        test_lib.WOODPECKER_MOUNT_POINT))
        script.flush()
        rsp = test_lib.lib_execute_shell_script_in_vm(vm_inv, \
                script.name)
    # temp volume is only needed for the listing — clean it up
    volume_obj.detach()
    volume_obj.delete()
    if rsp:
        # Set-difference by double bookkeeping: items left in
        # temp_checking_list are unexpected extras, items left in
        # temp_exp_list are missing checking points.
        result_list = rsp.result.split()
        temp_checking_list = list(result_list)
        temp_exp_list = list(checking_points_list)
        for item in result_list:
            if item in checking_points_list:
                temp_checking_list.remove(item)
                temp_exp_list.remove(item)
        if len(temp_exp_list) == 0:
            if len(temp_checking_list) == 0:
                test_util.test_logger('Checker result: snapshot: %s integrity checking pass' % sp.get_snapshot().uuid)
            else:
                test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something more than expected : %s' % (sp.get_snapshot().uuid, temp_checking_list))
                zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
                result = False
        else:
            if len(temp_checking_list) == 0:
                test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something less than expected: %s' % (sp.get_snapshot().uuid, temp_exp_list))
                zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
                result = False
            else:
                test_util.test_logger('Checker result: snapshot: %s integrity checking fail, there are something more than expected : %s and there are something less than expected: %s ' % (sp.get_snapshot().uuid, temp_checking_list, temp_exp_list))
                zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
                result = False
    else:
        test_util.test_logger('Checker result: check snapshot: %s failed with checking script.' % sp.get_snapshot().uuid)
        zstack_sp_header.print_snapshot_chain_checking_point(zstack_sp_header.get_all_ancestry(sp))
        result = False
    return self.judge(result)
def online_hotplug_cpu_memory(vm):
    """Run the online CPU/memory hotplug helper script inside `vm`.

    The script lives under the woodpecker checkout at
    tools/online_hotplug_cpu_memory.sh, rooted at the
    `woodpecker_root_path` environment variable.

    :param vm: test vm object (must expose get_vm)
    """
    # Fix: the old "%s/%s" % (..., '/tools/...') concatenation produced
    # a double slash in the path (harmless on POSIX, but untidy in
    # logs); os.path.join with a relative component builds it cleanly
    # and resolves to the same file.
    script_file = os.path.join(os.environ.get('woodpecker_root_path'),
                               'tools/online_hotplug_cpu_memory.sh')
    test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file)
def test():
    """Create an image template from a stopped VM and verify that its
    task progress is well-formed: appears within ~10s, stays in [0,100],
    and never decreases; the image must end up Ready.
    """
    global vm1
    global origin_interval
    global bs_type
    global image
    test_util.test_dsc('Create test vm and check')
    vm1 = test_stub.create_vlan_vm()
    vm1.check()
    # write 300MB of data so template creation has measurable progress
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('dd if=/dev/zero of=/home/dd bs=1M count=300')
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm1.get_vm(), script_file.name):
        test_util.test_fail("fail to create data in [vm:] %s" % (vm1.get_vm().uuid))
    os.unlink(script_file.name)
    test_obj_dict.add_vm(vm1)
    vm1.stop()
    # image creation runs in a worker thread so progress can be polled
    # from here
    thread = threading.Thread(target=create_temp_image, args=())
    thread.start()
    # poll up to ~10s for the image to enter Creating
    for i in range(0, 100):
        time.sleep(0.1)
        image_cond = res_ops.gen_query_conditions("status", '=', "Creating")
        image_cond = res_ops.gen_query_conditions(
            "name", '=', "test_create_image_template_progress", image_cond)
        image_query = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
                None, fields=['uuid'])
        if len(image_query) > 0:
            break
    if len(image_query) <= 0:
        test_util.test_fail("image is not in creating after 10 seconds")
    # poll until the task-progress record exists; get_task_progress
    # raises while it is not ready yet — the bare except is the retry
    # signal here.
    for i in range(0, 100):
        try:
            progress = res_ops.get_task_progress(image_query[0].uuid)
            break
        except:
            test_util.test_logger('task progress still not ready')
            time.sleep(0.1)
    if int(progress.progress) < 0 or int(progress.progress) > 100:
        test_util.test_fail(
            "Progress of task should be between 0 and 100, while it actually is %s"
            % (progress.progress))
    # progress must be monotonically non-decreasing; get_task_progress
    # raising is taken as "task finished" and ends the loop.
    for i in range(0, 3600):
        try:
            last_progress = progress
            progress = res_ops.get_task_progress(image_query[0].uuid)
            if int(progress.progress) < int(last_progress.progress):
                test_util.test_fail(
                    "Progress (%s) of task is smaller than last time (%s)"
                    % (progress.progress, last_progress.progress))
        except:
            break
    image_cond = res_ops.gen_query_conditions("uuid", '=', image_query[0].uuid)
    image_query2 = res_ops.query_resource_fields(res_ops.IMAGE, image_cond, \
            None, fields=['status'])
    if image_query2[0].status != "Ready":
        test_util.test_fail("Image should be ready when no progress anymore")
    thread.join()
    image.check()
    vm1.destroy()
    image.delete()
    if bs_type == 'Ceph':
        # Ceph needs time to clean its image cache before re-checking
        time.sleep(60)
    # re-check image state after deletion — TODO confirm whether this
    # was meant to run only for Ceph; the mangled source is ambiguous.
    image.check()
    if bs_type == 'Ceph':
        conf_ops.change_global_config('ceph', 'imageCache.cleanup.interval',
                origin_interval)
    test_util.test_pass('Create Image Template Test Success')
def test():
    """Snapshot lifecycle test for a sharable (multi-attach) data volume.

    Creates/reverts/deletes a chain of snapshots on a sharable
    virtio-scsi volume, with the VM running or stopped depending on the
    CASE_FLAVOR environment variable, then verifies the surviving
    checking points by listing them from inside the VM.
    Only runs on Ceph or SharedBlock primary storage.
    """
    allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE, "SharedBlock"]
    test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    flavor = case_flavor[os.environ.get('CASE_FLAVOR')]
    test_util.test_dsc('Create original vm')
    # vm: carries the sharable volume; vm1: utility VM for the snapshot
    # checkers
    vm = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm)
    vm1 = test_stub.create_vlan_vm()
    test_obj_dict.add_vm(vm1)
    test_util.test_dsc('Create Sharable Data Volume obj.')
    disk_offering = test_lib.lib_get_disk_offering_by_name(os.environ.get('smallDiskOfferingName'))
    volume_creation_option = test_util.VolumeOption()
    volume_creation_option.set_name('sharable volume')
    volume_creation_option.set_disk_offering_uuid(disk_offering.uuid)
    volume_creation_option.set_system_tags(['ephemeral::shareable', 'capability::virtio-scsi'])
    volume = test_stub.create_volume(volume_creation_option)
    test_obj_dict.add_volume(volume)
    volume.attach(vm)
    if flavor['vm_running'] == False:
        vm.stop()
        test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, 22)
    if flavor['vm_running'] == True:
        # online snapshots of a sharable volume are only supported on
        # Ceph — tighten the PS requirement for this flavor
        allow_ps_list = [inventory.CEPH_PRIMARY_STORAGE_TYPE]
        test_lib.skip_test_when_ps_type_not_in_list(allow_ps_list)
    test_util.test_dsc('create data volume snapshot')
    snapshots_data = test_obj_dict.get_volume_snapshot(volume.get_volume().uuid)
    snapshots_data.set_utility_vm(vm1)
    snapshots_data.create_snapshot('create_data_snapshot1')
    snapshots_data.check()
    snapshot1 = snapshots_data.get_current_snapshot()
    snapshots_data.create_snapshot('create_data_snapshot2')
    snapshots_data.check()
    #check data snapshots
    if flavor['vm_running'] == True:
        # revert requires the volume offline: stop, revert, restart
        vm.stop()
        test_lib.lib_wait_target_down(vm.get_vm().vmNics[0].ip, 22)
        snapshots_data.use_snapshot(snapshot1)
        snapshots_data.check()
        vm.start()
        test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22)
    else:
        snapshots_data.use_snapshot(snapshot1)
        snapshots_data.check()
    # branch a new chain off snapshot1
    snapshots_data.create_snapshot('create_snapshot1.1.1')
    snapshot2 = snapshots_data.get_current_snapshot()
    snapshots_data.check()
    snapshots_data.create_snapshot('create_snapshot1.2.1')
    snapshots_data.check()
    snapshots_data.delete_snapshot(snapshot2)
    snapshots_data.check()
    #check delete snapshot1
    sp = snapshots_data.get_current_snapshot()
    snapshots_data.delete_snapshot(snapshot1)
    snapshots_data.check()
    if flavor['vm_running'] == False:
        vm.start()
        test_lib.lib_wait_target_up(vm.get_vm().vmNics[0].ip, 22)
    import tempfile
    with tempfile.NamedTemporaryFile() as script:
        # list the checking-point folder from inside the VM; the `ls`
        # output is the script's result
        script.write('''
device=/dev/`ls -ltr --file-type /dev | awk '$4~/disk/ {print $NF}' | grep -v '[[:digit:]]' | tail -1`1
mkdir -p %s >/dev/null
mount $device %s >/dev/null
mkdir -p %s >/dev/null
checking_result=''
ls %s
umount %s >/dev/null
''' % (test_lib.WOODPECKER_MOUNT_POINT, \
        test_lib.WOODPECKER_MOUNT_POINT, \
        zstack_sp_header.checking_point_folder, \
        zstack_sp_header.checking_point_folder, \
        test_lib.WOODPECKER_MOUNT_POINT))
        script.flush()
        rsp = test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script.name)
    if rsp:
        result_list = rsp.result.split()
        temp_checking_list = list(result_list)
        # exactly 3 checking points are expected to survive the
        # create/revert/delete sequence above
        if len(temp_checking_list) == 3:
            test_util.test_logger('Checker result: snapshot: %s integrity checking pass' % sp.get_snapshot().uuid)
        else:
            test_util.test_fail('Checker fail.')
    else:
        test_util.test_fail('Cmd fail.')
    test_lib.lib_robot_cleanup(test_obj_dict)
    test_util.test_pass('Test sharable volume snapshot success.')
def test():
    """EIP lifecycle test: attach an EIP/VIP to a VM, detach the nic's
    L3, re-attach, and verify connectivity at each step.
    """
    test_util.test_dsc('Create test vm with EIP and check.')
    vm = test_stub.create_vlan_vm(os.environ.get('l3VlanNetworkName1'))
    test_obj_dict.add_vm(vm)
    pri_l3_name = os.environ.get('l3VlanNetworkName1')
    pri_l3_uuid = test_lib.lib_get_l3_by_name(pri_l3_name).uuid
    pub_l3_name = os.environ.get('l3PublicNetworkName')
    pub_l3_uuid = test_lib.lib_get_l3_by_name(pub_l3_name).uuid
    vm_nic = vm.vm.vmNics[0]
    vm_nic_uuid = vm_nic.uuid
    vip = test_stub.create_vip('create_eip_test', pub_l3_uuid)
    test_obj_dict.add_vip(vip)
    eip = test_stub.create_eip('create eip test', vip_uuid=vip.get_vip().uuid,
            vnic_uuid=vm_nic_uuid, vm_obj=vm)
    vip.attach_eip(eip)
    vm.check()
    # VIP must be reachable from outside once the EIP is attached
    if not test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to be able to ping vip while it fail')
    # verify outbound connectivity from inside the guest
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('ping -c 4 223.5.5.5')
    script_file.close()
    # NOTE(review): the message says "tracepath" but the script pings —
    # likely copy/paste from the MTU test.
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to tracepath in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)
    # Background watchdog in the guest: re-run dhclient whenever the
    # default route disappears (keeps the nic usable across detach /
    # attach).  Executed with timeout=2 so it's left running; the raised
    # timeout is expected and ignored.
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('''
while [ 1 -eq 1 ]; do
    route -n | grep 0.0.0.0
    if [ $? -ne 0 ]; then
        pkill dhclient
        dhclient
    fi
    sleep 40
done
''')
    script_file.close()
    try:
        test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name, timeout=2)
    except:
        test_util.test_logger('ignore')
    os.unlink(script_file.name)
    # detach the nic's L3: the VIP must stop answering
    net_ops.detach_l3(vm_nic_uuid)
    if test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to not be able to ping vip while it success')
    #vm.stop()
    net_ops.attach_l3(pri_l3_uuid, vm.get_vm().uuid)
    #vm.start()
    vm.check()
    # the re-attached nic is a new object — refresh uuid before
    # re-binding the EIP
    vm_nic = vm.vm.vmNics[0]
    vm_nic_uuid = vm_nic.uuid
    net_ops.attach_eip(eip.get_eip().uuid, vm_nic_uuid)
    vm.check()
    script_file = tempfile.NamedTemporaryFile(delete=False)
    script_file.write('ping -c 4 223.5.5.5')
    script_file.close()
    if not test_lib.lib_execute_shell_script_in_vm(vm.get_vm(), script_file.name):
        test_util.test_fail("fail to tracepath in [vm:] %s" % (vm.get_vm().uuid))
    os.unlink(script_file.name)
    if not test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('expected to be able to ping vip while it fail')
    vm.destroy()
    test_obj_dict.rm_vm(vm)
    # after the VM is gone the VIP must stop answering again
    if test_lib.lib_check_directly_ping(vip.get_vip().ip):
        test_util.test_fail('not expected to be able to ping vip while it succeed')
    eip.delete()
    vip.delete()
    test_obj_dict.rm_vip(vip)
    test_util.test_pass('Create EIP for VM Success')