def add_remove_distributed_vpool_test():
    """
    Create a vPool with 'distributed' BackendType and remove it
    Related ticket: http://jira.cloudfounders.com/browse/OVS-4050
    :return: None
    """
    # Verify if an unused disk is available to mount; without one the test cannot run
    unused_disks = GeneralDisk.get_unused_disks()
    if len(unused_disks) == 0:
        logger.info('No available disks found to mount locally for the distributed backend')
        return

    # Raise if vPool already exists (left-over from a previous run means an unclean environment)
    vpool_name = 'add-remove-distr-vpool'
    vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
    if vpool is not None:
        raise RuntimeError('vPool with name "{0}" still exists'.format(vpool_name))

    unused_disk = unused_disks[0]
    if not unused_disk.startswith('/dev/'):
        raise ValueError('Unused disk must be absolute path')

    # Create a partition on the disk
    local_sr = GeneralStorageRouter.get_local_storagerouter()
    disk = GeneralDisk.get_disk_by_devicename(storagerouter=local_sr,
                                              device_name=unused_disk)
    partition = GeneralDisk.partition_disk(disk=disk)

    # Mount the unused disk when it is not mounted yet
    if partition.mountpoint is None:
        GeneralDisk.configure_disk(storagerouter=local_sr,
                                   disk=disk,
                                   offset=0,
                                   size=disk.size,
                                   roles=[],
                                   partition=partition)
        # Re-initializes the object so partition.mountpoint reflects the mount done above
        partition.discard()

    # Add vPool on the mounted partition and validate health
    vpool, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'vpool_name': vpool_name,
                                                                   'type': 'distributed',
                                                                   'distributed_mountpoint': partition.mountpoint})
    assert vpool is not None, 'vPool {0} was not created'.format(vpool_name)
    GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)

    # Retrieve vPool information before removal (needed afterwards to verify cleanup)
    guid = vpool.guid
    name = vpool.name
    backend_type = vpool.backend_type.code
    files = GeneralVPool.get_related_files(vpool)
    directories = GeneralVPool.get_related_directories(vpool)
    storagerouters = [sd.storagerouter for sd in vpool.storagedrivers]

    # Remove vPool and validate removal
    GeneralVPool.remove_vpool(vpool=vpool)
    vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
    assert vpool is None, 'vPool {0} was not deleted'.format(vpool_name)
    GeneralVPool.check_vpool_cleanup(vpool_info={'guid': guid,
                                                 'name': name,
                                                 'type': backend_type,
                                                 'files': files,
                                                 'directories': directories},
                                     storagerouters=storagerouters)
    # Undo the partitioning done at the start of the test
    GeneralDisk.unpartition_disk(disk)
def ovs_3700_validate_test():
    """
    Validate that gathering scrub work produces scrubber log output for a vDisk
    which has data written between created and deleted snapshots
    :return: None
    """
    def _scrub_log_bytes():
        # Current size of the scrubber upstart log, or 0 when it does not exist yet
        log_path = '/var/log/upstart/ovs-scrubber.log'
        return os.stat(log_path).st_size if os.path.exists(log_path) else 0

    loop_device = 'loop0'
    vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
    vdisk = GeneralVDisk.create_volume(size=2, vpool=vpool, name='ovs-3700-disk', loop_device=loop_device, wait=True)

    # Build snapshot history: snap0 -> data -> snap1 -> data -> drop snap1 -> data -> snap2
    GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap0')
    GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop_device, vdisk.name, '1'), size=512)
    GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap1')
    GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop_device, vdisk.name, '2'), size=512)
    GeneralVDisk.delete_snapshot(disk=vdisk, snapshot_name='snap1')
    GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop_device, vdisk.name, '3'), size=512)
    GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap2')

    # Scrubbing should log activity, growing the scrubber log
    size_before = _scrub_log_bytes()
    ScheduledTaskController.gather_scrub_work()
    size_after = _scrub_log_bytes()

    GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, loop_device=loop_device)
    assert size_after > size_before, "Scrubber actions were not logged!"
def add_remove_alba_vpool_test():
    """
    Create a vPool using default values (from autotest.cfg)
    If a vPool with name already exists, remove it and create a new vPool
    Validate the newly created vPool is correctly running
    Remove the newly created vPool and validate everything related to the vPool has been cleaned up
    :return: None
    """
    vpool_name = 'add-delete-alba-vpool'

    # A lingering vPool with the same name means a previous run did not clean up
    if GeneralVPool.get_vpool_by_name(vpool_name=vpool_name) is not None:
        raise RuntimeError('vPool with name "{0}" still exists'.format(vpool_name))

    # Create the vPool and verify its health
    vpool, vpool_params = GeneralVPool.add_vpool(vpool_parameters={'vpool_name': vpool_name})
    assert vpool is not None, 'vPool {0} was not created'.format(vpool_name)
    GeneralVPool.validate_vpool_sanity(expected_settings=vpool_params)

    # Capture everything needed to verify the cleanup before the vPool disappears
    cleanup_info = {'guid': vpool.guid,
                    'name': vpool.name,
                    'type': vpool.backend_type.code,
                    'files': GeneralVPool.get_related_files(vpool),
                    'directories': GeneralVPool.get_related_directories(vpool)}
    related_storagerouters = [storagedriver.storagerouter for storagedriver in vpool.storagedrivers]

    # Remove the vPool and verify nothing is left behind
    GeneralVPool.remove_vpool(vpool=vpool)
    assert GeneralVPool.get_vpool_by_name(vpool_name=vpool_name) is None, 'vPool {0} was not deleted'.format(vpool_name)
    GeneralVPool.check_vpool_cleanup(vpool_info=cleanup_info,
                                     storagerouters=related_storagerouters)
def teardown():
    """
    Teardown for VirtualMachine package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    # The vPool configured in autotest.cfg must still exist at this point
    vpool = GeneralVPool.get_vpool_by_name(General.get_config().get('vpool', 'name'))
    assert vpool is not None, "No vpool found where one was expected"
    GeneralVMachine.logger.info("Cleaning vpool")
    GeneralVPool.remove_vpool(vpool)

    # Remove the ALBA backend (and unclaim its disks) when it is still present
    backend_name = General.get_config().get('backend', 'name')
    alba_backend = GeneralAlba.get_by_name(backend_name)
    if alba_backend is not None:
        GeneralAlba.unclaim_disks_and_remove_alba_backend(alba_backend=alba_backend)
def ovs_2703_kill_various_services_test():
    """
    Kill various services and see if they recover
    :return: None
    """
    # @TODO 1: This test does not belong in the vPool tests, its a service test which happens to create a vPool
    # @TODO 2: Make test smarter to test all required services on all node types
    def _check_service_recovery(service_name, client):
        # Kill one service and verify it comes back with a new PID.
        # Returns an error message, or None when the service is absent or recovered fine.
        if GeneralService.has_service(name=service_name, client=client) is False:
            return None
        if GeneralService.get_service_status(name=service_name, client=client) is False:
            return 'Service {0} not found in running state'.format(service_name)
        pid_before = GeneralService.get_service_pid(name=service_name, client=client)
        if pid_before == -1:
            return 'Service {0} has unknown PID before being killed'.format(service_name)
        GeneralService.kill_service(name=service_name, client=client)
        time.sleep(5)  # Give the process manager a chance to respawn the service
        if GeneralService.get_service_status(name=service_name, client=client) is False:
            return 'Service {0} not found in running state after killing it'.format(service_name)
        pid_after = GeneralService.get_service_pid(name=service_name, client=client)
        if pid_after == -1:
            return 'Service {0} has unknown PID after being killed'.format(service_name)
        if pid_before == pid_after:
            return 'Kill command did not work on service {0}'.format(service_name)
        return None

    # Make sure a vPool exists so the vPool-related services are present
    vpool = GeneralVPool.get_vpool_by_name(General.get_config().get('vpool', 'name'))
    if vpool is None:
        vpool, _ = GeneralVPool.add_vpool()

    root_client = SSHClient(GeneralStorageRouter.get_local_storagerouter(), username='******')
    errors = []
    for template_name in GeneralService.get_all_service_templates():
        failure = _check_service_recovery(template_name, root_client)
        if failure is not None:
            errors.append(failure)

    GeneralVPool.remove_vpool(vpool)
    assert len(errors) == 0, "Following issues were found with the services:\n - {0}".format('\n - '.join(errors))
def ovs_3756_metadata_size_test():
    """
    Validate get/set metadata cache size for a vdisk
    :return: None
    """
    disk_name = 'ovs-3756-disk'
    loop = 'loop0'
    vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
    # vdisk_size in GiB
    vdisk_size = 2
    vdisk = GeneralVDisk.create_volume(size=vdisk_size, vpool=vpool, name=disk_name, loop_device=loop, wait=True)
    storagedriver_config = StorageDriverConfiguration('storagedriver', vdisk.vpool_guid, vdisk.storagedriver_id)
    # "size" of a page = amount of entries in a page (addressable by 6 bits)
    metadata_page_capacity = 64
    cluster_size = storagedriver_config.configuration.get('volume_manager', {}).get('default_cluster_size', 4096)
    # Amount of cache pages needed to address the whole volume (capped at 2 TiB)
    cache_capacity = int(min(vdisk.size, 2 * 1024 ** 4) / float(metadata_page_capacity * cluster_size))
    default_metadata_cache_size = StorageDriverClient.DEFAULT_METADATA_CACHE_SIZE

    def _validate_setting_cache_value(value_to_verify):
        # Round-trip a cache-size value through set/get and assert it is reported back unchanged.
        # Values above the default are clamped to the default before setting, so this never
        # actually exercises the backend's own clamping behaviour — it only verifies the
        # set/get round-trip for values <= default.
        if value_to_verify > default_metadata_cache_size:
            value_to_verify = default_metadata_cache_size
        disk_config_params = GeneralVDisk.get_config_params(vdisk)
        disk_config_params['metadata_cache_size'] = value_to_verify
        GeneralVDisk.set_config_params(vdisk, {'new_config_params': disk_config_params})
        disk_config_params = GeneralVDisk.get_config_params(vdisk)
        actual_value = disk_config_params['metadata_cache_size']
        assert actual_value == value_to_verify,\
            'Value after set/get differs, actual: {0}, expected: {1}'.format(actual_value, value_to_verify)

    config_params = GeneralVDisk.get_config_params(vdisk)
    # validate default metadata cache as it was not set explicitly
    default_implicit_value = config_params['metadata_cache_size']
    assert default_implicit_value == default_metadata_cache_size,\
        'Expected default cache size: {0}, got {1}'.format(default_metadata_cache_size, default_implicit_value)
    # verify set/get of specific value - larger than default
    _validate_setting_cache_value(10000 * cache_capacity)
    # verify set/get of specific value - default value
    _validate_setting_cache_value(default_metadata_cache_size)
    GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, loop_device=loop, wait=True)
def teardown():
    """
    Teardown for VirtualDisk package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    vpool = GeneralVPool.get_vpool_by_name(General.get_config().get("vpool", "name"))

    # Remove every vDisk the tests may have left behind
    for leftover_vdisk in VDiskList.get_vdisks():
        GeneralVDisk.delete_volume(leftover_vdisk, vpool, loop_device='loop0')

    if vpool is not None:
        GeneralVPool.remove_vpool(vpool)

    # Remove the ALBA backend (and unclaim its disks) when it is still present
    alba_backend = GeneralAlba.get_by_name(General.get_config().get('backend', 'name'))
    if alba_backend is not None:
        GeneralAlba.unclaim_disks_and_remove_alba_backend(alba_backend=alba_backend)
def validate_clone_disk_test():
    """
    Validate vdisk clone method: write 2 hash files on a source disk, clone the
    disk and verify both files have identical md5 sums on source and clone
    :return: None
    """
    disk_name = 'clone-disk'
    clone_disk_name = 'new-cloned-disk'
    test_file_name = 'file-contents'
    test_file_size = 5000  # presumably MB; logged below as GB via / 1000.0 — TODO confirm unit
    loop = 'loop0'
    clone_loop = 'loop1'
    vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
    vdisk = GeneralVDisk.create_volume(size=50, vpool=vpool, name=disk_name, loop_device=loop, wait=True)

    TestVDisk.logger.info('clone_disk_test - create initial snapshot')
    GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap0')

    TestVDisk.logger.info('clone_disk_test - create 1st {0} GB test file'.format(test_file_size / 1000.0))
    GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '1'), size=test_file_size)
    TestVDisk.logger.info('clone_disk_test - create 2nd {0} GB test file'.format(test_file_size / 1000.0))
    GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '2'), size=test_file_size)
    # Flush pending writes to the volume before cloning
    GeneralVDisk.logger.info(General.execute_command('sync'))

    TestVDisk.logger.info('clone_disk_test - cloning disk')
    cloned_vdisk = GeneralVDisk.clone_volume(vdisk, clone_disk_name)
    TestVDisk.logger.info('clone_disk_test - cloned disk')
    GeneralVDisk.connect_volume(vpool, name=clone_disk_name, loop_device=clone_loop)

    # Checksum both test files on source and on clone
    md5_sum_1 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '1'))[0].split(' ')[0]
    md5_sum_2 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(loop, test_file_name, '2'))[0].split(' ')[0]
    md5_clone_1 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(clone_loop, test_file_name, '1'))[0].split(' ')[0]
    md5_clone_2 = General.execute_command('md5sum /mnt/{0}/{1}_{2}.txt'.format(clone_loop, test_file_name, '2'))[0].split(' ')[0]

    # Clean up before asserting so a failure does not leak the volumes
    GeneralVDisk.disconnect_volume(loop_device=clone_loop)
    GeneralVDisk.delete_volume(VDisk(cloned_vdisk['vdisk_guid']), vpool, wait=True)
    GeneralVDisk.delete_volume(vdisk, vpool, loop, wait=True)

    # Bug fix: the failure messages previously formatted the path with vdisk.name
    # ('clone-disk'), but the compared files are named after test_file_name
    # ('file-contents') — the old messages pointed at files that were never created
    assert md5_sum_1 == md5_clone_1,\
        'file contents for /mnt/{0}/{1}_{2}.txt is not identical on source and clone!'.format(loop, test_file_name, '1')
    assert md5_sum_2 == md5_clone_2,\
        'file contents for /mnt/{0}/{1}_{2}.txt is not identical on source and clone!'.format(loop, test_file_name, '2')
def ovs_3791_validate_backend_sync_test(): """ Validate vdisk backend sync method """ disk_name = 'ovs-3791-disk' loop = 'loop0' vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name) vdisk = GeneralVDisk.create_volume(size=2, vpool=vpool, name=disk_name, loop_device=loop, wait=True) _, snap_id1 = GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap0') GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, vdisk.name, '1'), size=512) _, snap_id2 = GeneralVDisk.create_snapshot(vdisk=vdisk, snapshot_name='snap1') GeneralVDisk.generate_hash_file(full_name='/mnt/{0}/{1}_{2}.txt'.format(loop, vdisk.name, '2'), size=512) tlog_name = GeneralVDisk.schedule_backend_sync(vdisk) assert tlog_name[:5] == 'tlog_' and len(tlog_name) == 41,\ 'Unexpected result: {0} does not match tlog type'.format(tlog_name) timeout = 300 status = False while timeout > 0: status = GeneralVDisk.is_volume_synced_up_to_snapshot(vdisk=vdisk, snapshot_id=snap_id2) print 'sync up to snapshot: {0}'.format(status) if status is True: break timeout -= 1 assert status is True, 'Snapshot not synced to backend within 5 minutes' status = False timeout = 300 while timeout > 0: status = GeneralVDisk.is_volume_synced_up_to_tlog(vdisk=vdisk, tlog_name=tlog_name) print 'sync up to tlog: {0}'.format(status) if status is True: break timeout -= 1 assert status is True, 'Tlog not synced to backend within 5 minutes' GeneralVDisk.delete_volume(vdisk, vpool, loop)
def ovs_3756_metadata_size_test():
    """
    Validate get/set metadata cache size for a vdisk
    :return: None
    """
    # NOTE(review): this file contains a second, earlier definition of the same
    # test name; the later definition wins at import time — confirm which is intended
    page_size = 256 * 24
    default_cache_size = 8192 * page_size
    loop_device = 'loop0'
    vpool = GeneralVPool.get_vpool_by_name(TestVDisk.vpool_name)
    vdisk = GeneralVDisk.create_volume(size=2, vpool=vpool, name='ovs-3756-disk', loop_device=loop_device, wait=True)

    def _roundtrip_cache_size(expected):
        # Set the metadata cache size and verify the identical value is read back
        params = GeneralVDisk.get_config_params(vdisk)
        params['metadata_cache_size'] = expected
        GeneralVDisk.set_config_params(vdisk, {'new_config_params': params})
        actual = GeneralVDisk.get_config_params(vdisk)['metadata_cache_size']
        assert actual == expected,\
            'Value after set/get differs, actual: {0}, expected: {1}'.format(actual, expected)

    # The cache size was never set explicitly, so the default must be reported
    implicit_value = GeneralVDisk.get_config_params(vdisk)['metadata_cache_size']
    assert implicit_value == default_cache_size,\
        'Expected default cache size: {0}, got {1}'.format(default_cache_size, implicit_value)

    # Round-trip a value larger than, equal to, and smaller than the default
    _roundtrip_cache_size(10000 * page_size)
    _roundtrip_cache_size(default_cache_size)
    _roundtrip_cache_size(100 * page_size)

    GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, loop_device=loop_device, wait=True)
def vms_with_fio_test():
    """
    Test virtual machines with FIO
    Creates 10 RAW disks on the vPool, boots a VM from each, lets them run for
    5 minutes, then tears everything down and verifies the vPool is empty again
    :return: None
    """
    timeout = 30       # seconds to wait for VM create/delete to show up in the model
    timer_step = 5     # polling interval in seconds
    nr_of_disks = 10
    vpool_name = General.get_config().get('vpool', 'name')
    vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
    assert vpool, "No vpool found where one was expected"

    # Convert the template image to one RAW disk per VM on the vPool mountpoint
    for disk_number in range(nr_of_disks):
        disk_name = "disk-{0}".format(disk_number)
        GeneralVMachine.logger.info("Starting RAW disk creation")
        template_folder = GeneralVMachine.template_target_folder
        image_name = GeneralVMachine.template_image
        out, err, _ = General.execute_command('qemu-img convert -O raw {0}{1} /mnt/{2}/{3}.raw'.format(template_folder, image_name, vpool_name, disk_name))
        if err:
            GeneralVMachine.logger.error("Error while creating raw disk: {0}".format(err))

    # Re-fetch the vPool so vdisks reflects the newly created disks
    vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
    assert len(vpool.vdisks) == nr_of_disks, "Only {0} out of {1} VDisks have been created".format(len(vpool.vdisks), nr_of_disks)

    # Boot one VM per RAW disk via virt-install
    for vm_number in range(nr_of_disks):
        machine_name = "machine-{0}".format(vm_number)
        disk_name = "disk-{0}".format(vm_number)
        GeneralVMachine.logger.info("Starting vmachine creation from RAW disk")
        out, err, _ = General.execute_command('virt-install --connect qemu:///system -n {0} -r 512 --disk /mnt/{1}/{2}.raw,'
                                              'device=disk --noautoconsole --graphics vnc,listen=0.0.0.0 --vcpus=1 --network network=default,mac=RANDOM,'
                                              'model=e1000 --import'.format(machine_name, vpool_name, disk_name))
        if err:
            GeneralVMachine.logger.error("Error while creating vmachine: {0}".format(err))

    # Poll until all VMs appear in the model (max 'timeout' seconds)
    counter = timeout / timer_step
    while counter > 0:
        vms = GeneralVMachine.get_vmachines()
        if len(vms) == nr_of_disks:
            counter = 0
        else:
            counter -= 1
            time.sleep(timer_step)
    vms = GeneralVMachine.get_vmachines()
    assert len(vms) == nr_of_disks, "Only {0} out of {1} VMachines have been created after {2} seconds".format(len(vms), nr_of_disks, timeout)

    # Waiting for 5 minutes of FIO activity on the vmachines
    time.sleep(300)
    vms = GeneralVMachine.get_vmachines()
    for vm in vms:
        assert vm.hypervisor_status == 'RUNNING', "Machine {0} has wrong status on the hypervisor: {1}".format(vm.name, vm.hypervisor_status)

    # Stop and undefine every VM again
    for vm_number in range(nr_of_disks):
        vmachine_name = "machine-{0}".format(vm_number)
        GeneralVMachine.logger.info("Removing {0} vmachine".format(vmachine_name))
        out, err, _ = General.execute_command('virsh destroy {0}'.format(vmachine_name))
        if err:
            GeneralVMachine.logger.error("Error while stopping vmachine: {0}".format(err))
        out, err, _ = General.execute_command('virsh undefine {0}'.format(vmachine_name))
        if err:
            GeneralVMachine.logger.error("Error while removing vmachine: {0}".format(err))

    # Poll until all VMs are gone from the model
    counter = timeout / timer_step
    while counter > 0:
        vms = GeneralVMachine.get_vmachines()
        if len(vms):
            counter -= 1
            time.sleep(timer_step)
        else:
            counter = 0
    vms = GeneralVMachine.get_vmachines()
    assert len(vms) == 0, "Still some machines left on the vpool after waiting for {0} seconds: {1}".format(timeout, [vm.name for vm in vms])

    # Remove the RAW disks from the vPool mountpoint and wait for the model to empty
    GeneralVMachine.logger.info("Removing vpool vdisks from {0} vpool".format(vpool_name))
    out, err, _ = General.execute_command("rm -rf /mnt/{0}/*.raw".format(vpool_name))
    if err:
        GeneralVMachine.logger.error("Error while removing vdisks: {0}".format(err))

    counter = timeout / timer_step
    while counter > 0:
        vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
        if len(vpool.vdisks):
            counter -= 1
            time.sleep(timer_step)
        else:
            counter = 0
    vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
    assert len(vpool.vdisks) == 0, "Still some disks left on the vpool after waiting {0} seconds: {1}".format(timeout, vpool.vdisks_guids)
def check_scrubbing_test():
    """
    Check scrubbing of vdisks test
    Writes to a RAW disk between snapshots, deletes the middle snapshots and
    verifies that running the scrubber lowers the vdisk's stored backend size
    :return: None
    """
    initial_counter = 100  # total polling budget in seconds
    step = 5               # polling interval in seconds
    vdisk = None
    vpool_name = General.get_config().get('vpool', 'name')
    vpool = GeneralVPool.get_vpool_by_name(vpool_name=vpool_name)
    assert vpool, "No vpool found where one was expected"

    # Create a RAW disk on the vPool from the template image
    template_folder = GeneralVMachine.template_target_folder
    image_name = GeneralVMachine.template_image
    disk_name = "scrubdisk"
    GeneralVMachine.logger.info("Starting RAW disk creation")
    out, err, _ = General.execute_command('qemu-img convert -O raw {0}{1} /mnt/{2}/{3}.raw'.format(template_folder, image_name, vpool_name, disk_name))
    if err:
        GeneralVMachine.logger.error("Error while creating raw disk: {0}".format(err))

    def snapshot_vdisk(vdisk):
        # Take a manual, consistent snapshot of the given vdisk
        metadata = {'label': 'snap-' + vdisk.name,
                    'is_consistent': True,
                    'timestamp': time.time(),
                    'is_automatic': False,
                    'is_sticky': False}
        VDiskController.create_snapshot(vdisk.guid, metadata)

    # Wait for the new disk to appear in the model
    # NOTE(review): the message says 60 seconds but the loop waits up to
    # initial_counter (100) seconds — confirm which is intended
    counter = initial_counter
    while counter and vdisk is None:
        time.sleep(step)
        vdisk = VDiskList.get_by_devicename_and_vpool('/' + disk_name + '.raw', vpool)
        counter -= step
    assert counter > 0, "Vdisk with name {0} didn't appear in the model after 60 seconds".format(disk_name)

    # snapshot disks for the first time
    snapshot_vdisk(vdisk)

    # Repeatedly overwrite data and snapshot, building up scrub-able history
    counter = initial_counter
    while counter > 0:
        time.sleep(step)
        out, err, _ = General.execute_command('dd if=/dev/zero of=/mnt/{0}/{1}.raw bs=10K count=1000 conv=notrunc'.format(vpool_name, disk_name))
        counter -= step
        snapshot_vdisk(vdisk)

    # saving disk 'stored' info / the only attribute that is lowered after scrubbing
    vdisk.invalidate_dynamics(['statistics'])
    disk_backend_data = vdisk.statistics['stored']

    # deleting middle snapshots (first and last are kept) so their data becomes scrub-able
    for snapshot in vdisk.snapshots[1:-1]:
        VDiskController.delete_snapshot(vdisk.guid, snapshot['guid'])

    # starting scrubber
    try:
        GenericController.execute_scrub()
        # waiting for model to catch up
        counter = initial_counter
        while counter > 0:
            time.sleep(step)
            vdisk.invalidate_dynamics(['statistics'])
            # checking result of scrub work: 'stored' must have dropped below the pre-scrub value
            if vdisk.statistics['stored'] < disk_backend_data:
                GeneralVMachine.logger.info("It took {0} seconds for the value to change from {1} to {2}\n".format((initial_counter - counter) * step, disk_backend_data, vdisk.statistics['stored']))
                break
            counter -= step
    finally:
        # removing vdisk regardless of the scrub outcome
        GeneralVMachine.logger.info("Removing vpool vdisks from {0} vpool".format(vpool_name))
        out, err, _ = General.execute_command("rm -rf /mnt/{0}/*.raw".format(vpool_name))
        if err:
            GeneralVMachine.logger.error("Error while removing vdisk: {0}".format(err))

    # counter hit 0 means the stored size never dropped within the polling budget
    assert counter > 0, "Scrubbing didn't run as expected, backend size of vdisk remained at {0}:\n".format(disk_backend_data)