def remove_vpools(self):
    """
    Remove all vPools tracked in ``self.vpools``.

    A shrink task is posted for every storagedriver of every vPool, all
    tasks are then awaited, and finally it is verified that no tracked
    vPool can still be resolved by name.

    :raises RuntimeError: if one or more vPools still exist afterwards
    """
    self.LOGGER.info(
        "Starting to validate removal of vpools concurrently.")
    # Collect (vpool_guid, task_id) tuples in a list. The original used a
    # dict keyed on vpool.guid, which silently overwrote the shrink task of
    # every storagedriver but the last one for vpools with multiple
    # storagedrivers -- those tasks were never awaited.
    dels = []
    for vpool in self.vpools:
        for storagedriver in vpool.storagedrivers:
            task_id = self.api.post(
                api='vpools/{0}/shrink_vpool'.format(vpool.guid),
                data=json.dumps({
                    'storagerouter_guid': storagedriver.storagerouter_guid
                }))
            dels.append((vpool.guid, task_id))
    for vpool_guid, task_id in dels:
        deletion_completed = self.api.wait_for_task(task_id, timeout=600)[0]
        if deletion_completed is True:
            self.LOGGER.info(
                'vpool with guid {0} is deleted successfully'.format(
                    vpool_guid))
    self.LOGGER.info('Concurrent removal of vpools finished.')
    # A vpool counts as removed when the lookup by name raises
    # VPoolNotFoundError; anything still resolvable is a leftover.
    leftover_vpools = []
    for vpool in self.vpools:
        try:
            VPoolHelper.get_vpool_by_name(vpool.name)
            leftover_vpools.append(vpool)
        except VPoolNotFoundError:
            pass
    if len(leftover_vpools) > 0:
        raise RuntimeError('Following vpools are not removed: {}.'.format(
            ', '.join([vpool.name for vpool in leftover_vpools])))
def add_vpools(self):
    """
    Add a predefined number of vpools concurrently.

    One add_vpool task is posted per vpool, spread round-robin over the
    eligible storagerouters; all tasks are then awaited and every vpool
    that ends up RUNNING is appended to ``self.vpools``.

    :raises AssertionError: when fewer than ``self.NUMBER_OF_VPOOLS``
        vpools were created successfully
    """
    self.LOGGER.info(
        "Starting to validate addition of vpools concurrently.")
    tasks = {}
    for i in range(1, self.NUMBER_OF_VPOOLS + 1):
        # Round-robin assignment of vpools over the valid storagerouters.
        sr = self.valid_storagerouters[(i - 1) % len(self.valid_storagerouters)]
        vpool_name = 'vpool{0}'.format(i)
        data = json.dumps({
            'call_parameters': {
                'backend_info': {
                    'preset': 'default',
                    'alba_backend_guid': random.choice(self.alba_backends).guid
                },
                'config_params': {
                    'cluster_size': 4,
                    'dtl_mode': 'a_sync',
                    'dtl_transport': 'tcp',
                    'sco_size': 4,
                    'write_buffer': 128
                },
                'connection_info': {
                    'host': ''
                },  # Empty host will force the framework to fill in local details
                'fragment_cache_on_read': False,
                'fragment_cache_on_write': False,
                'parallelism': {
                    'proxies': 1
                },
                'storage_ip': sr.ip,
                'storagerouter_ip': sr.ip,
                'vpool_name': vpool_name,
                'writecache_size': 5
            }
        })
        tasks[vpool_name] = self.api.post(
            api='storagerouters/{0}/add_vpool'.format(sr.guid), data=data)
    for vpool_name, task_id in tasks.iteritems():
        addition_completed = self.api.wait_for_task(task_id, timeout=600)[0]
        if addition_completed is True:
            self.LOGGER.info(
                'Creation of {0} completed.'.format(vpool_name))
        try:
            vpool = VPoolHelper.get_vpool_by_name(vpool_name)
            # Bug fix: the original tested `vpool.STATUSES.RUNNING is
            # 'RUNNING'`, i.e. it compared the class constant against a
            # string literal with identity and never looked at the vpool's
            # actual state. Check the instance's status instead.
            if vpool.status == vpool.STATUSES.RUNNING:
                self.vpools.append(vpool)
        except VPoolNotFoundError:
            self.LOGGER.exception(
                'Unable to find vpool with name {0}.'.format(vpool_name))
    assert self.NUMBER_OF_VPOOLS == len(
        self.vpools
    ), 'Failed to create {0} vpools: only {1} found!'.format(
        self.NUMBER_OF_VPOOLS, len(self.vpools))
def get_vpool_with_2_storagedrivers(cls):
    """
    Check for all vpools if there is at least one containing two storagedrivers
    :return: ovs.dal.hybrids.vpool
    :raises AssertionError: when no vpool with two storagedrivers matches
    """
    # A suitable vpool has >= 2 storagedrivers, synchronous DTL, and is one
    # of the vpools this test manages (cls.get_vpool_names()).
    vpool = None
    for vp in VPoolHelper.get_vpools():
        if len(vp.storagedrivers) >= 2 and vp.configuration['dtl_mode'] == 'sync' and vp.name in cls.get_vpool_names():
            vpool = vp
            break
    # Fail loudly when nothing matched; the original's trailing assert was
    # ordered after the return, so callers could receive None silently.
    assert vpool is not None, 'We need at least one vpool with two storagedrivers'
    return vpool
def validate_rollback(cls):
    """
    Validate if scrubbing works on a vpool
    INFO: 1 vPool should be available on 1 storagerouter
    :return:
    """
    cls.LOGGER.info("Starting to validate the rollback")
    vpools = VPoolHelper.get_vpools()
    assert len(vpools) >= 1, "Not enough vPools to test"
    vpool = vpools[0]  # just pick the first vpool you find
    assert len(
        vpool.storagedrivers) >= 1, "Not enough Storagedrivers to test"
    # create vdisks and write some stuff on it
    storagedriver = vpool.storagedrivers[
        0]  # just pick the first storagedriver you find
    # start actual test: one run per entry in TYPE_TEST_RUN (cloned / not cloned)
    for cloned in list(cls.TYPE_TEST_RUN):
        start = time.time()
        cls.LOGGER.info("Starting deployment of required vdisks")
        # _deploy_vdisks returns a 2-tuple; index 0 holds the vdisks to roll
        # back, index 1 the base vdisks backing any clones.
        deployed_vdisks = cls._deploy_vdisks(vpool=vpool,
                                             storagedriver=storagedriver,
                                             cloned=cloned)
        cls.LOGGER.info("Received vdisks to be rolledback: `{0}`".format(
            deployed_vdisks[0]))
        cls._rollback_vdisks(stored_vdisks=deployed_vdisks[0], vpool=vpool)
        cls.LOGGER.info(
            "Finished rolling back vdisks, start deleting possible base vdisks: {0}"
            .format(deployed_vdisks[1]))
        # Timing stops here on purpose: the reported run time covers deploy +
        # rollback only, not the clean-up below.
        end = time.time()
        # clean base disks from clones
        if cloned:
            cls._delete_remaining_vdisks(base_vdisks=deployed_vdisks[1])
            cls.LOGGER.info("Finished deleting base vdisks")
        else:
            cls.LOGGER.info("Skipped deleting base vdisks")
        # display run time
        cls.LOGGER.info(
            "Run with clone status `{0}` took {1} seconds".format(
                cloned, int(end - start)))
    cls.LOGGER.info("Finished to validate the rollback")
def setup(cls):
    """
    Select a storagedriver and a cloud-init image for the test.

    Picks a random vpool, takes its first storagedriver, and verifies that
    the first available image actually exists on that node.

    :return: (storagedriver, image_path) tuple
    :raises AssertionError: when no vpool, storagedriver or image is usable
    """
    available_vpools = VPoolHelper.get_vpools()
    assert len(available_vpools) >= 1, "Not enough vPools to test"
    chosen_vpool = random.choice(available_vpools)
    assert len(chosen_vpool.storagedrivers) >= 1, "Not enough Storagedrivers to test"

    # Verify at least one bootable image is configured.
    available_images = cls.get_images()
    assert len(available_images) >= 1, 'We require an cloud init bootable image file.'
    image_path = available_images[0]

    # Base information for the test run.
    storagedriver = chosen_vpool.storagedrivers[0]
    client = SSHClient(storagedriver.storagerouter, username='******')

    # The image must be present on the chosen node before we can use it.
    assert client.file_exists(image_path), "Image `{0}` does not exists on `{1}`!".format(
        image_path, storagedriver.storage_ip)
    return storagedriver, image_path
def setup(cls):
    """
    Prepare a storagedriver for fio-based testing.

    Takes the first storagedriver of the first vpool, downloads the fio
    binary matching the installed edition (enterprise or community) onto
    that node and makes it executable.

    :return: (storagedriver, fio_binary_location, is_ee) tuple
    :raises AssertionError: when no vpool or storagedriver is available
    """
    found_vpools = VPoolHelper.get_vpools()
    assert len(found_vpools) >= 1, "Not enough vPools to test"
    target_vpool = found_vpools[0]  # first vpool encountered is good enough
    assert len(target_vpool.storagedrivers) >= 1, "Not enough Storagedrivers to test"
    storagedriver = target_vpool.storagedrivers[0]  # same for the storagedriver

    target_storagerouter = storagedriver.storagerouter
    client = SSHClient(target_storagerouter, username='******')

    # Enterprise edition needs its own fio build.
    is_ee = SystemHelper.get_ovs_version(target_storagerouter) == 'ee'
    bin_info = cls.FIO_BIN_EE if is_ee is True else cls.FIO_BIN
    fio_bin_loc = bin_info['location']

    # Fetch the binary onto the node and mark it executable.
    client.run(['wget', bin_info['url'], '-O', fio_bin_loc])
    client.file_chmod(fio_bin_loc, 755)
    return storagedriver, fio_bin_loc, is_ee
def validate_vdisk_deployment(cls):
    """
    Validate if vdisk deployment works via various ways
    INFO: 1 vPool should be available on 1 storagerouter

    Creates, checks and deletes one vdisk per size in cls.VDISK_SIZES for
    each of three creation paths: the OVS API, qemu-img over the edge
    protocol, and a plain `truncate` on the vpool mountpoint.
    :return:
    """
    cls.LOGGER.info("Starting to validate the vdisk deployment")
    vpools = VPoolHelper.get_vpools()
    assert len(vpools) >= 1, "Not enough vPools to test"
    vpool = vpools[0]  # just pick the first vpool you find
    assert len(
        vpool.storagedrivers) >= 1, "Not enough Storagedrivers to test"
    # setup base information
    storagedriver = vpool.storagedrivers[0]
    # protocol is the scheme part of the network server uri (e.g. tcp/rdma)
    protocol = storagedriver.cluster_node_config[
        'network_server_uri'].split(':')[0]
    storage_ip = storagedriver.storage_ip
    edge_port = storagedriver.ports['edge']
    client = SSHClient(storagedriver.storage_ip, username='******')
    # =======
    # VIA API
    # =======
    for size in cls.VDISK_SIZES:
        api_disk_name = cls.PREFIX + str(size) + '-api'
        cls.LOGGER.info(
            "Starting to create vdisk `{0}` on vPool `{1}` with size `{2}` "
            "on node `{3}`".format(api_disk_name, vpool.name, size,
                                   storagedriver.storagerouter.ip))
        VDiskSetup.create_vdisk(
            vdisk_name=api_disk_name + '.raw',
            vpool_name=vpool.name,
            size=size,
            storagerouter_ip=storagedriver.storagerouter.ip,
            timeout=cls.VDISK_CREATE_TIMEOUT)
        cls.LOGGER.info(
            "Finished creating vdisk `{0}`".format(api_disk_name))
        cls._check_vdisk(vdisk_name=api_disk_name, vpool_name=vpool.name)
        cls.LOGGER.info(
            "Starting to delete vdisk `{0}`".format(api_disk_name))
        VDiskRemover.remove_vdisk_by_name(api_disk_name, vpool.name)
        cls.LOGGER.info(
            "Finished deleting vdisk `{0}`".format(api_disk_name))
    # ========
    # VIA QEMU
    # ========
    for size in cls.VDISK_SIZES:
        qemu_disk_name = cls.PREFIX + str(size) + '-qemu'
        edge_info = {
            'port': edge_port,
            'protocol': protocol,
            'ip': storage_ip,
        }
        # Enterprise edition requires edge credentials on top of the
        # connection details.
        if SystemHelper.get_ovs_version(
                storagedriver.storagerouter) == 'ee':
            edge_info.update(cls.get_shell_user())
        VMHandler.create_image(client, qemu_disk_name, size, edge_info)
        cls.LOGGER.info(
            "Finished creating vdisk `{0}`".format(qemu_disk_name))
        cls._check_vdisk(vdisk_name=qemu_disk_name, vpool_name=vpool.name)
        cls.LOGGER.info(
            "Starting to delete vdisk `{0}`".format(qemu_disk_name))
        VDiskRemover.remove_vdisk_by_name(qemu_disk_name, vpool.name)
        cls.LOGGER.info(
            "Finished deleting vdisk `{0}`".format(qemu_disk_name))
    # ============
    # VIA TRUNCATE
    # ============
    for size in cls.VDISK_SIZES:
        truncate_disk_name = cls.PREFIX + str(size) + '-trunc'
        cls.LOGGER.info(
            "Starting to create vdisk `{0}` on vPool `{1}` on node `{2}` "
            "with size `{3}`".format(truncate_disk_name, vpool.name,
                                     storagedriver.storage_ip, size))
        # Creating a sparse file on the vpool mountpoint implicitly creates
        # the vdisk.
        client.run([
            "truncate", "-s",
            str(size), "/mnt/{0}/{1}.raw".format(vpool.name,
                                                 truncate_disk_name)
        ])
        cls.LOGGER.info(
            "Finished creating vdisk `{0}`".format(truncate_disk_name))
        cls._check_vdisk(vdisk_name=truncate_disk_name,
                         vpool_name=vpool.name)
        cls.LOGGER.info(
            "Starting to delete vdisk `{0}`".format(truncate_disk_name))
        VDiskRemover.remove_vdisk_by_name(truncate_disk_name, vpool.name)
        cls.LOGGER.info(
            "Finished deleting vdisk `{0}`".format(truncate_disk_name))
    cls.LOGGER.info("Finished to validate the vdisk deployment")
def validate_vdisk_clone(cls):
    """
    Validate if vdisk deployment works via various ways
    INFO: 1 vPool should be available on 2 storagerouters

    Creates an original vdisk on one storagedriver, then clones it onto a
    second storagedriver twice: once without a snapshot and once from an
    explicitly created snapshot. All created vdisks are removed afterwards.
    :return:
    """
    cls.LOGGER.info("Starting to validate clone vdisks")
    vpools = VPoolHelper.get_vpools()
    assert len(vpools) >= 1, "Not enough vPools to test"
    try:
        vpool = next(
            (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
    except StopIteration:
        assert False, "Not enough Storagedrivers to test"
    # Setup base information
    storagedriver_source = vpool.storagedrivers[0]
    storagedriver_destination = vpool.storagedrivers[1]
    vdisks = []
    try:
        # Create required vdisk for test
        original_vdisk_name = '{0}_{1}'.format(cls.PREFIX, str(1).zfill(3))
        cls.LOGGER.info(
            "Creating the vdisk: {0} to clone".format(original_vdisk_name))
        original_vdisk = VDiskHelper.get_vdisk_by_guid(
            VDiskSetup.create_vdisk(
                vdisk_name=original_vdisk_name,
                vpool_name=vpool.name,
                size=cls.VDISK_SIZE,
                storagerouter_ip=storagedriver_source.storagerouter.ip))
        vdisks.append(original_vdisk)
        time.sleep(cls.CLONE_SLEEP_AFTER_CREATE)
        ###############
        # Clone vdisk #
        ###############
        cloned_vdisk_name = original_vdisk_name + '-clone-nosnapshot'
        cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
            VDiskSetup.create_clone(
                vdisk_name=original_vdisk_name,
                vpool_name=vpool.name,
                new_vdisk_name=cloned_vdisk_name,
                storagerouter_ip=storagedriver_destination.storagerouter.ip
            )['vdisk_guid'])
        vdisks.append(cloned_vdisk)
        time.sleep(cls.CLONE_SLEEP_BEFORE_CHECK)
        ######################################
        # clone vdisk from existing snapshot #
        ######################################
        cloned_vdisk_name = original_vdisk_name + '-clone-snapshot'
        snapshot_id = VDiskSetup.create_snapshot(
            vdisk_name=original_vdisk_name,
            vpool_name=vpool.name,
            snapshot_name=cls.PREFIX + 'snapshot')
        cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
            VDiskSetup.create_clone(
                vdisk_name=original_vdisk_name,
                vpool_name=vpool.name,
                new_vdisk_name=cloned_vdisk_name,
                storagerouter_ip=storagedriver_destination.storagerouter.
                ip,
                snapshot_id=snapshot_id)['vdisk_guid'])
        vdisks.append(cloned_vdisk)
    finally:
        # Clean-up runs even when a clone step failed; removes clones and
        # their parents in the proper order.
        VDiskRemover.remove_vdisks_with_structure(vdisks)
    cls.LOGGER.info("Finished validating clone vdisks")
def _execute_test(cls, amount_vdisks=AMOUNT_VDISKS):
    """
    Executes a offline migration

    For each vdisk: create it on a randomly chosen storagedriver, move it
    to a second (different) storagedriver and validate the move. All
    created vdisks are removed afterwards, even on failure.

    :param amount_vdisks: amount of vdisks to test
    :type amount_vdisks: int
    :return:
    """
    cls.LOGGER.info("Starting offline migrate test.")
    vpool = None
    # Get a suitable vpool (needs at least 2 storagedrivers to migrate between)
    for vp in VPoolHelper.get_vpools():
        if len(vp.storagedrivers) >= 2:
            vpool = vp
            break
    assert vpool is not None, "Not enough vPools to test. Requires 1 with at least 2 storagedrivers and found 0."
    ##########################
    # Setup base information #
    ##########################
    # Executor storagedriver_1 is current system
    std_1 = random.choice([st for st in vpool.storagedrivers])
    # Get a random other storagedriver to migrate to
    std_2 = random.choice([st for st in vpool.storagedrivers if st != std_1])
    # Cache to validate properties
    values_to_check = {
        'source_std': std_1.serialize(),
        'target_std': std_2.serialize()
    }
    ###############################
    # start deploying & migrating #
    ###############################
    created_vdisks = []
    try:
        for i in xrange(amount_vdisks):
            ################
            # create vdisk #
            ################
            vdisk_name = "{0}_{1}".format(cls.TEST_NAME, i)
            try:
                vdisk_guid = VDiskSetup.create_vdisk(vdisk_name=vdisk_name + '.raw',
                                                     vpool_name=vpool.name,
                                                     size=cls.AMOUNT_TO_WRITE * 5,
                                                     storagerouter_ip=std_1.storagerouter.ip)
                # Fetch to validate if it was properly created
                vdisk = VDiskHelper.get_vdisk_by_guid(vdisk_guid)
                created_vdisks.append(vdisk)
                values_to_check['vdisk'] = vdisk.serialize()
            except TimeOutError:
                cls.LOGGER.error("Creation of the vdisk has timed out.")
                raise
            # Fix: the original caught (RuntimeError, TimeOutError) here, but
            # TimeOutError is already handled by the clause above, making it
            # dead in this tuple.
            except RuntimeError as ex:
                cls.LOGGER.info("Creation of vdisk failed: {0}".format(ex))
                raise
            else:
                time.sleep(cls.SLEEP_TIME)
                try:
                    cls.LOGGER.info("Moving vdisk {0} from {1} to {2}".format(vdisk_guid, std_1.storage_ip, std_2.storage_ip))
                    VDiskSetup.move_vdisk(vdisk_guid=vdisk_guid,
                                          target_storagerouter_guid=std_2.storagerouter_guid)
                    time.sleep(cls.SLEEP_TIME)
                    cls.LOGGER.info("Validating move...")
                    cls._validate_move(values_to_check)
                except Exception as ex:
                    # Typo fix in log message: "migation" -> "migration"
                    cls.LOGGER.exception('Failed during migration: {0}'.format(ex))
                    raise
    finally:
        # Always clean up whatever got created, even on failure.
        for vdisk in created_vdisks:
            VDiskRemover.remove_vdisk(vdisk.guid)
    cls.LOGGER.info("Finished offline migrate test.")
def validate_add_extend_remove_vpool(cls, timeout=ADD_EXTEND_REMOVE_VPOOL_TIMEOUT):
    """
    Validate if we can add, extend and/or remove a vPool, testing the following scenarios:
    * Normal with no accelerated backend
    * Accelerated vPool with hdd_backend & ssd_backend

    INFO:
    * at least 2 storagerouters should be available
    * at least 2 backends should be available with default preset

    :param timeout: specify a timeout
    :type timeout: int
    :return:
    """
    cls.LOGGER.info("Starting to validate add-extend-remove vpool")
    storagerouter_ips = []
    # Filter storagerouters without required roles
    for storagerouter_ip in StoragerouterHelper.get_storagerouter_ips():
        try:
            RoleValidation.check_required_roles(VPoolSetup.REQUIRED_VPOOL_ROLES, storagerouter_ip, "LOCAL")
            storagerouter_ips.append(storagerouter_ip)
            cls.LOGGER.info("Added `{0}` to list of eligible storagerouters".format(storagerouter_ip))
        except RuntimeError as ex:
            cls.LOGGER.warning("Did not add `{0}` to list of eligible "
                               "storagerouters because: {1}".format(storagerouter_ip, ex))
            pass
    assert len(storagerouter_ips) > 1, "We need at least 2 storagerouters with valid roles: {0}"\
        .format(storagerouter_ips)
    alba_backends = BackendHelper.get_alba_backends()
    assert len(alba_backends) >= 2, "We need at least 2 or more backends!"

    # Global vdisk details: the test vdisk is always deployed on the first
    # eligible storagerouter.
    vdisk_deployment_ip = storagerouter_ips[0]

    # Determine backends (2)
    hdd_backend = alba_backends[0]
    ssd_backend = alba_backends[1]

    # Add preset to all alba_backends (we only use the first two as seen above)
    for alba_backend in alba_backends[0:2]:
        cls.LOGGER.info("Adding custom preset to backend {0}".format(alba_backend.name))
        preset_result = BackendSetup.add_preset(albabackend_name=alba_backend.name,
                                                preset_details=cls.PRESET,
                                                timeout=cls.PRESET_CREATE_TIMEOUT)
        assert preset_result is True, 'Failed to add preset to backend {0}'.format(alba_backend.name)
        cls.LOGGER.info("Finished adding custom preset to backend {0}".format(alba_backend.name))

    # Vpool configs, regressing https://github.com/openvstorage/alba/issues/560 & more
    vpool_configs = {
        "no_fragment_cache_on_disk": {
            "strategy": {"cache_on_read": False, "cache_on_write": False},
            "location": "disk"
        },
        "no_fragment_cache_on_accel": {
            "strategy": {"cache_on_read": False, "cache_on_write": False},
            "location": "backend",
            "backend": {
                "name": ssd_backend.name,
                "preset": cls.PRESET['name']
            }
        }
    }

    for cfg_name, cfg in vpool_configs.iteritems():
        # Create vpool on every eligible storagerouter (first call adds,
        # subsequent calls extend the same vpool).
        block_cache_cfg = None
        # Block cache is an enterprise-edition-only feature.
        if SystemHelper.get_ovs_version().lower() == 'ee':
            block_cache_cfg = cfg
        for storagerouter_ip in storagerouter_ips:
            cls.LOGGER.info("Add/extend vPool `{0}` on storagerouter `{1}`".format(cls.VPOOL_NAME, storagerouter_ip))
            start = time.time()
            try:
                cls._add_vpool(vpool_name=cls.VPOOL_NAME,
                               fragment_cache_cfg=cfg,
                               block_cache_cfg=block_cache_cfg,
                               albabackend_name=hdd_backend.name,
                               timeout=timeout,
                               preset_name=cls.PRESET['name'],
                               storagerouter_ip=storagerouter_ip)
            except TimeOutError:
                cls.LOGGER.warning('Adding/extending the vpool has timed out after {0}s. Polling for another {1}s.'
                                   .format(timeout, cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING - timeout))
                # Lets be a bit forgiving and give the fwk 5 mins to actually complete the task
                vpool = VPoolHelper.get_vpool_by_name(cls.VPOOL_NAME)
                while vpool.status != 'RUNNING':
                    if time.time() - start > cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING:
                        raise RuntimeError('The vpool was not added or extended after {0}s'.format(cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING))
                    cls.LOGGER.warning('Vpool status is still {0} after {1}s.'.format(vpool.status, time.time() - start))
                    time.sleep(1)
                    # discard() drops the cached DAL state so the next status
                    # read is fresh.
                    vpool.discard()
                cls.LOGGER.warning('The vpool was added or extended after {0}s.'.format(time.time() - start))
            except RuntimeError as ex:
                cls.LOGGER.error('Adding/extending the vpool has failed with {0}.'.format(str(ex)))
                raise
        # Check #proxies
        vpool = VPoolHelper.get_vpool_by_name(cls.VPOOL_NAME)
        for storagedriver in vpool.storagedrivers:
            assert len(storagedriver.alba_proxies) == 2, 'The vpool did not get setup with 2 proxies. Found {} instead.'.format(len(storagedriver.alba_proxies))
        # Deploy a vdisk
        vdisk_name = cls.PREFIX + cfg_name
        cls.LOGGER.info("Starting to create vdisk `{0}` on vPool `{1}` with size `{2}` on node `{3}`"
                        .format(vdisk_name, cls.VPOOL_NAME, cls.VDISK_SIZE, vdisk_deployment_ip))
        VDiskSetup.create_vdisk(vdisk_name=vdisk_name + '.raw',
                                vpool_name=cls.VPOOL_NAME,
                                size=cls.VDISK_SIZE,
                                storagerouter_ip=vdisk_deployment_ip,
                                timeout=cls.VDISK_CREATE_TIMEOUT)
        cls.LOGGER.info("Finished creating vdisk `{0}`".format(vdisk_name))
        cls.LOGGER.info("Starting to delete vdisk `{0}`".format(vdisk_name))
        VDiskRemover.remove_vdisk_by_name(vdisk_name, cls.VPOOL_NAME)
        cls.LOGGER.info("Finished deleting vdisk `{0}`".format(vdisk_name))
        # Delete vpool: shrink once per storagerouter; the last shrink removes
        # the vpool entirely.
        for storagerouter_ip in storagerouter_ips:
            storagedrivers_to_delete = len(vpool.storagedrivers)
            cls.LOGGER.info("Deleting vpool `{0}` on storagerouter `{1}`".format(cls.VPOOL_NAME, storagerouter_ip))
            try:
                VPoolRemover.remove_vpool(vpool_name=cls.VPOOL_NAME,
                                          storagerouter_ip=storagerouter_ip,
                                          timeout=timeout)
            except TimeOutError:
                try:
                    vpool.discard()  # Discard is needed to update the vpool status as it was running before
                    # NOTE(review): `start` here still holds the timestamp set
                    # in the add/extend loop above, so this forgiving window is
                    # measured from the wrong moment — confirm and consider
                    # resetting `start` before the removal attempt.
                    while vpool.status != 'RUNNING':
                        cls.LOGGER.warning('Removal/shrinking the vpool has timed out after {0}s. Polling for another {1}s.'
                                           .format(timeout, cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING - timeout))
                        if time.time() - start > cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING:
                            raise RuntimeError('The vpool was not removed or extended after {0}s'.format(cls.ADD_EXTEND_REMOVE_VPOOL_TIMEOUT_FORGIVING))
                        cls.LOGGER.warning('Vpool status is still {0} after {1}s.'.format(vpool.status, time.time() - start))
                        time.sleep(1)
                        vpool.discard()
                except ObjectNotFoundException:
                    # The vpool object disappearing is expected when the last
                    # storagedriver was removed.
                    if storagedrivers_to_delete != 1:  # Should be last one
                        raise
            except RuntimeError as ex:
                cls.LOGGER.error('Shrinking/removing the vpool has failed with {0}.'.format(str(ex)))
                raise
        cls.LOGGER.info('Vpool has been fully removed.')
    # Delete presets
    for alba_backend in alba_backends[0:2]:
        cls.LOGGER.info("Removing custom preset from backend {0}".format(alba_backend.name))
        remove_preset_result = BackendRemover.remove_preset(albabackend_name=alba_backend.name,
                                                            preset_name=cls.PRESET['name'],
                                                            timeout=cls.PRESET_REMOVE_TIMEOUT)
        assert remove_preset_result is True, 'Failed to remove preset from backend {0}'.format(alba_backend.name)
        cls.LOGGER.info("Finshed removing custom preset from backend {0}".format(alba_backend.name))
    cls.LOGGER.info("Finished to validate add-extend-remove vpool")
def validate_vdisk_clone(cls, amount_vdisks=AMOUNT_VDISKS, amount_to_write=AMOUNT_TO_WRITE):
    """
    Validate if vdisk deployment works via various ways
    INFO: 1 vPool should be available on 2 storagerouters

    Memory-leak regression: templates a vdisk, repeatedly clones it from the
    template, writes to each clone with fio and deletes it again, while
    logging volumedriver memory usage before and after.

    :param amount_vdisks: number of clones to create & destroy
    :param amount_to_write: MiB written to each clone by fio
    :return:
    """
    cls.LOGGER.info("Starting to regress template memleak vdisks")
    vpools = VPoolHelper.get_vpools()
    assert len(vpools) >= 1, "Not enough vPools to test"
    try:
        vpool = next(
            (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
    except StopIteration:
        assert False, "Not enough Storagedrivers to test"
    # setup base information
    storagedriver_source = vpool.storagedrivers[0]
    client = SSHClient(storagedriver_source.storage_ip, username='******')
    # create required vdisk for test
    vdisk_name = VDiskTemplateChecks.PREFIX + '1'
    assert VDiskSetup.create_vdisk(
        vdisk_name=vdisk_name + '.raw',
        vpool_name=vpool.name,
        size=VDiskTemplateChecks.VDISK_SIZE,
        storagerouter_ip=storagedriver_source.storagerouter.ip) is not None
    time.sleep(VDiskTemplateChecks.TEMPLATE_SLEEP_AFTER_CREATE)
    ##################
    # template vdisk #
    ##################
    VDiskSetup.set_vdisk_as_template(vdisk_name=vdisk_name + '.raw',
                                     vpool_name=vpool.name)
    time.sleep(VDiskTemplateChecks.TEMPLATE_SLEEP_AFTER_CREATE)
    ######################
    # log current memory #
    ######################
    memory_usage_beginning = StatisticsHelper.get_current_memory_usage(
        storagedriver_source.storage_ip)
    cls.LOGGER.info("Starting memory usage monitor: {0}/{1}".format(
        memory_usage_beginning[0], memory_usage_beginning[1]))
    # pid of the volumedriver process serving this vpool
    pid = int(
        client.run(
            "pgrep -a volumedriver | grep {0} | cut -d ' ' -f 1".format(
                vpool.name), allow_insecure=True))
    cls.LOGGER.info(
        "Starting extended memory monitor on pid {0}: \n{1}".format(
            pid,
            StatisticsHelper.get_current_memory_usage_of_process(
                storagedriver_source.storage_ip, pid)))
    ##################################################################
    # create vdisks from template, perform fio and delete them again #
    ##################################################################
    for vdisk in xrange(amount_vdisks):
        # create vdisk from template
        clone_vdisk_name = vdisk_name + '-template-' + str(vdisk)
        VDiskSetup.create_from_template(
            vdisk_name=vdisk_name + '.raw',
            vpool_name=vpool.name,
            new_vdisk_name=clone_vdisk_name + '.raw',
            storagerouter_ip=storagedriver_source.storagerouter.ip)
        # perform fio test (writes amount_to_write MiB to the clone)
        client.run([
            "fio", "--name=test",
            "--filename=/mnt/{0}/{1}.raw".format(vpool.name,
                                                 clone_vdisk_name),
            "--ioengine=libaio", "--iodepth=4", "--rw=write", "--bs=4k",
            "--direct=1", "--size={0}M".format(amount_to_write),
            "--output-format=json",
            "--output={0}.json".format(vdisk_name)
        ])
        # delete vdisk
        time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
        VDiskRemover.remove_vdisk_by_name(vdisk_name=clone_vdisk_name,
                                          vpool_name=vpool.name)
    ###################
    # remove template #
    ###################
    time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
    VDiskRemover.remove_vtemplate_by_name(vdisk_name=vdisk_name,
                                          vpool_name=vpool.name)
    ######################
    # log current memory #
    ######################
    memory_usage_ending = StatisticsHelper.get_current_memory_usage(
        storagedriver_source.storage_ip)
    cls.LOGGER.info("Finished memory usage monitor: {0}/{1}".format(
        memory_usage_ending[0], memory_usage_ending[1]))
    pid = int(
        client.run(
            "pgrep -a volumedriver | grep {0} | cut -d ' ' -f 1".format(
                vpool.name), allow_insecure=True))
    cls.LOGGER.info(
        "Finished extended memory monitor on pid {0}: \n{1}".format(
            pid,
            StatisticsHelper.get_current_memory_usage_of_process(
                storagedriver_source.storage_ip, pid)))
    cls.LOGGER.info("Finished to regress template memleak vdisks")
def validate_vdisk_clone(cls):
    """
    Validate if vdisk deployment works via various ways
    INFO: 1 vPool should be available on 2 storagerouters

    Covers two negative template scenarios:
    * removing a template that still has clones must fail
    * templating a vdisk that is itself a clone must fail
    :return:
    """
    cls.LOGGER.info("Starting to validate template vdisks")
    vpools = VPoolHelper.get_vpools()
    assert len(vpools) >= 1, "Not enough vPools to test"
    try:
        vpool = next(
            (vpool for vpool in vpools if len(vpool.storagedrivers) >= 2))
    except StopIteration:
        assert False, "Not enough Storagedrivers to test"
    # setup base information
    storagedriver_source = vpool.storagedrivers[0]
    vdisks = []
    try:
        # create required vdisk for test
        parent_vdisk_name = '{0}_{1}'.format(cls.PREFIX, str(1).zfill(3))
        parent_vdisk = VDiskHelper.get_vdisk_by_guid(
            VDiskSetup.create_vdisk(
                vdisk_name=parent_vdisk_name,
                vpool_name=vpool.name,
                size=cls.VDISK_SIZE,
                storagerouter_ip=storagedriver_source.storagerouter.ip))
        vdisks.append(parent_vdisk)
        time.sleep(cls.TEMPLATE_SLEEP_AFTER_CREATE)
        # Create vdisk template #
        VDiskSetup.set_vdisk_as_template(vdisk_name=parent_vdisk_name,
                                         vpool_name=vpool.name)
        time.sleep(cls.TEMPLATE_SLEEP_AFTER_CREATE)
        clone_vdisk_name = '{0}_from-template'.format(parent_vdisk_name)
        clone_vdisk = VDiskHelper.get_vdisk_by_guid(
            VDiskSetup.create_from_template(
                vdisk_name=parent_vdisk_name,
                vpool_name=vpool.name,
                new_vdisk_name=clone_vdisk_name,
                storagerouter_ip=storagedriver_source.storagerouter.ip)
            ['vdisk_guid'])
        vdisks.append(clone_vdisk)
        time.sleep(cls.TEMPLATE_SLEEP_BEFORE_DELETE)
        # try to delete template with clones (should fail) #
        try:
            VDiskRemover.remove_vtemplate_by_name(
                vdisk_name=parent_vdisk_name, vpool_name=vpool.name)
        except HttpException:
            cls.LOGGER.info(
                "Removing vtemplate `{0}` has failed as expected (because of leftover clones)!"
                .format(parent_vdisk_name))
        else:
            # Bug fix: message was previously logged/raised without
            # .format(), leaving a literal `{0}` in the output.
            error_msg = "Removing vtemplate `{0}` should have failed!".format(parent_vdisk_name)
            cls.LOGGER.error(error_msg)
            raise RuntimeError(error_msg)
    finally:
        while len(vdisks) > 0:
            vdisk = vdisks.pop()
            VDiskRemover.remove_vdisk(vdisk.guid)
    try:
        # template vdisk from clone (should fail) #
        parent_vdisk = VDiskHelper.get_vdisk_by_guid(
            VDiskSetup.create_vdisk(
                vdisk_name=parent_vdisk_name,
                vpool_name=vpool.name,
                size=cls.VDISK_SIZE,
                storagerouter_ip=storagedriver_source.storagerouter.ip))
        vdisks.append(parent_vdisk)
        # create a clone from the vdisk
        clone_vdisk_name = '{0}_clone'.format(parent_vdisk_name)
        cloned_vdisk = VDiskHelper.get_vdisk_by_guid(
            VDiskSetup.create_clone(
                vdisk_name=parent_vdisk_name,
                vpool_name=vpool.name,
                new_vdisk_name=clone_vdisk_name,
                storagerouter_ip=storagedriver_source.storagerouter.ip)
            ['vdisk_guid'])
        vdisks.append(cloned_vdisk)
        # try to create a vTemplate from a clone
        try:
            VDiskSetup.set_vdisk_as_template(vdisk_name=clone_vdisk_name,
                                             vpool_name=vpool.name)
        except RuntimeError:
            cls.LOGGER.info(
                "Setting vdisk `{0}` as template failed as expected (because vdisk is clone)!"
                .format(clone_vdisk_name))
        else:
            # Bug fix: the failure RuntimeError was previously raised INSIDE
            # the try-block and immediately swallowed by its own
            # `except RuntimeError`, so an unexpected success was mis-logged
            # as "failed as expected". The else-clause raise escapes the
            # handler and propagates correctly.
            error_msg = "Setting vdisk `{0}` as template should have failed!".format(
                clone_vdisk_name)
            cls.LOGGER.error(error_msg)
            raise RuntimeError(error_msg)
    finally:
        # Remove clones first; parents only become removable afterwards.
        parent_vdisks = []
        while len(vdisks) > 0:
            vdisk = vdisks.pop()
            if vdisk.parent_vdisk_guid is None:
                parent_vdisks.append(vdisk)
                continue
            VDiskRemover.remove_vdisk(vdisk.guid)
        for parent_vdisk in parent_vdisks:
            VDiskRemover.remove_vdisk(parent_vdisk.guid)
    cls.LOGGER.info("Finished to validate template vdisks")
def _execute_test(cls):
    """
    Validate if DTL is configured as desired

    REQUIREMENTS:
    * 1 vPool should be available with 1 storagedriver
    * 1 vPool should be available with 2 or more storagedrivers in 2 separate domains
    OPTIONAL:
    * 1 vPool with 1 storagedriver with disabled DTL
    :return:
    """
    cls.LOGGER.info("Starting to validate the basic DTL")
    ##########################
    # get deployment details #
    ##########################
    vpools = VPoolHelper.get_vpools()
    assert len(vpools) >= 1, "Not enough vPools to test"
    # Get a suitable vpools: one with a single storagedriver, one with
    # multiple, and (optionally) one with DTL disabled.
    vpool_single_sd = None
    vpool_multi_sd = None
    vpool_dtl_disabled = None
    for vp in VPoolHelper.get_vpools():
        if vp.configuration['dtl_mode'] != VPoolHelper.DtlStatus.DISABLED:
            if len(vp.storagedrivers) == 1 and vpool_single_sd is None:
                vpool_single_sd = vp
                cls.LOGGER.info(
                    "vPool `{0}` has been chosen for SINGLE vPool DTL tests"
                    .format(vp.name))
            elif len(vp.storagedrivers) >= 2 and vpool_multi_sd is None:
                vpool_multi_sd = vp
                cls.LOGGER.info(
                    "vPool `{0}` has been chosen for MULTI vPool DTL tests"
                    .format(vp.name))
            else:
                cls.LOGGER.info(
                    "vPool `{0}` is not suited for tests".format(vp.name))
        else:
            cls.LOGGER.info(
                "vPool `{0}` with DISABLED DTL is available and will be tested!"
                .format(vp.name))
            vpool_dtl_disabled = vp
    assert vpool_single_sd is not None, "A vPool should be available with 1 storagedriver"
    assert vpool_multi_sd is not None, "A vPool should be available with 2 or more storagedrivers"
    # pick a random storagedriver
    storagedriver_single = vpool_single_sd.storagedrivers[0]
    storagedriver_multi = random.choice(vpool_multi_sd.storagedrivers)
    storagedrivers = [storagedriver_single, storagedriver_multi]
    # check disabled DTL
    storagedriver_disabled_dtl = None
    if vpool_dtl_disabled is not None:
        storagedriver_disabled_dtl = random.choice(
            vpool_dtl_disabled.storagedrivers)
        storagedrivers.append(storagedriver_disabled_dtl)
    # key = amount of storagedrivers or a_s
    # value = list with the vpool & storagedriver to test
    vpools_to_test = {
        1: [{
            "vpool": vpool_single_sd,
            "storagedriver": storagedriver_single
        }],
        2: [{
            "vpool": vpool_multi_sd,
            "storagedriver": storagedriver_multi
        }]
    }
    # check if disabled DTL vpool needs to be added
    if vpool_dtl_disabled is not None:
        a_s = len(vpool_dtl_disabled.storagedrivers)
        v_s = {
            "vpool": vpool_dtl_disabled,
            "storagedriver": storagedriver_disabled_dtl
        }
        if a_s in vpools_to_test:
            vpools_to_test[a_s].append(v_s)
        else:
            vpools_to_test[a_s] = [v_s]
    ##############
    # start test #
    ##############
    for a_s, vpools in vpools_to_test.iteritems():
        start = time.time()
        for vpool in vpools:
            # NOTE: `vpool` here is a {"vpool": ..., "storagedriver": ...}
            # dict, not a DAL vpool object.
            cls.LOGGER.info(
                "Starting DTL test with vPool {0} and {1} storagedrivers".
                format(vpool['vpool'].name,
                       len(vpool['vpool'].storagedrivers)))
            vdisk_name = "{0}-{1}-{2}".format(
                cls.VDISK_NAME, vpool['vpool'].name,
                str(len(vpool['vpool'].storagedrivers)))
            try:
                vdisk_guid = VDiskSetup.create_vdisk(
                    vdisk_name=vdisk_name + '.raw',
                    vpool_name=vpool['vpool'].name,
                    size=cls.SIZE_VDISK,
                    storagerouter_ip=vpool['storagedriver'].storagerouter.
                    ip)
                # Fetch to validate if it was properly created
                vdisk = VDiskHelper.get_vdisk_by_guid(
                    vdisk_guid=vdisk_guid)
            except TimeOutError:
                cls.LOGGER.error("Creation of the vDisk has timed out.")
                raise
            except RuntimeError as ex:
                cls.LOGGER.info("Creation of vDisk failed: {0}".format(ex))
                raise
            else:
                #####################################
                # check DTL status after deployment #
                #####################################
                # Expected: STANDALONE with one storagedriver, SYNC with two
                # or more, DISABLED only when the vpool's DTL is disabled.
                correct_msg = "vDisk {0} with {1} storagedriver(s) has correct DTL status: ".format(
                    vdisk_name, a_s)
                if a_s == 1 and vdisk.dtl_status == VDiskHelper.DtlStatus.STANDALONE:
                    cls.LOGGER.info(correct_msg + vdisk.dtl_status)
                elif a_s >= 2 and vdisk.dtl_status == VDiskHelper.DtlStatus.SYNC:
                    cls.LOGGER.info(correct_msg + vdisk.dtl_status)
                elif vdisk.dtl_status == VDiskHelper.DtlStatus.DISABLED and vpool[
                        'vpool'].configuration[
                            'dtl_mode'] == VPoolHelper.DtlStatus.DISABLED:
                    cls.LOGGER.info(
                        correct_msg +
                        " Note: vdisk DTL is disabled but vPool DTL is also disabled!"
                    )
                else:
                    error_msg = "vDisk {0} with {1} storagedriver(s) has WRONG DTL status: {2}".format(
                        vdisk_name, a_s, vdisk.dtl_status)
                    cls.LOGGER.error(error_msg)
                    raise RuntimeError(error_msg)
                ################################
                # try to change the DTL config #
                ################################
                base_config = {
                    "sco_size": 4,
                    "dtl_mode": VPoolHelper.DtlStatus.SYNC,
                    "write_buffer": 512
                }
                if a_s == 1:
                    ########################################################################################
                    # change config to domain with non existing storagedrivers of this vpool (should fail) #
                    ########################################################################################
                    cls.LOGGER.info(
                        "Starting test: change config to domain with non existing storagedrivers "
                        "of this vpool (should fail)")
                    # Pick a random domain this storagerouter is NOT part of.
                    base_config['dtl_target'] = [
                        random.choice([
                            domain_guid
                            for domain_guid in DomainHelper.get_domain_guids()
                            if domain_guid not in vpool['storagedriver'].
                            storagerouter.regular_domains
                        ])
                    ]
                    cls.LOGGER.info("Changing dtl_target to: {0}".format(
                        DomainHelper.get_domain_by_guid(
                            domain_guid=base_config['dtl_target']
                            [0]).name))
                    try:
                        cls.LOGGER.info(base_config)
                        VDiskSetup.set_config_params(
                            vdisk_name=vdisk_name + '.raw',
                            vpool_name=vpool['vpool'].name,
                            config=base_config)
                        # Note: raising Exception (not RuntimeError) here is
                        # deliberate -- it must not be caught by the
                        # except RuntimeError clause below.
                        error_msg = "Changing config to a domain with non existing storagedrivers should have failed with vdisk: {0}!".format(
                            vdisk_name)
                        cls.LOGGER.error(error_msg)
                        raise Exception(error_msg)
                    except TimeOutError:
                        cls.LOGGER.error(
                            "Changing config to a same domain with only 1 storagedriver has timed out."
                        )
                        raise
                    except RuntimeError:
                        cls.LOGGER.info(
                            "Changing config to a domain with non existing storagedrivers has failed as expected!"
                        )
                    ##############################################################################################
                    # change config to domain where there are other storagedrivers but not of ours (should fail) #
                    ##############################################################################################
                    cls.LOGGER.info(
                        "Starting test: change config to domain where there are other storagedrivers but not of ours (should fail)"
                    )
                    filtered_domains = list(
                        set(DomainHelper.get_domain_guids()) -
                        set(vpool['storagedriver'].storagerouter.
                            regular_domains))
                    base_config['dtl_target'] = [filtered_domains[0]]
                    cls.LOGGER.info(
                        "Current vdisk domain location: {0}".format(
                            DomainHelper.get_domain_by_guid(
                                domain_guid=vpool['storagedriver'].
                                storagerouter.regular_domains[0]).name))
                    cls.LOGGER.info("Changing dtl_target to: {0}".format(
                        DomainHelper.get_domain_by_guid(
                            domain_guid=base_config['dtl_target']
                            [0]).name))
                    try:
                        VDiskSetup.set_config_params(
                            vdisk_name=vdisk_name + '.raw',
                            vpool_name=vpool['vpool'].name,
                            config=base_config)
                        error_msg = "Changing config to a same domain with only 1 storagedriver should have failed with vdisk: {0}!".format(
                            vdisk_name)
                        cls.LOGGER.error(error_msg)
                        raise Exception(error_msg)
                    except TimeOutError:
                        cls.LOGGER.error(
                            "Changing config to a domain with non existing storagedrivers has timed out."
                        )
                        raise
                    except RuntimeError:
                        cls.LOGGER.info(
                            "Changing config to a same domain with only 1 storagedriver has failed as expected!"
                        )
                elif a_s >= 2:
                    #######################################################################
                    # change config to domain with active storagedrivers (should succeed) #
                    #######################################################################
                    cls.LOGGER.info(
                        "Starting test: change config to domain with active storagedrivers (should succeed)"
                    )
                    # change current target domain to other target domain
                    current_vdisk_domains = StoragedriverHelper.get_storagedriver_by_id(
                        storagedriver_id=vdisk.storagedriver_id
                    ).storagerouter.regular_domains
                    cls.LOGGER.info(
                        "Currently the vdisk is living in: {0}".format(
                            current_vdisk_domains))
                    vpool_domains = VPoolHelper.get_domains_by_vpool(
                        vpool_name=vdisk.vpool.name)
                    cls.LOGGER.info(
                        "Currently the vpool {0} is available in: {1}".
                        format(vdisk.vpool.name, vpool_domains))
                    future_domains = list(
                        set(vpool_domains) - set(current_vdisk_domains))
                    cls.LOGGER.info(
                        "DTL will be moved to other domain: {0}".format(
                            future_domains))
                    base_config['dtl_target'] = future_domains
                    # change settings
                    try:
                        VDiskSetup.set_config_params(
                            vdisk_name=vdisk_name + '.raw',
                            vpool_name=vpool['vpool'].name,
                            config=base_config)
                    except TimeOutError:
                        cls.LOGGER.error(
                            "Changing config to a same domain with only 1 storagedriver has timed out."
                        )
                        raise
                    except RuntimeError:
                        cls.LOGGER.error(
                            "Changing config to a same domain with only 1 storagedriver was unsuccesful!"
                        )
                        raise
                    cls.LOGGER.info(
                        "Changing config to a same domain with only 1 storagedriver was successful!"
                    )
                cls.LOGGER.info("Removing vDisk {0}".format(vdisk.name))
                VDiskRemover.remove_vdisk(vdisk_guid=vdisk.guid)
                cls.LOGGER.info("Finished removing vDisk {0}".format(
                    vdisk.name))
        end = time.time()
        # display run time
        cls.LOGGER.info("Run testing the DTL took {0} seconds".format(
            int(end - start)))
    cls.LOGGER.info("Finished to validate the basic DTL")