def test_storagedriver_config_set(self):
    """
    Validates whether storagedriver configuration is generated as expected
    """
    PersistentFactory.get_client().set('ovs.storagedriver.mds.safety', 3)
    # Build the model: storagedrivers are (<id>, <vpool_id>, <sr_id>), mds_services are (<id>, <sd_id>)
    structure = self._build_service_structure(
        {'vpools': [1, 2],
         'storagerouters': [1, 2, 3, 4, 5, 6],
         'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 2, 4), (6, 2, 5), (7, 2, 6)],
         'mds_services': [(1, 1), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 7)]})
    vpools, storagerouters, storagedrivers, services, mds_services, _ = structure
    # Give every MDS service 10 vDisks, each batch starting at a fresh id offset
    created_vdisks = {}
    disk_offset = 1
    for mds_service in mds_services.itervalues():
        created_vdisks.update(self._create_vdisks_for_mds_service(10, disk_offset, mds_service=mds_service))
        disk_offset += 10
    # (service index, capacity) pairs; trailing comments give <storagerouter>, <vpool>
    for service_index, service_capacity in [(1, 11),   # on 1, vpool 1
                                            (2, 20),   # on 1, vpool 1
                                            (3, 12),   # on 2, vpool 1
                                            (4, 14),   # on 3, vpool 1
                                            (5, 16),   # on 4, vpool 1
                                            (6, 11),   # on 4, vpool 2
                                            (7, 13),   # on 5, vpool 2
                                            (8, 19),   # on 6, vpool 2
                                            (9, 15)]:  # on 6, vpool 2
        mds_services[service_index].capacity = service_capacity
        mds_services[service_index].save()
    config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
    expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 2},
                                         {'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.3', 'port': 4}],
                storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
                                         {'host': '10.0.0.1', 'port': 2},
                                         {'host': '10.0.0.4', 'port': 5}],
                storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
                                         {'host': '10.0.0.1', 'port': 2},
                                         {'host': '10.0.0.4', 'port': 5}],
                storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.1', 'port': 2},
                                         {'host': '10.0.0.3', 'port': 4}]}
    self.assertDictEqual(config, expected, 'Test 1. Got:\n{0}'.format(json.dumps(config, indent=2)))
    # Lowering the capacity of service 2 changes the generated ordering (see the new expected set)
    mds_services[2].capacity = 10  # on 1, vpool 1
    mds_services[2].save()
    config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
    expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 1},
                                         {'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.3', 'port': 4}],
                storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
                                         {'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.3', 'port': 4}],
                storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
                                         {'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.2', 'port': 3}],
                storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.3', 'port': 4},
                                         {'host': '10.0.0.2', 'port': 3}]}
    self.assertDictEqual(config, expected, 'Test 2. Got:\n{0}'.format(json.dumps(config, indent=2)))
def add_vpool(cls, parameters):
    """
    Add a vPool to the machine this task is running on
    :param parameters: Parameters for vPool creation
    :type parameters: dict
    :return: None
    :rtype: NoneType
    """
    cls._logger.debug('Adding vpool. Parameters: {}'.format(parameters))
    # VALIDATIONS
    if not isinstance(parameters, dict):
        raise ValueError('Parameters passed to create a vPool should be of type dict')

    # Check StorageRouter existence
    storagerouter = StorageRouterList.get_by_ip(ip=parameters.get('storagerouter_ip'))
    if storagerouter is None:
        raise RuntimeError('Could not find StorageRouter')

    # Validate requested vPool configurations
    vp_installer = VPoolInstaller(name=parameters.get('vpool_name'))
    vp_installer.validate(storagerouter=storagerouter)

    # Validate requested StorageDriver configurations
    cls._logger.info('vPool {0}: Validating StorageDriver configurations'.format(vp_installer.name))
    sd_installer = StorageDriverInstaller(vp_installer=vp_installer,
                                          configurations={'storage_ip': parameters.get('storage_ip'),
                                                          'caching_info': parameters.get('caching_info'),
                                                          'backend_info': {'main': parameters.get('backend_info'),
                                                                           StorageDriverConfiguration.CACHE_BLOCK: parameters.get('backend_info_bc'),
                                                                           StorageDriverConfiguration.CACHE_FRAGMENT: parameters.get('backend_info_fc')},
                                                          'connection_info': {'main': parameters.get('connection_info'),
                                                                              StorageDriverConfiguration.CACHE_BLOCK: parameters.get('connection_info_bc'),
                                                                              StorageDriverConfiguration.CACHE_FRAGMENT: parameters.get('connection_info_fc')},
                                                          'sd_configuration': parameters.get('config_params')})

    partitions_mutex = volatile_mutex('add_vpool_partitions_{0}'.format(storagerouter.guid))
    try:
        # VPOOL CREATION
        # Create the vPool as soon as possible in the process to be displayed in the GUI (INSTALLING/EXTENDING state)
        if vp_installer.is_new is True:
            vp_installer.create(rdma_enabled=sd_installer.rdma_enabled)
            vp_installer.configure_mds(config=parameters.get('mds_config_params', {}))
        else:
            vp_installer.update_status(status=VPool.STATUSES.EXTENDING)

        # ADDITIONAL VALIDATIONS
        # Check StorageRouter connectivity
        cls._logger.info('vPool {0}: Validating StorageRouter connectivity'.format(vp_installer.name))
        linked_storagerouters = [storagerouter]
        if vp_installer.is_new is False:
            # When extending, every StorageRouter already hosting this vPool must be reachable too
            linked_storagerouters += [sd.storagerouter for sd in vp_installer.vpool.storagedrivers]

        sr_client_map = SSHClient.get_clients(endpoints=linked_storagerouters, user_names=['ovs', 'root'])
        offline_nodes = sr_client_map.pop('offline')
        if storagerouter in offline_nodes:
            raise RuntimeError('Node on which the vPool is being {0} is not reachable'.format('created' if vp_installer.is_new is True else 'extended'))

        sr_installer = StorageRouterInstaller(root_client=sr_client_map[storagerouter]['root'],
                                              sd_installer=sd_installer,
                                              vp_installer=vp_installer,
                                              storagerouter=storagerouter)

        # When 2 or more jobs simultaneously run on the same StorageRouter, we need to check and create the StorageDriver partitions in locked context
        partitions_mutex.acquire(wait=60)
        sr_installer.partition_info = StorageRouterController.get_partition_info(storagerouter_guid=storagerouter.guid)
        sr_installer.validate_vpool_extendable()
        sr_installer.validate_global_write_buffer(requested_size=parameters.get('writecache_size', 0))
        sr_installer.validate_local_cache_size(requested_proxies=parameters.get('parallelism', {}).get('proxies', 2))

        # MODEL STORAGEDRIVER AND PARTITION JUNCTIONS
        sd_installer.create()
        sd_installer.create_partitions()
        partitions_mutex.release()

        vp_installer.refresh_metadata()
    except Exception:
        cls._logger.exception('Something went wrong during the validation or modeling of vPool {0} on StorageRouter {1}'.format(vp_installer.name, storagerouter.name))
        # Release the mutex again in case the failure happened while it was held
        partitions_mutex.release()
        vp_installer.revert_vpool(status=VPool.STATUSES.RUNNING)
        raise

    # Arakoon setup: retry the voldrv Arakoon checkup for at most 300 seconds
    counter = 0
    while counter < 300:
        try:
            if StorageDriverController.manual_voldrv_arakoon_checkup() is True:
                break
        except Exception:
            cls._logger.exception('Arakoon checkup for voldrv cluster failed')
            vp_installer.revert_vpool(status=VPool.STATUSES.RUNNING)
            raise
        counter += 1
        time.sleep(1)
    if counter == 300:
        vp_installer.revert_vpool(status=VPool.STATUSES.RUNNING)
        raise RuntimeError('Arakoon checkup for the StorageDriver cluster could not be started')

    # Cluster registry
    try:
        vp_installer.configure_cluster_registry(allow_raise=True)
    except Exception:
        # A brand new vPool can be fully reverted; an extended one is left in FAILURE for inspection
        if vp_installer.is_new is True:
            vp_installer.revert_vpool(status=VPool.STATUSES.RUNNING)
        else:
            vp_installer.revert_vpool(status=VPool.STATUSES.FAILURE)
        raise

    try:
        sd_installer.setup_proxy_configs()
        sd_installer.configure_storagedriver_service()
        DiskController.sync_with_reality(storagerouter.guid)
        MDSServiceController.prepare_mds_service(storagerouter=storagerouter, vpool=vp_installer.vpool)

        # Update the MDS safety if changed via API (vpool.configuration will be available at this point also for the newly added StorageDriver)
        vp_installer.vpool.invalidate_dynamics('configuration')
        if vp_installer.mds_safety is not None and vp_installer.vpool.configuration['mds_config']['mds_safety'] != vp_installer.mds_safety:
            Configuration.set(key='/ovs/vpools/{0}/mds_config|mds_safety'.format(vp_installer.vpool.guid),
                              value=vp_installer.mds_safety)

        sd_installer.start_services()  # Create and start watcher volumedriver, DTL, proxies and StorageDriver services

        # Post creation/extension checkups
        mds_config_set = MDSServiceController.get_mds_storagedriver_config_set(vpool=vp_installer.vpool, offline_nodes=offline_nodes)
        for sr, clients in sr_client_map.iteritems():
            for current_storagedriver in [sd for sd in sr.storagedrivers if sd.vpool_guid == vp_installer.vpool.guid]:
                storagedriver_config = StorageDriverConfiguration(vpool_guid=vp_installer.vpool.guid,
                                                                  storagedriver_id=current_storagedriver.storagedriver_id)
                if storagedriver_config.config_missing is False:
                    # Filesystem section in StorageDriver configuration are all parameters used for vDisks created directly on the filesystem
                    # So when a vDisk gets created on the filesystem, these MDSes will be assigned to them
                    storagedriver_config.configure_filesystem(fs_metadata_backend_mds_nodes=mds_config_set[sr.guid])
                    storagedriver_config.save(client=clients['ovs'])

        # Everything's reconfigured, refresh new cluster configuration
        for current_storagedriver in vp_installer.vpool.storagedrivers:
            if current_storagedriver.storagerouter not in sr_client_map:
                continue
            vp_installer.vpool.storagedriver_client.update_cluster_node_configs(str(current_storagedriver.storagedriver_id), req_timeout_secs=10)
    except Exception:
        cls._logger.exception('vPool {0}: Creation failed'.format(vp_installer.name))
        vp_installer.update_status(status=VPool.STATUSES.FAILURE)
        raise

    # When a node is offline, we can run into errors, but also when 1 or more volumes are not running
    # Scheduled tasks below, so don't really care whether they succeed or not
    # NOTE: narrowed from bare 'except:' so SystemExit/KeyboardInterrupt are no longer swallowed
    try:
        VDiskController.dtl_checkup(vpool_guid=vp_installer.vpool.guid, ensure_single_timeout=600)
    except Exception:
        pass
    for vdisk in vp_installer.vpool.vdisks:
        try:
            MDSServiceController.ensure_safety(vdisk_guid=vdisk.guid)
        except Exception:
            pass
    vp_installer.update_status(status=VPool.STATUSES.RUNNING)
    cls._logger.info('Add vPool {0} ended successfully'.format(vp_installer.name))
def test_storagedriver_config_set(self):
    """
    Validates whether storagedriver configuration is generated as expected
    """
    PersistentFactory.get_client().set('ovs.storagedriver.mds.safety', 3)
    vpools, storagerouters, storagedrivers, services, mds_services, _ = self._build_service_structure(
        {'vpools': [1, 2],
         'storagerouters': [1, 2, 3, 4, 5, 6],
         'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 2, 4), (6, 2, 5), (7, 2, 6)],  # (<id>, <vpool_id>, <sr_id>)
         'mds_services': [(1, 1), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 7)]})  # (<id>, <sd_id>)
    vdisks = {}
    start_id = 1
    for mds_service in mds_services.itervalues():
        # 10 vDisks per MDS service, each batch at its own id offset
        vdisks.update(self._create_vdisks_for_mds_service(10, start_id, mds_service=mds_service))
        start_id += 10

    def _set_capacity(index, capacity):
        # Helper: assign and persist the capacity of one MDS service
        mds_services[index].capacity = capacity
        mds_services[index].save()

    _set_capacity(1, 11)  # on 1, vpool 1
    _set_capacity(2, 20)  # on 1, vpool 1
    _set_capacity(3, 12)  # on 2, vpool 1
    _set_capacity(4, 14)  # on 3, vpool 1
    _set_capacity(5, 16)  # on 4, vpool 1
    _set_capacity(6, 11)  # on 4, vpool 2
    _set_capacity(7, 13)  # on 5, vpool 2
    _set_capacity(8, 19)  # on 6, vpool 2
    _set_capacity(9, 15)  # on 6, vpool 2

    config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
    expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 2},
                                         {'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.3', 'port': 4}],
                storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
                                         {'host': '10.0.0.1', 'port': 2},
                                         {'host': '10.0.0.4', 'port': 5}],
                storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
                                         {'host': '10.0.0.1', 'port': 2},
                                         {'host': '10.0.0.4', 'port': 5}],
                storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.1', 'port': 2},
                                         {'host': '10.0.0.3', 'port': 4}]}
    self.assertDictEqual(config, expected, 'Test 1. Got:\n{0}'.format(json.dumps(config, indent=2)))

    # Changing a single capacity reorders the generated config set
    _set_capacity(2, 10)  # on 1, vpool 1
    config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
    expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 1},
                                         {'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.3', 'port': 4}],
                storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
                                         {'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.3', 'port': 4}],
                storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
                                         {'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.2', 'port': 3}],
                storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
                                         {'host': '10.0.0.3', 'port': 4},
                                         {'host': '10.0.0.2', 'port': 3}]}
    self.assertDictEqual(config, expected, 'Test 2. Got:\n{0}'.format(json.dumps(config, indent=2)))