def wrapped(*args, **kwargs):
    """
    Execute ``func`` under a node-local or cluster-wide lock (closure
    variables ``lock_type``, ``key``, ``func``, ``callback``).
    When the lock is already held by another executor, either return
    immediately (no callback) or run ``callback`` enriched with information
    about the current lock holder fetched from the cache.
    """
    if lock_type == 'local':
        _mutex = file_mutex(key)
    elif lock_type == 'cluster':
        _mutex = volatile_mutex(key)
    else:
        raise ValueError('Lock type {0} is not supported!'.format(lock_type))
    try:
        _mutex.acquire(wait=0.005)
        # Publish which StorageRouter is executing the locked function so the
        # callback path on other executors can discover the lock holder.
        local_sr = System.get_my_storagerouter()
        CacheHelper.set(key=key,
                        item={'ip': local_sr.ip,
                              'hostname': local_sr.name},
                        expire_time=60)
        return func(*args, **kwargs)
    except (NoFileLockAvailableException, NoVolatileLockAvailableException):
        if callback is None:
            return
        else:
            executor_info = None
            start = time.time()
            while executor_info is None:
                # Calculated guesswork. If a callback function would be expected, the acquire has happened for another executor the volatilekey should be set eventually
                # However by setting it after the acquire, the callback executor and original method executor can race between fetch and set
                # A better implementation would be relying on the fwk ensure_single_decorator as they check for various races themselves
                # This is just a poor mans, temporary implementation
                # BUGFIX: elapsed time is now - start; the original computed
                # 'start - time.time()' which is always negative, so the
                # timeout never fired and this loop could spin forever.
                if time.time() - start > 5:
                    raise ValueError('Timed out after 5 seconds while fetching the information about the executor.')
                try:
                    executor_info = CacheHelper.get(key=key)
                except Exception:  # Narrowed from a bare 'except:'; keep polling on any cache error
                    pass
            callback_func = callback.__func__ if isinstance(callback, staticmethod) else callback
            argnames = inspect.getargspec(callback_func)[0]
            arguments = list(args)
            kwargs.update({'test_name': func.__name__})
            if executor_info is not None:
                kwargs.update(executor_info)
            if 'result_handler' in argnames:
                result_handler = kwargs.get('result_handler')
                # The result handler may have been passed positionally; pull
                # it out of the positional arguments so it can go in kwargs.
                for index, arg in enumerate(arguments):
                    if isinstance(arg, HCResults.HCResultCollector):
                        result_handler = arguments.pop(index)
                        break
                if result_handler is None:
                    # BUGFIX: report the expected class itself; the original
                    # formatted type(<class>), which prints the metaclass.
                    raise TypeError('Expected an instance of {0}'.format(HCResults.HCResultCollector))
                kwargs['result_handler'] = result_handler
            return callback_func(*arguments, **kwargs)
    finally:
        _mutex.release()
def asd_add(slot_id):
    # type: (str) -> None
    """
    Add an ASD to the slot specified
    :param slot_id: Identifier of the slot
    :type slot_id: str
    :return: None
    :rtype: NoneType
    """
    target_disk = DiskList.get_by_alias(slot_id)
    if target_disk.available is True:
        # Disk not initialized yet: prepare it under both the global
        # 'add_disk' lock and the slot-specific lock, then reload it.
        slot_lock_name = 'disk_{0}'.format(slot_id)
        with file_mutex('add_disk'), file_mutex(slot_lock_name):
            DiskController.prepare_disk(disk=target_disk)
        target_disk = Disk(target_disk.id)
    # Creating the ASD itself is serialized separately.
    with file_mutex('add_asd'):
        ASDController.create_asd(target_disk)
def update_execute_migration_code():
    # type: () -> None
    """
    Run some migration code after an update has been done
    :return: None
    :rtype: NoneType
    """
    # Serialize with any other post-update action on this node.
    migration_lock = file_mutex('post_update')
    with migration_lock:
        SDMUpdateController.execute_migration_code()
def update(package_name):
    # type: (str) -> None
    """
    Install the specified package
    :param package_name: Name of the package to update
    :type package_name: str
    :return: None
    :rtype: NoneType
    """
    # Only one package update may run at a time on this node.
    update_lock = file_mutex('package_update')
    with update_lock:
        SDMUpdateController.update(package_name=package_name)
def restart_services(request_data):
    # type: (dict) -> None
    """
    Restart services
    :param request_data: Data about the request (given by the decorator)
    :type request_data: dict
    :return: None
    :rtype: NoneType
    """
    with file_mutex('package_update'):
        # BUGFIX: the fallback default must be a JSON string ('[]'), not a
        # Python list - json.loads raises TypeError on a list argument, so
        # a request without 'service_names' would crash instead of
        # restarting nothing.
        service_names = json.loads(request_data.get('service_names', '[]'))
        SDMUpdateController.restart_services(service_names=service_names)
def get_package_information_new():
    # type: () -> dict
    """
    Retrieve update information
    This call is used by the new framework code (as off 30 Nov 2016)
    In case framework has new code, but SDM runs old code, the asdmanager.py plugin will adjust the old format to the new format
    :return: Installed and candidate for install version for all SDM related packages
    :rtype: dict
    """
    package_lock = file_mutex('package_update')
    with package_lock:
        API._logger.info('Locking in place for package update')
        package_info = SDMUpdateController.get_package_information()
    return package_info
def lock(self, name, wait=None, expiration=60):
    """
    Returns the file mutex implementation
    :param name: Name to give to the lock
    :type name: str
    :param wait: Wait time for the lock (in seconds)
    :type wait: float
    :param expiration: Expiration time for the lock (in seconds)
    :type expiration: float
    :return: The lock implementation
    :rtype: ArakoonConfigurationLock
    """
    # NOTE(review): 'expiration' is accepted for interface compatibility but
    # not forwarded - file_mutex is only given the name and wait time here.
    mutex = file_mutex(name, wait)
    return mutex
def add_fstab(cls, partition_aliases, mountpoint, filesystem):
    """
    Add entry to /etc/fstab for mountpoint

    Reads the whole fstab, locates an existing entry for any of the given
    partition aliases, then appends or replaces the entry and rewrites the
    file under the 'ovs-fstab-lock' file mutex.
    :param partition_aliases: Possible aliases of the partition to add
    :type partition_aliases: list
    :param mountpoint: Mountpoint on which device is mounted
    :type mountpoint: str
    :param filesystem: Filesystem used
    :type filesystem: str
    :return: None
    """
    if len(partition_aliases) == 0:
        raise ValueError('No partition aliases provided')
    # Read the current fstab contents; the write happens at the end so the
    # file is rewritten in one go.
    with open('/etc/fstab', 'r') as fstab_file:
        lines = [line.strip() for line in fstab_file.readlines()]
    osmanager = cls._get_os_manager()
    used_path = None    # The alias under which the partition is already listed
    used_index = None   # Index of that existing entry in 'lines'
    mount_line = None   # An existing entry (any device) already using this mountpoint
    for device_alias in partition_aliases:
        for index, line in enumerate(lines):
            if line.startswith('#'):
                continue
            # startswith is a cheap pre-filter; the regex ensures the alias is
            # followed by whitespace (a full field match, not a prefix match).
            if line.startswith(device_alias) and re.match('^{0}\s+'.format(re.escape(device_alias)), line):
                used_path = device_alias
                used_index = index
                if len(line.split()) == 6 and line.split()[1] == mountpoint:  # Example line: 'UUID=40d99523-a1e7-4374-84f2-85b5d14b516e / swap sw 0 0'
                    mount_line = line
        if used_path is not None:
            break
    if used_path is None:  # Partition not yet present with any of its possible aliases
        lines.append(osmanager.get_fstab_entry(partition_aliases[0], mountpoint, filesystem))
    else:  # Partition present, update information
        # Replace the existing entry in place, keeping its position.
        lines.pop(used_index)
        lines.insert(used_index, osmanager.get_fstab_entry(used_path, mountpoint, filesystem))
    if mount_line is not None:  # Mount point already in use by another device (potentially same device, but other device_path)
        lines.remove(mount_line)
    # Only the rewrite is guarded by the lock; concurrent readers above could
    # still race - NOTE(review): looks intentional/best-effort, confirm.
    with file_mutex('ovs-fstab-lock'):
        with open('/etc/fstab', 'w') as fstab_file:
            fstab_file.write('{0}\n'.format('\n'.join(lines)))
def new_function(*args, **kw):
    """
    Executes the decorated function in a locked context
    """
    # Take the file-based lock first, then the cluster-wide volatile lock.
    # Both use the 'messaging' key and wait up to 60 seconds; each release
    # sits in a finally so the locks are freed even when acquire raises.
    file_lock = file_mutex('messaging')
    try:
        file_lock.acquire(wait=60)
        cluster_lock = volatile_mutex('messaging')
        try:
            cluster_lock.acquire(wait=60)
            return f(*args, **kw)
        finally:
            cluster_lock.release()
    finally:
        file_lock.release()
def execute_update(status):
    # type: (str) -> str
    """
    This call is required when framework has old code and SDM has been updated (as off 30 Nov 2016)
    Old code tries to initiate update providing a status, while new code no longer requires this status
    :param status: Unused
    :type status: str
    :return: The status of the ongoing update
    :rtype: str
    """
    _ = status  # Parameter kept only for backwards-compatible signature
    with file_mutex('package_update'):
        # Update both SDM-related packages sequentially under a single lock.
        for package in ('alba', 'openvstorage-sdm'):
            SDMUpdateController.update(package_name=package)
    return 'done'
def wrapped(*args, **kwargs):
    """
    Execute ``func`` under a node-local or cluster-wide lock (closure
    variables ``lock_type``, ``key``, ``func``, ``callback``).
    When the lock is already held elsewhere, either return immediately
    (no callback) or run ``callback`` enriched with information about the
    current lock holder fetched from the cache.
    """
    if lock_type == 'local':
        _mutex = file_mutex(key)
    elif lock_type == 'cluster':
        _mutex = volatile_mutex(key)
    else:
        raise ValueError('Lock type {0} is not supported!'.format(lock_type))
    try:
        _mutex.acquire(wait=0.005)
        # Publish which StorageRouter is executing the locked function so the
        # callback path on other executors can discover the lock holder.
        local_sr = System.get_my_storagerouter()
        CacheHelper.set(key=key, item={'ip': local_sr.ip, 'hostname': local_sr.name}, expire_time=60)
        return func(*args, **kwargs)
    except (NoFileLockAvailableException, NoVolatileLockAvailableException):
        if callback is None:
            return
        else:
            executor_info = None
            start = time.time()
            while executor_info is None:
                # Calculated guesswork. If a callback function would be expected, the acquire has happened for another executor the volatilekey should be set eventually
                # However by setting it after the acquire, the callback executor and original method executor can race between fetch and set
                # A better implementation would be relying on the fwk ensure_single_decorator as they check for various races themselves
                # This is just a poor mans, temporary implementation
                # BUGFIX: elapsed time is now - start; the original computed
                # 'start - time.time()' which is always negative, so the
                # timeout never fired and this loop could spin forever.
                if time.time() - start > 5:
                    raise ValueError('Timed out after 5 seconds while fetching the information about the executor.')
                try:
                    executor_info = CacheHelper.get(key=key)
                except Exception:  # Narrowed from a bare 'except:'; keep polling on any cache error
                    pass
            callback_func = callback.__func__ if isinstance(callback, staticmethod) else callback
            argnames = inspect.getargspec(callback_func)[0]
            arguments = list(args)
            kwargs.update({'test_name': func.__name__})
            if executor_info is not None:
                kwargs.update(executor_info)
            if 'result_handler' in argnames:
                result_handler = kwargs.get('result_handler')
                # The result handler may have been passed positionally; pull
                # it out of the positional arguments so it can go in kwargs.
                for index, arg in enumerate(arguments):
                    if isinstance(arg, HCResults.HCResultCollector):
                        result_handler = arguments.pop(index)
                        break
                if result_handler is None:
                    raise TypeError('Expected an instance of {0}'.format(HCResults.HCResultCollector))
                kwargs['result_handler'] = result_handler
            # NOTE(review): unlike the sibling implementation of this wrapper,
            # the remaining positional arguments are NOT forwarded here -
            # confirm whether callbacks are keyword-only before unifying.
            return callback_func(**kwargs)
    finally:
        _mutex.release()
def slot_restart(slot_id):
    # type: (str) -> None
    """
    Restart a slot
    :param slot_id: Identifier of the slot (eg: 'pci-0000:03:00.0-sas-0x5000c29f4cf04566-lun-0')
    :type slot_id: str
    :return: None
    """
    target_disk = DiskList.get_by_alias(slot_id)
    slot_lock = file_mutex('slot_{0}'.format(slot_id))
    with slot_lock:
        API._logger.info('Got lock for restarting slot {0}'.format(slot_id))
        # Stop every ASD on the disk, remount it, then bring the ASDs back up.
        for asd in target_disk.asds:
            ASDController.stop_asd(asd=asd)
        DiskController.remount_disk(disk=target_disk)
        for asd in target_disk.asds:
            ASDController.start_asd(asd=asd)
def update_registration(cls, node_identifier):
    """
    Register the update for the node
    Falls back to a file mutex when the persistent store is not implemented
    This is a generator (it yields once while the registration is held), so it
    is presumably consumed as a context manager - TODO confirm decorator at call site.
    :param node_identifier: Identifier of the node
    :type node_identifier: str
    :raises: UpdateInProgressException if an update is already in progress for this component
    """
    registration_key = cls.get_registration_key()
    try:
        persistent = cls.get_persistent_client()
        try:
            transaction = persistent.begin_transaction()
            # Simple lock based on key name
            # The assert requires the key to be absent (None); setting it in the
            # same transaction makes acquire-if-free atomic.
            persistent.assert_value(registration_key, None, transaction)
            persistent.set(registration_key, node_identifier, transaction)
            try:
                persistent.apply_transaction(transaction)
            except AssertException:
                # Somebody else holds the registration: surface who.
                identifier = persistent.get(registration_key)
                raise UpdateInProgressException('An update is already in progress for component {} with identifier {}'.format(cls.COMPONENT, identifier))
            # Start code execution
            cls.logger.info("Got a hold of the lock")
            yield
        finally:
            # End lock
            # Only delete the key if it still holds OUR identifier; a failed
            # assert means someone overwrote it and we must not remove theirs.
            transaction = persistent.begin_transaction()
            persistent.assert_value(registration_key, node_identifier, transaction)
            persistent.delete(registration_key, False, transaction)
            try:
                persistent.apply_transaction(transaction)
            except AssertException:
                # Something overwrote our value
                pass
    except NotImplementedError:
        # Fallback to file mutex
        cls.logger.warning("Falling back to a file lock. No distributed lock available")
        mutex = file_mutex(registration_key)
        try:
            mutex.acquire()
            yield
        finally:
            mutex.release()
def _add_hypervisor(hypervisor_credentials):
    """
    Return a hypervisor client for the given credentials, creating and
    caching one under a per-credentials file lock when none exists yet.
    :param hypervisor_credentials: Credentials object carrying ip, user, password and type
    :return: The (possibly cached) hypervisor client instance
    :raises NotImplementedError: for VMWARE or any unsupported hypervisor type
    """
    ip = hypervisor_credentials.ip
    username = hypervisor_credentials.user
    password = hypervisor_credentials.password
    hvtype = hypervisor_credentials.type
    mutex = file_mutex('hypervisor_{0}'.format(hash(hypervisor_credentials)))
    try:
        mutex.acquire(30)
        if hypervisor_credentials not in HypervisorFactory.hypervisors:
            if hvtype == 'VMWARE':
                # Not yet tested. Needs to be rewritten
                # BUGFIX: removed the VMware import/instantiation that
                # followed this raise - it was unreachable dead code.
                raise NotImplementedError("{0} has not yet been implemented".format(hvtype))
            elif hvtype == 'KVM':
                from .hypervisors.kvm import KVM
                hypervisor = KVM(ip, username, password)
            else:
                raise NotImplementedError('Hypervisor {0} is not yet supported'.format(hvtype))
            HypervisorFactory.hypervisors[hypervisor_credentials] = hypervisor
        # BUGFIX: return the cached instance; the original returned a local
        # 'hypervisor' variable which is unbound on a cache hit (NameError).
        return HypervisorFactory.hypervisors[hypervisor_credentials]
    finally:
        mutex.release()
def get_package_information_old():
    # type: () -> dict
    """
    Retrieve update information
    This call is required when framework has old code and SDM has been updated (as off 30 Nov 2016)
    Old code tries to call /update/information and expects data formatted in the old style
    :return: Installed and candidate for install version for all SDM related packages
    :rtype: dict
    """
    result = {'version': '', 'installed': ''}
    with file_mutex('package_update'):
        API._logger.info('Locking in place for package update')
        update_info = SDMUpdateController.get_package_information().get('alba', {})
        # 'openvstorage-sdm' takes precedence over 'alba' when both are present.
        if 'openvstorage-sdm' in update_info:
            package_info = update_info['openvstorage-sdm']
        elif 'alba' in update_info:
            package_info = update_info['alba']
        else:
            package_info = None
        if package_info is not None:
            result['version'] = package_info['candidate']
            result['installed'] = package_info['installed']
    return result
def clear_slot(slot_id):
    # type: (str) -> None
    """
    Clears a slot
    :param slot_id: Identifier of the slot
    :type slot_id: str
    :return: None
    :rtype: NoneType
    """
    try:
        slot_disk = DiskList.get_by_alias(slot_id)
    except ObjectNotFoundException:
        API._logger.warning('Disk with ID {0} is no longer present (or cannot be managed)'.format(slot_id))
        return None
    if slot_disk.available is True:
        raise HttpNotAcceptableException(error='disk_not_configured',
                                         error_description='Disk not yet configured')
    with file_mutex('disk_{0}'.format(slot_id)):
        # Best effort: try to remove every ASD, remembering the last failure
        # so it can be re-raised if the disk could not be fully cleared.
        last_exception = None
        for asd in slot_disk.asds:
            try:
                ASDController.remove_asd(asd=asd)
            except Exception as ex:
                last_exception = ex
        slot_disk = Disk(slot_disk.id)
        if len(slot_disk.asds) == 0:
            DiskController.clean_disk(disk=slot_disk)
        elif last_exception is not None:
            raise last_exception
        else:
            raise RuntimeError('Still some ASDs configured on Disk {0}'.format(slot_id))
def migrate(cls):
    # type: () -> None
    """
    Execute the migration logic.

    Runs once per node under the 'package_update_pu' file lock: fixes the SDM
    node id file, stops the asd-manager service, applies the versioned
    DB/service migrations when the stored version is behind
    cls.CURRENT_VERSION, bumps the stored version and restarts the service.
    :return: None
    :rtype: NoneType
    """
    with file_mutex('package_update_pu'):
        local_client = SSHClient(endpoint='127.0.0.1', username='******')

        # Override the created openvstorage_sdm_id during package install, with currently available SDM ID
        if local_client.file_exists(BOOTSTRAP_FILE):
            with open(BOOTSTRAP_FILE) as bstr_file:
                node_id = json.load(bstr_file)['node_id']
            local_client.file_write(filename='/etc/openvstorage_sdm_id', contents=node_id + '\n')
        else:
            with open('/etc/openvstorage_sdm_id', 'r') as id_file:
                node_id = id_file.read().strip()

        # Stored migration version for this node; 0 when never migrated.
        key = '{0}/versions'.format(ASD_NODE_CONFIG_LOCATION.format(node_id))
        version = Configuration.get(key) if Configuration.exists(key) else 0

        # Stop the asd-manager while migrating; it is restarted at the end.
        asd_manager_service_name = 'asd-manager'
        if cls.service_manager.has_service(asd_manager_service_name, local_client) and cls.service_manager.get_service_status(asd_manager_service_name, local_client) == 'active':
            cls.logger.info('Stopping asd-manager service')
            cls.service_manager.stop_service(asd_manager_service_name, local_client)

        # @TODO: Move these migrations to alba_node.client.update_execute_migration_code()
        if version < cls.CURRENT_VERSION:
            try:
                # DB migrations
                from source.controllers.asd import ASDController
                from source.controllers.disk import DiskController
                from source.dal.asdbase import ASDBase
                from source.dal.lists.asdlist import ASDList
                from source.dal.lists.disklist import DiskList
                from source.dal.objects.asd import ASD

                if not local_client.file_exists('{0}/main.db'.format(ASDBase.DATABASE_FOLDER)):
                    local_client.dir_create([ASDBase.DATABASE_FOLDER])

                # Rebuild the ASD model from what is actually on the disks.
                asd_map = dict((asd.asd_id, asd) for asd in ASDList.get_asds())
                DiskController.sync_disks()
                for disk in DiskList.get_usable_disks():
                    if disk.state == 'MISSING' or disk.mountpoint is None:
                        continue
                    for asd_id in local_client.dir_list(disk.mountpoint):
                        if asd_id in asd_map:
                            asd = asd_map[asd_id]
                        else:
                            asd = ASD()
                        asd.disk = disk
                        asd.asd_id = asd_id
                        asd.folder = asd_id
                        # NOTE(review): indentation of the save below is
                        # reconstructed - only ASDs with a config get saved.
                        if asd.has_config:
                            if asd.port is None or asd.hosts is None:
                                config = Configuration.get(key=asd.config_key)
                                asd.port = config['port']
                                asd.hosts = config.get('ips', [])
                            asd.save()

                # Adjustment of open file descriptors for ASD/maintenance services to 8192
                asd_service_names = list(ASDController.list_asd_services())
                maintenance_service_names = list(MaintenanceController.get_services())
                for service_name in asd_service_names + maintenance_service_names:
                    if cls.service_manager.has_service(name=service_name, client=local_client):
                        # Service file location and expected limit line differ
                        # between Systemd and Upstart.
                        if cls.service_manager.__class__ == Systemd:
                            path = '/lib/systemd/system/{0}.service'.format(service_name)
                            check = 'LimitNOFILE=8192'
                        else:
                            path = '/etc/init/{0}.conf'.format(service_name)
                            check = 'limit nofile 8192 8192'

                        restart_required = False
                        if os.path.exists(path):
                            with open(path, 'r') as system_file:
                                if check not in system_file.read():
                                    restart_required = True

                        if restart_required is False:
                            continue

                        configuration_key = ServiceFactory.SERVICE_CONFIG_KEY.format(node_id, service_name)
                        if Configuration.exists(configuration_key):
                            # Rewrite the service file
                            cls.service_manager.add_service(name=ASDController.ASD_PREFIX if service_name in asd_service_names else MaintenanceController.MAINTENANCE_PREFIX,
                                                            client=local_client,
                                                            params=Configuration.get(configuration_key),
                                                            target_name=service_name)

                            # Let the update know that the ASD / maintenance services need to be restarted
                            # Inside `if Configuration.exists`, because useless to rapport restart if we haven't rewritten service file
                            ExtensionsToolbox.edit_version_file(client=local_client,
                                                                package_name='alba',
                                                                old_run_file='{0}/{1}.version'.format(ServiceFactory.RUN_FILE_DIR, service_name))
                if cls.service_manager.__class__ == Systemd:
                    local_client.run(['systemctl', 'daemon-reload'])

                # Version 3: Addition of 'ExecReload' for ASD/maintenance SystemD services
                if cls.service_manager.__class__ == Systemd:  # Upstart does not have functionality to reload a process' configuration
                    reload_daemon = False
                    asd_service_names = list(ASDController.list_asd_services())
                    maintenance_service_names = list(MaintenanceController.get_services())
                    for service_name in asd_service_names + maintenance_service_names:
                        if not cls.service_manager.has_service(name=service_name, client=local_client):
                            continue

                        path = '/lib/systemd/system/{0}.service'.format(service_name)
                        if os.path.exists(path):
                            with open(path, 'r') as system_file:
                                if 'ExecReload' not in system_file.read():
                                    reload_daemon = True
                                    configuration_key = ServiceFactory.SERVICE_CONFIG_KEY.format(node_id, service_name)
                                    if Configuration.exists(configuration_key):
                                        # No need to edit the service version file, since this change only requires a daemon-reload
                                        cls.service_manager.add_service(name=ASDController.ASD_PREFIX if service_name in asd_service_names else MaintenanceController.MAINTENANCE_PREFIX,
                                                                        client=local_client,
                                                                        params=Configuration.get(configuration_key),
                                                                        target_name=service_name)
                    if reload_daemon is True:
                        local_client.run(['systemctl', 'daemon-reload'])

                # Version 6: Introduction of Active Drive
                all_local_ips = OSFactory.get_manager().get_ip_addresses(client=local_client)
                for asd in ASDList.get_asds():
                    if asd.has_config:
                        asd_config = Configuration.get(asd.config_key)
                        if 'multicast' not in asd_config:
                            asd_config['multicast'] = None
                        if 'ips' in asd_config:
                            asd_ips = asd_config['ips'] or all_local_ips
                        else:
                            asd_ips = all_local_ips
                        asd.hosts = asd_ips
                        asd_config['ips'] = asd_ips
                        Configuration.set(asd.config_key, asd_config)
                        asd.save()

                # Version 7: Moving flask certificate files to config dir
                for file_name in ['passphrase', 'server.crt', 'server.csr', 'server.key']:
                    if local_client.file_exists('/opt/asd-manager/source/{0}'.format(file_name)):
                        local_client.file_move(source_file_name='/opt/asd-manager/source/{0}'.format(file_name),
                                               destination_file_name='/opt/asd-manager/config/{0}'.format(file_name))
            except:
                # Migrations are best-effort: log and continue so the service
                # can be brought back up and the version still bumped below.
                cls.logger.exception('Error while executing post-update code on node {0}'.format(node_id))
        Configuration.set(key, cls.CURRENT_VERSION)

        if cls.service_manager.has_service(asd_manager_service_name, local_client) and cls.service_manager.get_service_status(asd_manager_service_name, local_client) != 'active':
            cls.logger.info('Starting asd-manager service')
            cls.service_manager.start_service(asd_manager_service_name, local_client)

        cls.logger.info('Post-update logic executed')
def _deploy_stack_and_scrub(queue, vpool, scrub_info, error_messages):
    """
    Executes scrub work for a given vDisk queue and vPool, based on scrub_info
    :param queue: a Queue with vDisk guids that need to be scrubbed (they should only be member of a single vPool)
    :type queue: Queue
    :param vpool: the vPool object of the vDisks
    :type vpool: VPool
    :param scrub_info: A dict containing scrub information: `scrub_path` with the path where to scrub `storage_router` with the StorageRouter that needs to do the work
    :type scrub_info: dict
    :param error_messages: A list of error messages to be filled (by reference)
    :type error_messages: list
    :return: None
    :rtype: NoneType
    """
    if len(vpool.storagedrivers) == 0 or not vpool.storagedrivers[0].storagedriver_id:
        error_messages.append('vPool {0} does not have any valid StorageDrivers configured'.format(vpool.name))
        return

    service_manager = ServiceFactory.get_manager()
    client = None
    lock_time = 5 * 60  # Maximum wait (seconds) for the proxy deploy/remove lock
    storagerouter = scrub_info['storage_router']
    partition_guid = scrub_info['partition_guid']
    # Names/keys are scoped per vPool + StorageRouter + partition so multiple
    # scrub stacks can coexist.
    alba_proxy_service = 'ovs-albaproxy_{0}_{1}_{2}_scrub'.format(vpool.name, storagerouter.name, partition_guid)
    scrub_directory = '{0}/scrub_work_{1}_{2}'.format(scrub_info['scrub_path'], vpool.name, partition_guid)
    scrub_config_key = 'ovs/vpools/{0}/proxies/scrub/scrub_config_{1}'.format(vpool.guid, partition_guid)
    backend_config_key = 'ovs/vpools/{0}/proxies/scrub/backend_config_{1}'.format(vpool.guid, partition_guid)

    # Deploy a proxy
    try:
        with file_mutex(name='ovs_albaproxy_scrub', wait=lock_time):
            GenericController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Deploying ALBA proxy {2}'.format(vpool.name, storagerouter.name, alba_proxy_service))
            client = SSHClient(storagerouter, 'root')
            client.dir_create(scrub_directory)
            client.dir_chmod(scrub_directory, 0777)  # Celery task executed by 'ovs' user and should be able to write in it
            if service_manager.has_service(name=alba_proxy_service, client=client) is True and service_manager.get_service_status(name=alba_proxy_service, client=client) == 'active':
                GenericController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Re-using existing proxy service {2}'.format(vpool.name, storagerouter.name, alba_proxy_service))
                scrub_config = Configuration.get(scrub_config_key)
            else:
                machine_id = System.get_my_machine_id(client)
                port_range = Configuration.get('/ovs/framework/hosts/{0}/ports|storagedriver'.format(machine_id))
                # Port selection is guarded separately so concurrent deploys on
                # the same StorageRouter don't pick the same free port.
                with volatile_mutex('deploy_proxy_for_scrub_{0}'.format(storagerouter.guid), wait=30):
                    port = System.get_free_ports(selected_range=port_range, nr=1, client=client)[0]
                scrub_config = Configuration.get('ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(vpool.guid))
                scrub_config['port'] = port
                scrub_config['transport'] = 'tcp'
                Configuration.set(scrub_config_key, json.dumps(scrub_config, indent=4), raw=True)

                params = {'VPOOL_NAME': vpool.name,
                          'LOG_SINK': LogHandler.get_sink_path(alba_proxy_service),
                          'CONFIG_PATH': Configuration.get_configuration_path(scrub_config_key)}
                service_manager.add_service(name='ovs-albaproxy', params=params, client=client, target_name=alba_proxy_service)
                service_manager.start_service(name=alba_proxy_service, client=client)
                GenericController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Deployed ALBA proxy {2}'.format(vpool.name, storagerouter.name, alba_proxy_service))

            # Point the backend connection manager at the local scrub proxy.
            backend_config = Configuration.get('ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, vpool.storagedrivers[0].storagedriver_id))['backend_connection_manager']
            if backend_config.get('backend_type') != 'MULTI':
                backend_config['alba_connection_host'] = '127.0.0.1'
                backend_config['alba_connection_port'] = scrub_config['port']
            else:
                for value in backend_config.itervalues():
                    if isinstance(value, dict):
                        value['alba_connection_host'] = '127.0.0.1'
                        value['alba_connection_port'] = scrub_config['port']
            # Copy backend connection manager information in separate key
            Configuration.set(backend_config_key, json.dumps({"backend_connection_manager": backend_config}, indent=4), raw=True)
    except Exception:
        message = 'Scrubber - vPool {0} - StorageRouter {1} - An error occurred deploying ALBA proxy {2}'.format(vpool.name, storagerouter.name, alba_proxy_service)
        error_messages.append(message)
        GenericController._logger.exception(message)
        # Roll back a partially deployed proxy service and its config.
        if client is not None and service_manager.has_service(name=alba_proxy_service, client=client) is True:
            if service_manager.get_service_status(name=alba_proxy_service, client=client) == 'active':
                service_manager.stop_service(name=alba_proxy_service, client=client)
            service_manager.remove_service(name=alba_proxy_service, client=client)
        if Configuration.exists(scrub_config_key):
            Configuration.delete(scrub_config_key)

    # Execute the actual scrubbing
    threads = []
    threads_key = '/ovs/framework/hosts/{0}/config|scrub_stack_threads'.format(storagerouter.machine_id)
    amount_threads = Configuration.get(key=threads_key) if Configuration.exists(key=threads_key) else 2
    if not isinstance(amount_threads, int):
        error_messages.append('Amount of threads to spawn must be an integer for StorageRouter with ID {0}'.format(storagerouter.machine_id))
        return

    amount_threads = max(amount_threads, 1)  # Make sure amount_threads is at least 1
    amount_threads = min(min(queue.qsize(), amount_threads), 20)  # Make sure amount threads is max 20
    GenericController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Spawning {2} threads for proxy service {3}'.format(vpool.name, storagerouter.name, amount_threads, alba_proxy_service))
    for index in range(amount_threads):
        # Worker threads consume vDisk guids from the shared queue.
        thread = Thread(name='execute_scrub_{0}_{1}_{2}'.format(vpool.guid, partition_guid, index),
                        target=GenericController._execute_scrub,
                        args=(queue, vpool, scrub_info, scrub_directory, error_messages))
        thread.start()
        threads.append(thread)
    for thread in threads:
        thread.join()

    # Delete the proxy again
    try:
        with file_mutex(name='ovs_albaproxy_scrub', wait=lock_time):
            GenericController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Removing service {2}'.format(vpool.name, storagerouter.name, alba_proxy_service))
            client = SSHClient(storagerouter, 'root')
            client.dir_delete(scrub_directory)
            if service_manager.has_service(alba_proxy_service, client=client):
                service_manager.stop_service(alba_proxy_service, client=client)
                service_manager.remove_service(alba_proxy_service, client=client)
            if Configuration.exists(scrub_config_key):
                Configuration.delete(scrub_config_key)
            GenericController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Removed service {2}'.format(vpool.name, storagerouter.name, alba_proxy_service))
    except Exception:
        message = 'Scrubber - vPool {0} - StorageRouter {1} - Removing service {2} failed'.format(vpool.name, storagerouter.name, alba_proxy_service)
        error_messages.append(message)
        GenericController._logger.exception(message)
def lock(cls):
    """
    Returns a file lock context manager
    """
    # One lock file per database folder serializes access to main.db.
    lock_path = '{0}/main.lock'.format(cls.DATABASE_FOLDER)
    return file_mutex(lock_path)