def delete_preset(alba_backend_guid, name):
    """
    Removes the named preset from an ALBA backend.
    Deleting the default preset is refused; an unknown preset name is logged and ignored.
    :param alba_backend_guid: Guid of the ALBA backend
    :type alba_backend_guid: str
    :param name: Name of the preset
    :type name: str
    :return: None
    """
    # VALIDATIONS
    backend = AlbaBackend(alba_backend_guid)
    default_by_name = {preset['name']: preset['is_default'] for preset in backend.presets}
    if name not in default_by_name:
        AlbaPresetController._logger.warning('Preset with name {0} for ALBA Backend {1} could not be found, so not deleting'.format(name, backend.name))
        return
    if default_by_name[name] is True:
        raise RuntimeError('Cannot delete the default preset')

    # DELETE PRESET
    AlbaPresetController._logger.debug('Deleting preset {0}'.format(name))
    abm_config = Configuration.get_configuration_path(backend.abm_cluster.config_location)
    AlbaCLI.run(command='delete-preset', config=abm_config, extra_params=[name])
    backend.invalidate_dynamics()
def update_preset(alba_backend_guid, name, policies):
    """
    Updates policies for an existing preset to Alba
    :param alba_backend_guid: Guid of the ALBA backend
    :type alba_backend_guid: str
    :param name: Name of the preset
    :type name: str
    :param policies: New policy list to be sent to alba
    :type policies: list
    :return: None
    """
    # VALIDATIONS
    AlbaPresetController._validate_policies_param(policies=policies)
    alba_backend = AlbaBackend(alba_backend_guid)
    if name not in [preset['name'] for preset in alba_backend.presets]:
        raise RuntimeError('Could not find a preset with name {0} for ALBA Backend {1}'.format(name, alba_backend.name))

    # UPDATE PRESET
    AlbaPresetController._logger.debug('Updating preset {0} with policies {1}'.format(name, policies))
    config = Configuration.get_configuration_path(ArakoonInstaller.CONFIG_KEY.format(AlbaController.get_abm_cluster_name(alba_backend=alba_backend)))
    temp_config_file = tempfile.mktemp()  # NOTE(review): mktemp is race-prone; kept to avoid changing existing behavior
    try:
        # The ALBA CLI reads the new policies from a file passed via 'input-url'
        with open(temp_config_file, 'wb') as data_file:
            data_file.write(json.dumps({'policies': policies}))
            data_file.flush()
        AlbaCLI.run(command='update-preset', config=config, named_params={'input-url': temp_config_file}, extra_params=[name])
        alba_backend.invalidate_dynamics()
    finally:
        # Bug fix: previously the temporary file leaked when AlbaCLI.run raised
        if os.path.isfile(temp_config_file):
            os.remove(temp_config_file)
def update_preset(alba_backend_guid, name, policies):
    """
    Updates policies for an existing preset to Alba
    :param alba_backend_guid: Guid of the ALBA backend
    :type alba_backend_guid: str
    :param name: Name of preset
    :type name: str
    :param policies: New policy list to be sent to alba
    :type policies: list
    :return: None
    """
    # VALIDATIONS
    AlbaPresetController._validate_policies_param(policies=policies)
    alba_backend = AlbaBackend(alba_backend_guid)
    if name not in [preset['name'] for preset in alba_backend.presets]:
        raise RuntimeError('Could not find a preset with name {0} for ALBA Backend {1}'.format(name, alba_backend.name))

    # UPDATE PRESET
    AlbaPresetController._logger.debug('Updating preset {0} with policies {1}'.format(name, policies))
    config = Configuration.get_configuration_path(alba_backend.abm_cluster.config_location)
    temp_config_file = tempfile.mktemp()  # NOTE(review): mktemp is race-prone; kept to avoid changing existing behavior
    try:
        # The ALBA CLI reads the new policies from a file passed via 'input-url'
        with open(temp_config_file, 'wb') as data_file:
            data_file.write(json.dumps({'policies': policies}))
            data_file.flush()
        AlbaCLI.run(command='update-preset', config=config, named_params={'input-url': temp_config_file}, extra_params=[name])
        alba_backend.invalidate_dynamics()
    finally:
        # Bug fix: previously the temporary file leaked when AlbaCLI.run raised
        if os.path.isfile(temp_config_file):
            os.remove(temp_config_file)
def create(self, request, version, abm_cluster=None, nsm_clusters=None):
    """
    Creates an AlbaBackend
    :param request: Data regarding ALBA backend to create
    :type request: request
    :param version: version requested by the client
    :type version: int
    :param abm_cluster: ABM cluster to claim for this new ALBA Backend
    :type abm_cluster: str
    :param nsm_clusters: NSM clusters to claim for this new ALBA Backend
    :type nsm_clusters: list
    :return: The newly created ALBA Backend object
    :rtype: ovs.dal.hybrids.albabackend.AlbaBackend
    """
    if nsm_clusters is None:
        nsm_clusters = []
    # Clients older than API version 3 have no notion of scaling; keep their behavior
    if version < 3:
        request.DATA['scaling'] = 'LOCAL'
    backend_serializer = FullSerializer(AlbaBackend,
                                        instance=AlbaBackend(),
                                        data=request.DATA,
                                        allow_passwords=True)
    new_backend = backend_serializer.deserialize()
    new_backend.save()
    # Mark the underlying backend as installing until the async cluster setup completes
    new_backend.backend.status = 'INSTALLING'
    new_backend.backend.save()
    AlbaController.add_cluster.delay(new_backend.guid, abm_cluster, nsm_clusters)
    return new_backend
def remove_alba_arakoon_clusters(cls, alba_backend_guid, validate_clusters_reachable=True):
    # type: (basestring, bool) -> None
    """
    Removes all backend related Arakoon clusters
    :param alba_backend_guid: Guid of the ALBA Backend
    :type alba_backend_guid: str
    :param validate_clusters_reachable: Validate if all clusters are reachable
    :type validate_clusters_reachable: bool
    :return: None
    :rtype: NoneType
    """
    alba_backend = AlbaBackend(alba_backend_guid)
    if validate_clusters_reachable:
        AlbaArakoonController.abms_reachable(alba_backend)
        AlbaArakoonController.nsms_reachable(alba_backend)

    if alba_backend.abm_cluster is None:
        # Nothing was ever deployed for this backend
        return
    AlbaArakoonController._logger.debug('Removing clusters for ALBA Backend {0}'.format(alba_backend.name))
    registered_clusters = list(Configuration.list('/ovs/arakoon'))
    # The ABM cluster goes first, then every NSM Arakoon cluster and its services
    ABMInstaller.remove_abm_cluster(alba_backend.abm_cluster, registered_clusters)
    for nsm_cluster in alba_backend.nsm_clusters:
        NSMInstaller.remove_nsm_cluster(nsm_cluster, registered_clusters)
def get_albabackend_by_guid(albabackend_guid):
    """
    Fetch the ALBA backend hybrid matching the given guid
    :param albabackend_guid: albabackend guid
    :type albabackend_guid: str
    :return: alba backend object
    :rtype: ovs.dal.hybrids.albabackend
    """
    alba_backend = AlbaBackend(albabackend_guid)
    return alba_backend
def ovs_3769_validation_test():
    """
    Create an albanode with an asd statistics part set to empty dictionary
    Assert code does not raise

    Regression check for OVS-3769: accessing AlbaBackend.statistics on a backend
    whose ASD has no statistics data must not raise a KeyError.
    """
    # Build a minimal model chain: node -> backend type -> backend -> alba backend -> disk -> ASD
    an = AlbaNode()
    an.password = '******'
    an.node_id = 'ovs3769an'
    an.port = 1234
    an.ip = '127.0.0.1'
    an.username = '******'
    an.save()
    bet = GeneralBackend.get_backendtype_by_code('alba')
    be = Backend()
    be.backend_type = bet
    be.name = 'ovs3769be'
    be.save()
    abe = AlbaBackend()
    abe.backend = be
    abe.save()
    ad = AlbaDisk()
    ad.name = 'ovs3769ad'
    ad.alba_node = an
    ad.save()
    asd = AlbaASD()
    asd.alba_backend = abe
    asd.asd_id = 'ovs3769asd'
    asd.alba_disk = ad
    asd.save()
    try:
        # Accessing the dynamic property is the actual test; the value is discarded
        abe.statistics
    except KeyError, ex:
        # A KeyError here means the OVS-3769 regression resurfaced
        logger.error('Regression OVS-3769 - asd statistics raises a KeyError: {0}'.format(str(ex)))
def delete_preset(alba_backend_guid, name):
    """
    Drops the given preset from an ALBA backend.
    The default preset can never be removed; a missing preset only yields a warning.
    :param alba_backend_guid: Guid of the ALBA backend
    :type alba_backend_guid: str
    :param name: Name of the preset
    :type name: str
    :return: None
    """
    # VALIDATIONS
    backend = AlbaBackend(alba_backend_guid)
    is_default_map = {preset_info['name']: preset_info['is_default'] for preset_info in backend.presets}
    if name not in is_default_map:
        AlbaPresetController._logger.warning('Preset with name {0} for ALBA Backend {1} could not be found, so not deleting'.format(name, backend.name))
        return
    if is_default_map[name] is True:
        raise RuntimeError('Cannot delete the default preset')

    # DELETE PRESET
    AlbaPresetController._logger.debug('Deleting preset {0}'.format(name))
    abm_config = Configuration.get_configuration_path(ArakoonInstaller.CONFIG_KEY.format(AlbaController.get_abm_cluster_name(alba_backend=backend)))
    AlbaCLI.run(command='delete-preset', config=abm_config, extra_params=[name])
    backend.invalidate_dynamics()
def add_preset(alba_backend_guid, name, compression, policies, encryption, fragment_size=None):
    """
    Adds a preset to Alba
    :param alba_backend_guid: Guid of the ALBA backend
    :type alba_backend_guid: str
    :param name: Name of the preset
    :type name: str
    :param compression: Compression type for the preset (none | snappy | bz2)
    :type compression: str
    :param policies: Policies for the preset
    :type policies: list
    :param encryption: Encryption for the preset (none | aes-cbc-256 | aes-ctr-256)
    :type encryption: str
    :param fragment_size: Size of a fragment in bytes (e.g. 1048576)
    :type fragment_size: int
    :return: None
    """
    # VALIDATIONS
    if not re.match(Toolbox.regex_preset, name):
        raise ValueError('Invalid preset name specified')

    compression_options = ['snappy', 'bz2', 'none']
    if compression not in compression_options:
        raise ValueError('Invalid compression format specified, please choose from: "{0}"'.format('", "'.join(compression_options)))

    encryption_options = ['aes-cbc-256', 'aes-ctr-256', 'none']
    if encryption not in encryption_options:
        raise ValueError('Invalid encryption format specified, please choose from: "{0}"'.format('", "'.join(encryption_options)))

    if fragment_size is not None and (not isinstance(fragment_size, int) or not 16 <= fragment_size <= 1024 ** 3):
        raise ValueError('Fragment size should be a positive integer smaller than 1 GiB')

    AlbaPresetController._validate_policies_param(policies=policies)

    alba_backend = AlbaBackend(alba_backend_guid)
    if name in [preset['name'] for preset in alba_backend.presets]:
        raise RuntimeError('Preset with name {0} already exists'.format(name))

    # ADD PRESET
    preset = {'compression': compression,
              'object_checksum': {'default': ['crc-32c'],
                                  'verify_upload': True,
                                  'allowed': [['none'], ['sha-1'], ['crc-32c']]},
              'osds': ['all'],
              'fragment_size': 16 * 1024 ** 2 if fragment_size is None else int(fragment_size),  # Default fragment size: 16 MiB
              'policies': policies,
              'fragment_checksum': ['crc-32c'],
              'fragment_encryption': ['none'],
              'in_use': False,
              'name': name}

    temp_key_file = None
    temp_config_file = None
    try:
        # Generate encryption key
        if encryption != 'none':
            # NOTE(review): 'random' is not a CSPRNG - consider os.urandom for key material
            # Simplified from random.choice(chr(...)): choosing from a 1-char string is a no-op
            encryption_key = ''.join(chr(random.randint(32, 126)) for _ in range(32))
            temp_key_file = tempfile.mktemp()
            with open(temp_key_file, 'wb') as temp_file:
                temp_file.write(encryption_key)
                temp_file.flush()
            preset['fragment_encryption'] = ['{0}'.format(encryption), '{0}'.format(temp_key_file)]

        # Dump preset content on filesystem for the ALBA CLI to read via 'input-url'
        config = Configuration.get_configuration_path(alba_backend.abm_cluster.config_location)
        temp_config_file = tempfile.mktemp()
        with open(temp_config_file, 'wb') as data_file:
            data_file.write(json.dumps(preset))
            data_file.flush()

        # Create preset
        AlbaPresetController._logger.debug('Adding preset {0} with compression {1} and policies {2}'.format(name, compression, policies))
        AlbaCLI.run(command='create-preset', config=config, named_params={'input-url': temp_config_file}, extra_params=[name])
        alba_backend.invalidate_dynamics()
    finally:
        # Bug fix: temporary key/config files leaked when AlbaCLI.run raised
        for filename in [temp_key_file, temp_config_file]:
            if filename and os.path.isfile(filename):
                os.remove(filename)
def _alba_arakoon_checkup(cls, alba_backend_guid=None, abm_cluster=None, nsm_clusters=None):
    # type: (Optional[str], Optional[str], Optional[List[str]]) -> None
    """
    Creates a new Arakoon cluster if required and extends cluster if possible on all available master nodes
    :param alba_backend_guid: Guid of the ALBA Backend
    :type alba_backend_guid: str
    :param nsm_clusters: NSM clusters for this ALBA Backend
    The code will claim the Arakoon clusters for this backend when provided
    :type nsm_clusters: list[str]
    :param abm_cluster: ABM cluster for this ALBA Backend
    The code will claim the Arakoon cluster for this backend when provided
    :type abm_cluster: str|None
    :return:None
    :rtype: NoneType
    """
    # Collect SSH clients for every master and slave; unreachable nodes are logged and skipped
    slaves = StorageRouterList.get_slaves()
    masters = StorageRouterList.get_masters()
    clients = {}
    for storagerouter in masters + slaves:
        try:
            clients[storagerouter] = SSHClient(storagerouter)
        except UnableToConnectException:
            cls._logger.warning('Storage Router with IP {0} is not reachable'.format(storagerouter.ip))
    available_storagerouters = cls.get_available_arakoon_storagerouters(clients)

    # Call here, because this potentially raises error, which should happen before actually making changes
    abm_installer = ABMInstaller(ssh_clients=clients)
    nsm_installer = NSMInstaller(version_str=abm_installer.version_str, ssh_clients=clients)

    # Cluster creation
    if alba_backend_guid is not None:
        alba_backend = AlbaBackend(alba_backend_guid)
        # @todo revisit. This might enforce the ABM name for externals (might be unintended)
        abm_cluster_name = '{0}-abm'.format(alba_backend.name)

        # ABM Arakoon cluster creation (only when this backend has no ABM cluster yet)
        if alba_backend.abm_cluster is None:
            # Fallback to installing the cluster on an available storagerouter
            storagerouter, partition = available_storagerouters.items()[0]
            abm_installer.deploy_abm_cluster(alba_backend, abm_cluster_name,
                                             requested_abm_cluster_name=abm_cluster,
                                             storagerouter=storagerouter)

        # NSM Arakoon cluster creation (only when no NSM clusters exist and claimable names were provided)
        if len(alba_backend.nsm_clusters) == 0 and nsm_clusters is not None:
            storagerouter, partition = available_storagerouters.items()[0]
            nsm_installer.deploy_nsm_cluster(alba_backend, storagerouter=storagerouter, nsm_clusters=nsm_clusters)

    # ABM Cluster extension: ensure safety for every registered backend, not just the one passed in
    for alba_backend in AlbaBackendList.get_albabackends():
        if alba_backend.abm_cluster is None:
            AlbaArakoonController._logger.warning('ALBA Backend {0} does not have an ABM cluster registered'.format(alba_backend.name))
            continue
        cls.ensure_abm_cluster_safety(alba_backend.abm_cluster, available_storagerouters, abm_installer=abm_installer)
def test_asd_statistics(self):
    """
    Validates whether the ASD statistics work as expected.
    * Add keys that were not passed in
    * Collapse certain keys
    * Calculate correct per-second, average, total, min and max values
    """
    # Expected aggregated statistics after the first poll (n_ps is 0: no previous sample to diff against)
    expected_0 = {'statistics': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'range': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'range_entries': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'multi_get': {'max': 10, 'n_ps': 0, 'min': 1, 'avg': 13, 'n': 5},
                  'apply': {'max': 5, 'n_ps': 0, 'min': 5, 'avg': 5, 'n': 1},
                  'timestamp': None}
    # Expected statistics after the second poll, 5 seconds later (multi_get grew by 5 calls -> n_ps 1)
    expected_1 = {'statistics': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'range': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'range_entries': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'multi_get': {'max': 10, 'n_ps': 1, 'min': 1, 'avg': 12.5, 'n': 10},
                  'apply': {'max': 5, 'n_ps': 0, 'min': 5, 'avg': 5, 'n': 1},
                  'timestamp': None}
    base_time = time.time()
    # Build the minimal model chain required for AlbaASD statistics:
    # backend type -> backend -> alba backend -> node -> disk -> ASD, plus an ABM service
    backend_type = BackendType()
    backend_type.code = 'alba'
    backend_type.name = 'ALBA'
    backend_type.save()
    backend = Backend()
    backend.name = 'foobar'
    backend.backend_type = backend_type
    backend.save()
    alba_backend = AlbaBackend()
    alba_backend.backend = backend
    alba_backend.save()
    alba_node = AlbaNode()
    alba_node.ip = '127.0.0.1'
    alba_node.port = 8500
    alba_node.username = '******'
    alba_node.password = '******'
    alba_node.node_id = 'foobar'
    alba_node.save()
    alba_disk = AlbaDisk()
    alba_disk.name = 'foo'
    alba_disk.alba_node = alba_node
    alba_disk.save()
    asd = AlbaASD()
    asd.asd_id = 'foo'
    asd.alba_backend = alba_backend
    asd.alba_disk = alba_disk
    asd.save()
    service_type = ServiceType()
    service_type.name = 'AlbaManager'
    service_type.save()
    service = Service()
    service.name = 'foobar'
    service.type = service_type
    service.ports = []
    service.save()
    abm_service = ABMService()
    abm_service.service = service
    abm_service.alba_backend = alba_backend
    abm_service.save()

    # Prime the mocked clients: MultiGet and MultiGet2 are expected to be collapsed into 'multi_get'
    asdmanager_client = ASDManagerClient('')
    asdmanager_client._results['get_disks'] = []
    AlbaCLI._run_results['asd-multistatistics'] = {'foo': {'success': True,
                                                           'result': {'Apply': {'n': 1, 'avg': 5, 'min': 5, 'max': 5},
                                                                      'MultiGet': {'n': 2, 'avg': 10, 'min': 5, 'max': 10},
                                                                      'MultiGet2': {'n': 3, 'avg': 15, 'min': 1, 'max': 5}}}}
    statistics = asd._statistics(AlbaASD._dynamics[0])
    expected_0['timestamp'] = base_time
    self.assertDictEqual(statistics, expected_0, 'The first statistics should be as expected: {0} vs {1}'.format(statistics, expected_0))
    # NOTE(review): time.sleep/time.time are presumably patched by the test framework so
    # exactly 5 seconds elapse - verify against the test setup
    time.sleep(5)
    asdmanager_client._results['get_disks'] = []
    AlbaCLI._run_results['asd-multistatistics'] = {'foo': {'success': True,
                                                           'result': {'Apply': {'n': 1, 'avg': 5, 'min': 5, 'max': 5},
                                                                      'MultiGet': {'n': 5, 'avg': 10, 'min': 5, 'max': 10},
                                                                      'MultiGet2': {'n': 5, 'avg': 15, 'min': 1, 'max': 5}}}}
    statistics = asd._statistics(AlbaASD._dynamics[0])
    expected_1['timestamp'] = base_time + 5
    self.assertDictEqual(statistics, expected_1, 'The second statistics should be as expected: {0} vs {1}'.format(statistics, expected_1))
def test_asd_statistics(self):
    """
    Validates whether the ASD statistics work as expected.
    * Add keys that were not passed in
    * Collapse certain keys
    * Calculate correct per-second, average, total, min and max values
    """
    # Imports are local so the mocked/patched modules are picked up at test runtime
    from ovs.extensions.plugins.albacli import AlbaCLI
    from ovs.extensions.plugins.asdmanager import ASDManagerClient
    from ovs.dal.hybrids.albaasd import AlbaASD
    from ovs.dal.hybrids.albanode import AlbaNode
    from ovs.dal.hybrids.albabackend import AlbaBackend
    from ovs.dal.hybrids.backend import Backend
    from ovs.dal.hybrids.backendtype import BackendType

    # Expected aggregated statistics after the first poll (n_ps is 0: no previous sample to diff against)
    expected_0 = {'statistics': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'range': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'range_entries': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'multi_get': {'max': 10, 'n_ps': 0, 'min': 1, 'avg': 13, 'n': 5},
                  'apply': {'max': 5, 'n_ps': 0, 'min': 5, 'avg': 5, 'n': 1},
                  'timestamp': None}
    # Expected statistics after the second poll, 5 seconds later (multi_get grew by 5 calls -> n_ps 1)
    expected_1 = {'statistics': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'range': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'range_entries': {'max': 0, 'n_ps': 0, 'min': 0, 'avg': 0, 'n': 0},
                  'multi_get': {'max': 10, 'n_ps': 1, 'min': 1, 'avg': 12.5, 'n': 10},
                  'apply': {'max': 5, 'n_ps': 0, 'min': 5, 'avg': 5, 'n': 1},
                  'timestamp': None}
    base_time = time.time()
    # Build the minimal model chain: backend type -> backend -> alba backend -> node -> ASD
    backend_type = BackendType()
    backend_type.code = 'alba'
    backend_type.name = 'ALBA'
    backend_type.save()
    backend = Backend()
    backend.name = 'foobar'
    backend.backend_type = backend_type
    backend.save()
    alba_backend = AlbaBackend()
    alba_backend.backend = backend
    alba_backend.save()
    alba_node = AlbaNode()
    alba_node.ip = '127.0.0.1'
    alba_node.port = 8500
    alba_node.username = '******'
    alba_node.password = '******'
    alba_node.node_id = 'foobar'
    alba_node.save()
    asd = AlbaASD()
    asd.asd_id = 'foo'
    asd.alba_backend = alba_backend
    asd.alba_node = alba_node
    asd.save()

    # Prime the mocked clients: MultiGet and MultiGet2 are expected to be collapsed into 'multi_get'
    ASDManagerClient.results['get_disks'] = []
    AlbaCLI.run_results['asd-multistatistics'] = {'foo': {'success': True,
                                                          'result': {'Apply': {'n': 1, 'avg': 5, 'min': 5, 'max': 5},
                                                                     'MultiGet': {'n': 2, 'avg': 10, 'min': 5, 'max': 10},
                                                                     'MultiGet2': {'n': 3, 'avg': 15, 'min': 1, 'max': 5}}}}
    statistics = asd._statistics(AlbaASD._dynamics[4])
    expected_0['timestamp'] = base_time
    self.assertDictEqual(statistics, expected_0, 'The first statistics should be as expected: {0} vs {1}'.format(statistics, expected_0))
    # NOTE(review): time.sleep/time.time are presumably patched by the test framework so
    # exactly 5 seconds elapse - verify against the test setup
    time.sleep(5)
    ASDManagerClient.results['get_disks'] = []
    AlbaCLI.run_results['asd-multistatistics'] = {'foo': {'success': True,
                                                          'result': {'Apply': {'n': 1, 'avg': 5, 'min': 5, 'max': 5},
                                                                     'MultiGet': {'n': 5, 'avg': 10, 'min': 5, 'max': 10},
                                                                     'MultiGet2': {'n': 5, 'avg': 15, 'min': 1, 'max': 5}}}}
    statistics = asd._statistics(AlbaASD._dynamics[4])
    expected_1['timestamp'] = base_time + 5
    self.assertDictEqual(statistics, expected_1, 'The second statistics should be as expected: {0} vs {1}'.format(statistics, expected_1))
def nsm_checkup(alba_backend_guid=None, min_internal_nsms=1, external_nsm_cluster_names=None):
    # type: (Optional[str], Optional[int], Optional[List[str]]) -> None
    """
    Validates the current NSM setup/configuration and takes actions where required.
    Assumptions:
    * A 2 node NSM is considered safer than a 1 node NSM.
    * When adding an NSM, the nodes with the least amount of NSM participation are preferred

    :param alba_backend_guid: Run for a specific ALBA Backend
    :type alba_backend_guid: str
    :param min_internal_nsms: Minimum amount of NSM hosts that need to be provided
    :type min_internal_nsms: int
    :param external_nsm_cluster_names: Information about the additional clusters to claim (only for externally managed Arakoon clusters)
    :type external_nsm_cluster_names: list
    :return: None
    :rtype: NoneType
    """
    ###############
    # Validations #
    ###############
    if external_nsm_cluster_names is None:
        external_nsm_cluster_names = []
    AlbaArakoonController._logger.info('NSM checkup started')
    if min_internal_nsms < 1:
        raise ValueError('Minimum amount of NSM clusters must be 1 or more')
    if not isinstance(external_nsm_cluster_names, list):
        raise ValueError("'external_nsm_cluster_names' must be of type 'list'")
    if len(external_nsm_cluster_names) > 0:
        # External cluster names only make sense for a single, explicitly specified backend
        if alba_backend_guid is None:
            raise ValueError('Additional NSMs can only be configured for a specific ALBA Backend')
        if min_internal_nsms > 1:
            raise ValueError("'min_internal_nsms' and 'external_nsm_cluster_names' are mutually exclusive")
        external_nsm_cluster_names = list(set(external_nsm_cluster_names))  # Remove duplicate cluster names
        # Every requested external cluster must already exist in Arakoon metadata
        for cluster_name in external_nsm_cluster_names:
            try:
                ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=cluster_name)
            except NotFoundException:
                raise ValueError('Arakoon cluster with name {0} does not exist'.format(cluster_name))

    # Determine the backends in scope: all RUNNING backends, or just the requested one
    if alba_backend_guid is None:
        alba_backends = [alba_backend for alba_backend in AlbaBackendList.get_albabackends() if alba_backend.backend.status == 'RUNNING']
    else:
        alba_backends = [AlbaBackend(alba_backend_guid)]

    masters = StorageRouterList.get_masters()
    storagerouters = set()
    for alba_backend in alba_backends:
        if alba_backend.abm_cluster is None:
            raise ValueError('No ABM cluster found for ALBA Backend {0}'.format(alba_backend.name))
        if len(alba_backend.abm_cluster.abm_services) == 0:
            raise ValueError('ALBA Backend {0} does not have any registered ABM services'.format(alba_backend.name))
        if len(alba_backend.nsm_clusters) + len(external_nsm_cluster_names) > MAX_NSM_AMOUNT:
            raise ValueError('The maximum of {0} NSM Arakoon clusters will be exceeded. Amount of clusters that can be deployed for this ALBA Backend: {1}'.format(MAX_NSM_AMOUNT, MAX_NSM_AMOUNT - len(alba_backend.nsm_clusters)))
        # Validate enough externally managed Arakoon clusters are available
        if alba_backend.abm_cluster.abm_services[0].service.is_internal is False:
            unused_cluster_names = set([cluster_info['cluster_name'] for cluster_info in ArakoonInstaller.get_unused_arakoon_clusters(cluster_type=ServiceType.ARAKOON_CLUSTER_TYPES.NSM)])
            if set(external_nsm_cluster_names).difference(unused_cluster_names):
                raise ValueError('Some of the provided cluster_names have already been claimed before')
            storagerouters.update(set(masters))  # For externally managed we need an available master node
        else:
            for abm_service in alba_backend.abm_cluster.abm_services:  # For internally managed we need all StorageRouters online
                storagerouters.add(abm_service.service.storagerouter)
            for nsm_cluster in alba_backend.nsm_clusters:  # For internally managed we need all StorageRouters online
                for nsm_service in nsm_cluster.nsm_services:
                    storagerouters.add(nsm_service.service.storagerouter)

    # All involved StorageRouters must be reachable before any change is made
    ssh_clients = {}
    for storagerouter in storagerouters:
        try:
            ssh_clients[storagerouter] = SSHClient(endpoint=storagerouter)
        except UnableToConnectException:
            raise RuntimeError('StorageRouter {0} with IP {1} is not reachable'.format(storagerouter.name, storagerouter.ip))

    version_str = AlbaArakoonInstaller.get_alba_version_string()
    nsm_installer = NSMInstaller(version_str=version_str, ssh_clients=ssh_clients)

    ##################
    # Check Clusters #
    ##################
    safety = Configuration.get('/ovs/framework/plugins/alba/config|nsm.safety')
    maxload = Configuration.get('/ovs/framework/plugins/alba/config|nsm.maxload')

    AlbaArakoonController._logger.debug('NSM safety is configured at: {0}'.format(safety))
    AlbaArakoonController._logger.debug('NSM max load is configured at: {0}'.format(maxload))

    master_client = None
    failed_backends = []
    for alba_backend in alba_backends:
        try:
            # Gather information
            AlbaArakoonController._logger.info('ALBA Backend {0} - Ensuring NSM safety'.format(alba_backend.name))

            internal = AlbaArakoonInstaller.is_internally_managed(alba_backend)
            nsm_loads = AlbaArakoonController.get_nsm_loads(alba_backend)
            nsm_storagerouters = AlbaArakoonController.get_nsms_per_storagerouter(alba_backend)
            sorted_nsm_clusters = sorted(alba_backend.nsm_clusters, key=lambda k: k.number)

            if not internal and len(external_nsm_cluster_names) > 0:
                # Externally managed with clusters to claim: an online master is mandatory
                for sr, cl in ssh_clients.iteritems():
                    if sr.node_type == 'MASTER':
                        master_client = cl
                        break
                if master_client is None:
                    # Internal is False and we specified the NSM clusters to claim, but no MASTER nodes online
                    raise ValueError('Could not find an online master node')

            AlbaArakoonController._logger.debug('ALBA Backend {0} - Arakoon clusters are {1} managed'.format(alba_backend.name, 'internally' if internal is True else 'externally'))
            for nsm_number, nsm_load in nsm_loads.iteritems():
                AlbaArakoonController._logger.debug('ALBA Backend {0} - NSM Cluster {1} - Load {2}'.format(alba_backend.name, nsm_number, nsm_load))
            for sr, count in nsm_storagerouters.iteritems():
                AlbaArakoonController._logger.debug('ALBA Backend {0} - StorageRouter {1} - NSM Services {2}'.format(alba_backend.name, sr.name, count))

            if internal:
                # Extend existing NSM clusters if safety not met
                for nsm_cluster in sorted_nsm_clusters:
                    AlbaArakoonController._logger.debug('ALBA Backend {0} - Processing NSM {1} - Expected safety {2} - Current safety {3}'.format(alba_backend.name, nsm_cluster.number, safety, len(nsm_cluster.nsm_services)))
                    AlbaArakoonController.ensure_nsm_cluster_safety(nsm_cluster, nsm_storagerouters, nsm_installer=nsm_installer)
            # NOTE(review): reconstructed indentation places this call outside the 'if internal'
            # block so externally managed clusters are claimed too - confirm against upstream
            AlbaArakoonController.ensure_nsm_clusters_load(alba_backend,
                                                           nsms_per_storagerouter=nsm_storagerouters,
                                                           ssh_clients=ssh_clients,
                                                           version_str=version_str,
                                                           min_internal_nsms=min_internal_nsms,
                                                           external_nsm_cluster_names=external_nsm_cluster_names)
        except Exception:
            # One failing backend must not abort the checkup for the others
            AlbaArakoonController._logger.exception('NSM Checkup failed for Backend {0}'.format(alba_backend.name))
            failed_backends.append(alba_backend.name)
def add_preset(alba_backend_guid, name, compression, policies, encryption, fragment_size=None):
    """
    Adds a preset to Alba
    :param alba_backend_guid: Guid of the ALBA backend
    :type alba_backend_guid: str
    :param name: Name of the preset
    :type name: str
    :param compression: Compression type for the preset (none | snappy | bz2)
    :type compression: str
    :param policies: Policies for the preset
    :type policies: list
    :param encryption: Encryption for the preset (none | aes-cbc-256 | aes-ctr-256)
    :type encryption: str
    :param fragment_size: Size of a fragment in bytes (e.g. 1048576)
    :type fragment_size: int
    :return: None
    """
    # VALIDATIONS
    if not re.match(Toolbox.regex_preset, name):
        raise ValueError('Invalid preset name specified')

    compression_options = ['snappy', 'bz2', 'none']
    if compression not in compression_options:
        raise ValueError('Invalid compression format specified, please choose from: "{0}"'.format('", "'.join(compression_options)))

    encryption_options = ['aes-cbc-256', 'aes-ctr-256', 'none']
    if encryption not in encryption_options:
        raise ValueError('Invalid encryption format specified, please choose from: "{0}"'.format('", "'.join(encryption_options)))

    if fragment_size is not None and (not isinstance(fragment_size, int) or not 16 <= fragment_size <= 1024 ** 3):
        raise ValueError('Fragment size should be a positive integer smaller than 1 GiB')

    AlbaPresetController._validate_policies_param(policies=policies)

    alba_backend = AlbaBackend(alba_backend_guid)
    if name in [preset['name'] for preset in alba_backend.presets]:
        raise RuntimeError('Preset with name {0} already exists'.format(name))

    # ADD PRESET
    preset = {'compression': compression,
              'object_checksum': {'default': ['crc-32c'],
                                  'verify_upload': True,
                                  'allowed': [['none'], ['sha-1'], ['crc-32c']]},
              'osds': ['all'],
              'fragment_size': 16 * 1024 ** 2 if fragment_size is None else int(fragment_size),  # Default fragment size: 16 MiB
              'policies': policies,
              'fragment_checksum': ['crc-32c'],
              'fragment_encryption': ['none'],
              'in_use': False,
              'name': name}

    temp_key_file = None
    temp_config_file = None
    try:
        # Generate encryption key
        if encryption != 'none':
            # NOTE(review): 'random' is not a CSPRNG - consider os.urandom for key material
            # Simplified from random.choice(chr(...)): choosing from a 1-char string is a no-op
            encryption_key = ''.join(chr(random.randint(32, 126)) for _ in range(32))
            temp_key_file = tempfile.mktemp()
            with open(temp_key_file, 'wb') as temp_file:
                temp_file.write(encryption_key)
                temp_file.flush()
            preset['fragment_encryption'] = ['{0}'.format(encryption), '{0}'.format(temp_key_file)]

        # Dump preset content on filesystem for the ALBA CLI to read via 'input-url'
        config = Configuration.get_configuration_path(ArakoonInstaller.CONFIG_KEY.format(AlbaController.get_abm_cluster_name(alba_backend=alba_backend)))
        temp_config_file = tempfile.mktemp()
        with open(temp_config_file, 'wb') as data_file:
            data_file.write(json.dumps(preset))
            data_file.flush()

        # Create preset
        AlbaPresetController._logger.debug('Adding preset {0} with compression {1} and policies {2}'.format(name, compression, policies))
        AlbaCLI.run(command='create-preset', config=config, named_params={'input-url': temp_config_file}, extra_params=[name])
        alba_backend.invalidate_dynamics()
    finally:
        # Bug fix: temporary key/config files leaked when AlbaCLI.run raised
        for filename in [temp_key_file, temp_config_file]:
            if filename and os.path.isfile(filename):
                os.remove(filename)
def build_service_structure(structure, previous_structure=None):
    """
    Builds a service structure
    Example: structure = Helper.build_service_structure({
        'alba_backends': [1],
        'alba_nodes': [1]
    })

    Creates and saves the requested DAL model objects; entries already present in
    previous_structure are reused instead of recreated. Returns all created/reused
    objects keyed by the same names as the input structure.
    """
    if previous_structure is None:
        previous_structure = {}
    # Reuse objects from a previous invocation so structures can be built incrementally
    backend_types = previous_structure.get('backend_types', {})
    service_types = previous_structure.get('service_types', {})
    alba_backends = previous_structure.get('alba_backends', {})
    alba_nodes = previous_structure.get('alba_nodes', {})
    alba_disks = previous_structure.get('alba_disks', {})
    alba_osds = previous_structure.get('alba_osds', {})

    # Singleton ALBA backend type (fixture id 1)
    if 1 not in backend_types:
        backend_type = BackendType()
        backend_type.code = 'alba'
        backend_type.name = 'ALBA'
        backend_type.save()
        backend_types[1] = backend_type
    # Singleton AlbaManager service type (fixture id 1)
    if 1 not in service_types:
        service_type = ServiceType()
        service_type.name = 'AlbaManager'
        service_type.save()
        service_types[1] = service_type

    # Each requested backend gets a Backend, an AlbaBackend and an ABM service
    for ab_id in structure.get('alba_backends', ()):
        if ab_id not in alba_backends:
            backend = Backend()
            backend.name = 'backend_{0}'.format(ab_id)
            backend.backend_type = backend_types[1]
            backend.save()
            alba_backend = AlbaBackend()
            alba_backend.backend = backend
            alba_backend.scaling = AlbaBackend.SCALINGS.LOCAL
            alba_backend.save()
            alba_backends[ab_id] = alba_backend
            service = Service()
            service.name = 'backend_{0}_abm'.format(ab_id)
            service.type = service_types[1]
            service.ports = []
            service.save()
            abm_service = ABMService()
            abm_service.service = service
            abm_service.alba_backend = alba_backend
            abm_service.save()
    for an_id in structure.get('alba_nodes', []):
        if an_id not in alba_nodes:
            alba_node = AlbaNode()
            alba_node.ip = '10.1.0.{0}'.format(an_id)
            alba_node.port = 8500
            alba_node.username = str(an_id)
            alba_node.password = str(an_id)
            alba_node.node_id = 'node_{0}'.format(an_id)
            alba_node.save()
            alba_nodes[an_id] = alba_node
    # Disks are declared as (disk_id, node_id) pairs linking into alba_nodes
    for ad_id, an_id in structure.get('alba_disks', ()):
        if ad_id not in alba_disks:
            alba_disk = AlbaDisk()
            alba_disk.aliases = ['/dev/alba_disk_{0}'.format(ad_id)]
            alba_disk.alba_node = alba_nodes[an_id]
            alba_disk.save()
            alba_disks[ad_id] = alba_disk
    # OSDs are declared as (osd_id, disk_id, backend_id) triples
    for ao_id, ad_id, ab_id in structure.get('alba_osds', ()):
        if ao_id not in alba_osds:
            osd = AlbaOSD()
            osd.osd_id = 'alba_osd_{0}'.format(ao_id)
            osd.osd_type = AlbaOSD.OSD_TYPES.ASD
            osd.alba_backend = alba_backends[ab_id]
            osd.alba_disk = alba_disks[ad_id]
            osd.save()
            alba_osds[ao_id] = osd
    return {'backend_types': backend_types,
            'service_types': service_types,
            'alba_backends': alba_backends,
            'alba_nodes': alba_nodes,
            'alba_disks': alba_disks,
            'alba_osds': alba_osds}
def _stack(self):
    """
    Returns an overview of this node's storage stack.

    Merges three sources into one dict keyed by slot id:
    1. The live stack reported by the node's asd-manager (self.client.get_stack())
    2. The OSDs modeled on this node (self.osds)
    3. Claim/decommission/error state reported by ALBA itself (AlbaCLI)

    Each slot entry carries 'status'/'status_detail' plus an 'osds' sub-dict
    with per-OSD status and, when available, a 'usage' block.
    :return: The merged stack overview
    :rtype: dict
    """
    # Local imports, presumably to avoid a circular import at module load — TODO confirm
    from ovs.dal.hybrids.albabackend import AlbaBackend
    from ovs.dal.lists.albabackendlist import AlbaBackendList

    def _move(info):
        # Rename legacy 'state'/'state_detail' keys to 'status'/'status_detail' in place
        for move in [('state', 'status'), ('state_detail', 'status_detail')]:
            if move[0] in info:
                info[move[1]] = info[move[0]]
                del info[move[0]]

    stack = {}
    node_down = False
    # Fetch stack from asd-manager
    try:
        remote_stack = self.client.get_stack()
        for slot_id, slot_data in remote_stack.iteritems():
            stack[slot_id] = {'status': 'ok'}
            stack[slot_id].update(slot_data)
            # Migrate state > status
            _move(stack[slot_id])
            for osd_data in slot_data.get('osds', {}).itervalues():
                _move(osd_data)
    except (requests.ConnectionError, requests.Timeout, InvalidCredentialsError):
        # Node unreachable: continue with model data only, marking everything accordingly
        self._logger.warning('Error during stack retrieval. Assuming that the node is down')
        node_down = True

    model_osds = {}
    # Per backend guid: cache of 'list-all-osds' output, keyed by long_id
    found_osds = {}
    # Apply own model to fetched stack
    for osd in self.osds:
        model_osds[osd.osd_id] = osd  # Initially set the info
        if osd.slot_id not in stack:
            # Modeled slot absent remotely: unknown while the node is down, otherwise missing
            stack[osd.slot_id] = {'status': self.OSD_STATUSES.UNKNOWN if node_down is True else self.OSD_STATUSES.MISSING,
                                  'status_detail': self.OSD_STATUS_DETAILS.NODEDOWN if node_down is True else '',
                                  'osds': {}}
        osd_data = stack[osd.slot_id]['osds'].get(osd.osd_id, {})
        stack[osd.slot_id]['osds'][osd.osd_id] = osd_data  # Initially set the info in the stack
        osd_data.update(osd.stack_info)
        if node_down is True:
            osd_data['status'] = self.OSD_STATUSES.UNKNOWN
            osd_data['status_detail'] = self.OSD_STATUS_DETAILS.NODEDOWN
        elif osd.alba_backend_guid is not None:  # Osds has been claimed
            # Load information from alba (once per backend; cached in found_osds)
            if osd.alba_backend_guid not in found_osds:
                found_osds[osd.alba_backend_guid] = {}
                if osd.alba_backend.abm_cluster is not None:
                    config = Configuration.get_configuration_path(osd.alba_backend.abm_cluster.config_location)
                    try:
                        for found_osd in AlbaCLI.run(command='list-all-osds', config=config):
                            found_osds[osd.alba_backend_guid][found_osd['long_id']] = found_osd
                    except (AlbaError, RuntimeError):
                        # ALBA failing: flag this OSD and move on. The (empty) cache entry stays,
                        # so sibling OSDs of the same backend fall into the 'not found' skip below
                        self._logger.exception('Listing all osds has failed')
                        osd_data['status'] = self.OSD_STATUSES.UNKNOWN
                        osd_data['status_detail'] = self.OSD_STATUS_DETAILS.ALBAERROR
                        continue

            if osd.osd_id not in found_osds[osd.alba_backend_guid]:
                # Not claimed by any backend thus not in use
                continue

            found_osd = found_osds[osd.alba_backend_guid][osd.osd_id]
            if found_osd['decommissioned'] is True:
                osd_data['status'] = self.OSD_STATUSES.UNAVAILABLE
                osd_data['status_detail'] = self.OSD_STATUS_DETAILS.DECOMMISSIONED
                continue

            # Per-backend override for the GUI error grace interval, falling back to the global key
            backend_interval_key = '/ovs/alba/backends/{0}/gui_error_interval'.format(osd.alba_backend_guid)
            if Configuration.exists(backend_interval_key):
                interval = Configuration.get(backend_interval_key)
            else:
                interval = Configuration.get('/ovs/alba/backends/global_gui_error_interval')
            read = found_osd['read'] or [0]
            write = found_osd['write'] or [0]
            errors = found_osd['errors']
            osd_data['status'] = self.OSD_STATUSES.WARNING
            osd_data['status_detail'] = self.OSD_STATUS_DETAILS.ERROR
            # OK when there are no errors, or when both reads and writes have succeeded
            # after the newest error plus the grace interval.
            # NOTE(review): 'read'/'write' appear to be timestamp lists and error[0] an error
            # timestamp — confirm against the alba CLI output format
            if len(errors) == 0 or (len(read + write) > 0 and max(min(read), min(write)) > max(error[0] for error in errors) + interval):
                osd_data['status'] = self.OSD_STATUSES.OK
                osd_data['status_detail'] = ''

    statistics = {}
    for slot_info in stack.itervalues():
        for osd_id, osd in slot_info['osds'].iteritems():
            if osd.get('status_detail') == self.OSD_STATUS_DETAILS.ACTIVATING:
                osd['claimed_by'] = 'unknown'  # We won't be able to connect to it just yet
                continue
            if osd_id not in model_osds:
                # The osd is known by the remote node but not in the model
                # In that case, let's connect to the OSD to see whether we get some info from it
                try:
                    ips = osd['hosts'] if 'hosts' in osd and len(osd['hosts']) > 0 else osd.get('ips', [])
                    port = osd['port']
                    claimed_by = 'unknown'
                    for ip in ips:
                        try:
                            # Output will be None if it is not claimed
                            claimed_by = AlbaCLI.run('get-osd-claimed-by', named_params={'host': ip, 'port': port})
                            break
                        except (AlbaError, RuntimeError):
                            self._logger.warning('get-osd-claimed-by failed for IP:port {0}:{1}'.format(ip, port))
                    alba_backend = AlbaBackendList.get_by_alba_id(claimed_by)
                    osd['claimed_by'] = alba_backend.guid if alba_backend is not None else claimed_by
                except KeyError:
                    # 'port' (or 'hosts') missing from the remote info
                    osd['claimed_by'] = 'unknown'
                except:
                    # Deliberate catch-all: best-effort enrichment must never break stack retrieval
                    self._logger.exception('Could not load OSD info: {0}'.format(osd_id))
                    osd['claimed_by'] = 'unknown'
                    if osd.get('status') not in ['error', 'warning']:
                        osd['status'] = self.OSD_STATUSES.ERROR
                        osd['status_detail'] = self.OSD_STATUS_DETAILS.UNREACHABLE
            claimed_by = osd.get('claimed_by', 'unknown')
            if claimed_by == 'unknown':
                continue
            try:
                alba_backend = AlbaBackend(claimed_by)
            except ObjectNotFoundException:
                continue
            # Add usage information
            if alba_backend not in statistics:
                # osd_statistics is fetched once per backend and reused for all its OSDs
                statistics[alba_backend] = alba_backend.osd_statistics
            osd_statistics = statistics[alba_backend]
            if osd_id not in osd_statistics:
                continue
            stats = osd_statistics[osd_id]
            osd['usage'] = {'size': int(stats['capacity']),
                            'used': int(stats['disk_usage']),
                            'available': int(stats['capacity'] - stats['disk_usage'])}
    return stack
def _get_or_create_service_type(service_types, type_name):
    """
    Resolve a ServiceType by name, creating and saving it when the model does
    not have one yet, and cache the result in the given registry.

    :param service_types: Registry of already resolved ServiceTypes, mutated in place
    :type service_types: dict
    :param type_name: Name of the ServiceType (e.g. 'AlbaManager', 'NamespaceManager')
    :type type_name: str
    :return: The resolved (possibly freshly created) ServiceType
    """
    if type_name not in service_types:
        service_type = ServiceTypeList.get_by_name(type_name)
        if service_type is None:
            service_type = ServiceType()
            service_type.name = type_name
            service_type.save()
        service_types[type_name] = service_type
    return service_types[type_name]


def build_dal_structure(structure, previous_structure=None):
    """
    Builds a DAL object structure for unit tests
    Example: structure = AlbaDalHelper.build_dal_structure({
        'alba_backends': [(1, 'LOCAL')],
        'alba_nodes': [1]
    })

    :param structure: Specification of the objects to create:
                      - 'alba_backends': iterable of (backend_id, scaling_name) tuples
                      - 'alba_abm_clusters': iterable of backend_ids
                      - 'alba_nsm_clusters': iterable of (backend_id, amount) tuples
                      - 'alba_nodes': iterable of node_ids
                      - 'alba_osds': iterable of (osd_id, backend_id, node_id, slot_id) tuples
    :type structure: dict
    :param previous_structure: Output of a previous call; its registries are extended in place
    :type previous_structure: dict
    :return: One registry (id -> DAL object) per entity kind
    :rtype: dict
    :raises ValueError: When an ABM/NSM cluster references a backend id that was not built
    """
    if previous_structure is None:
        previous_structure = {}
    alba_osds = previous_structure.get('alba_osds', {})
    alba_nodes = previous_structure.get('alba_nodes', {})
    backend_types = previous_structure.get('backend_types', {})
    service_types = previous_structure.get('service_types', {})
    alba_backends = previous_structure.get('alba_backends', {})
    alba_abm_clusters = previous_structure.get('alba_abm_clusters', {})
    alba_nsm_clusters = previous_structure.get('alba_nsm_clusters', {})

    # Single ALBA BackendType, registered under key 1
    if 1 not in backend_types:
        backend_type = BackendType()
        backend_type.code = 'alba'
        backend_type.name = 'ALBA'
        backend_type.save()
        backend_types[1] = backend_type
    # ServiceTypes are keyed by name; shared get-or-create logic lives in the helper
    _get_or_create_service_type(service_types, 'AlbaManager')
    _get_or_create_service_type(service_types, 'NamespaceManager')

    for ab_id, scaling in structure.get('alba_backends', ()):
        if ab_id not in alba_backends:
            backend = Backend()
            backend.name = 'backend_{0}'.format(ab_id)
            backend.backend_type = backend_types[1]
            backend.save()
            alba_backend = AlbaBackend()
            alba_backend.backend = backend
            alba_backend.scaling = getattr(AlbaBackend.SCALINGS, scaling)
            alba_backend.alba_id = str(ab_id)
            alba_backend.save()
            alba_backends[ab_id] = alba_backend

    for ab_id in structure.get('alba_abm_clusters', ()):
        if ab_id not in alba_abm_clusters:
            if ab_id not in alba_backends:
                raise ValueError('Non-existing ALBA Backend ID provided')
            alba_backend = alba_backends[ab_id]
            # One ABM (Arakoon) cluster per backend, with its service + junction object
            abm_cluster = ABMCluster()
            abm_cluster.name = '{0}-abm'.format(alba_backend.name)
            abm_cluster.alba_backend = alba_backend
            abm_cluster.config_location = '/ovs/arakoon/{0}-abm/config'.format(alba_backend.name)
            abm_cluster.save()
            abm_service = Service()
            abm_service.name = 'arakoon-{0}-abm'.format(alba_backend.name)
            abm_service.type = service_types['AlbaManager']
            abm_service.ports = []
            abm_service.storagerouter = None
            abm_service.save()
            abm_junction_service = ABMService()
            abm_junction_service.service = abm_service
            abm_junction_service.abm_cluster = abm_cluster
            abm_junction_service.save()
            alba_abm_clusters[ab_id] = abm_cluster

    for ab_id, amount in structure.get('alba_nsm_clusters', ()):
        # (Re)build the registry entry when the requested amount changed
        if ab_id not in alba_nsm_clusters or amount != len(alba_nsm_clusters[ab_id]):
            if ab_id not in alba_backends:
                raise ValueError('Non-existing ALBA Backend ID provided')
            alba_backend = alba_backends[ab_id]
            alba_nsm_clusters[ab_id] = []
            # Reuse NSM clusters already attached to the backend, create the rest
            nsm_clusters = dict((nsm_cluster.number, nsm_cluster) for nsm_cluster in alba_backend.nsm_clusters)
            for number in range(amount):
                if number in nsm_clusters:
                    alba_nsm_clusters[ab_id].append(nsm_clusters[number])
                    continue
                nsm_cluster = NSMCluster()
                nsm_cluster.name = '{0}-nsm_{1}'.format(alba_backend.name, number)
                nsm_cluster.number = number
                nsm_cluster.alba_backend = alba_backend
                nsm_cluster.config_location = '/ovs/arakoon/{0}-nsm_{1}/config'.format(alba_backend.name, number)
                nsm_cluster.save()
                nsm_service = Service()
                nsm_service.name = 'arakoon-{0}-nsm_{1}'.format(alba_backend.name, number)
                nsm_service.type = service_types['NamespaceManager']
                nsm_service.ports = []
                nsm_service.storagerouter = None
                nsm_service.save()
                nsm_junction_service = NSMService()
                nsm_junction_service.service = nsm_service
                nsm_junction_service.nsm_cluster = nsm_cluster
                nsm_junction_service.save()
                alba_nsm_clusters[ab_id].append(nsm_cluster)

    for an_id in structure.get('alba_nodes', []):
        if an_id not in alba_nodes:
            alba_node = AlbaNode()
            alba_node.ip = '10.1.0.{0}'.format(an_id)
            alba_node.port = 8500
            alba_node.username = str(an_id)
            alba_node.password = str(an_id)
            alba_node.node_id = 'node_{0}'.format(an_id)
            alba_node.save()
            alba_nodes[an_id] = alba_node
            # Let the mocked asd-manager client answer get_metadata for this node
            if alba_node in ManagerClientMockup.test_results:
                ManagerClientMockup.test_results[alba_node].update({'get_metadata': {'_version': 3}})
            else:
                ManagerClientMockup.test_results[alba_node] = {'get_metadata': {'_version': 3}}

    for ao_id, ab_id, an_id, slot_id in structure.get('alba_osds', ()):
        if ao_id not in alba_osds:
            osd = AlbaOSD()
            osd.osd_id = 'alba_osd_{0}'.format(ao_id)
            osd.osd_type = AlbaOSD.OSD_TYPES.ASD
            osd.alba_backend = alba_backends[ab_id]
            osd.alba_node = alba_nodes[an_id]
            osd.slot_id = 'alba_slot_{0}'.format(slot_id)
            osd.ips = ['127.0.0.{0}'.format(ao_id)]
            osd.port = 35000 + ao_id
            osd.save()
            alba_osds[ao_id] = osd

    return {'alba_osds': alba_osds,
            'alba_nodes': alba_nodes,
            'backend_types': backend_types,
            'service_types': service_types,
            'alba_backends': alba_backends,
            'alba_abm_clusters': alba_abm_clusters,
            'alba_nsm_clusters': alba_nsm_clusters}