def setUpClass(cls):
    """
    Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
    This makes sure the unittests can be executed without those libraries installed
    """
    # Load dummy stores
    PersistentFactory.store = DummyPersistentStore()
    VolatileFactory.store = DummyVolatileStore()
    # Replace mocked classes
    sys.modules['ovs.extensions.storageserver.storagedriver'] = StorageDriverModule
    # Import required modules/classes after mocking is done
    from ovs.dal.hybrids.vdisk import VDisk
    from ovs.dal.hybrids.service import Service
    from ovs.dal.hybrids.vpool import VPool
    from ovs.dal.hybrids.storagerouter import StorageRouter
    from ovs.dal.hybrids.pmachine import PMachine
    from ovs.dal.hybrids.servicetype import ServiceType
    from ovs.dal.hybrids.storagedriver import StorageDriver
    from ovs.dal.hybrids.backendtype import BackendType
    from ovs.dal.hybrids.j_mdsservice import MDSService
    from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk
    from ovs.extensions.generic.volatilemutex import VolatileMutex
    from ovs.lib.mdsservice import MDSServiceController
    # Globalize mocked classes
    global VDisk
    global VPool
    global Service
    global StorageRouter
    global StorageDriver
    global BackendType
    global PMachine
    global MDSService
    global ServiceType
    global MDSServiceVDisk
    global VolatileMutex
    global MDSServiceController
    _ = VDisk(), VPool(), Service(), MDSService(), MDSServiceVDisk(), ServiceType(), \
        StorageRouter(), StorageDriver(), BackendType(), PMachine(), \
        VolatileMutex('dummy'), MDSServiceController

    # Configuration
    def _get(key):
        c = PersistentFactory.get_client()
        if c.exists(key):
            return c.get(key)
        return None
    Configuration.get = staticmethod(_get)

    # Cleaning storage
    VolatileFactory.store.clean()
    PersistentFactory.store.clean()
def setUpClass(cls):
    """
    Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
    This makes sure the unittests can be executed without those libraries installed
    """
    # Load dummy stores
    PersistentFactory.store = DummyPersistentStore()
    VolatileFactory.store = DummyVolatileStore()
    # Replace mocked classes
    sys.modules['ovs.extensions.storageserver.storagedriver'] = StorageDriverModule
    sys.modules['ovs.extensions.hypervisor.hypervisors.kvm'] = KVMModule
    # Import required modules/classes after mocking is done
    from ovs.dal.hybrids.backendtype import BackendType
    from ovs.dal.hybrids.vdisk import VDisk
    from ovs.dal.hybrids.j_mdsservice import MDSService
    from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk
    from ovs.lib.vdisk import VDiskController
    from ovs.dal.hybrids.pmachine import PMachine
    from ovs.dal.hybrids.vmachine import VMachine
    from ovs.dal.hybrids.vpool import VPool
    from ovs.dal.hybrids.storagedriver import StorageDriver
    from ovs.dal.hybrids.storagerouter import StorageRouter
    from ovs.dal.hybrids.failuredomain import FailureDomain
    from ovs.dal.hybrids.service import Service
    from ovs.dal.hybrids.servicetype import ServiceType
    from ovs.dal.lists.vdisklist import VDiskList
    from ovs.lib.mdsservice import MDSServiceController
    # Globalize mocked classes
    global VDisk
    global VDiskController
    global PMachine
    global VMachine
    global BackendType
    global VPool
    global StorageDriver
    global StorageRouter
    global FailureDomain
    global MDSService
    global MDSServiceVDisk
    global Service
    global ServiceType
    global VDiskList
    global MDSServiceController
    _ = VDisk(), PMachine(), VMachine(), VDiskController, VPool(), BackendType(), StorageDriver(), StorageRouter(), \
        FailureDomain(), MDSService(), MDSServiceVDisk(), Service(), ServiceType(), VDiskList, MDSServiceController

    # Cleaning storage
    VolatileFactory.store.clean()
    PersistentFactory.store.clean()
def setUpClass(cls):
    """
    Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
    This makes sure the unittests can be executed without those libraries installed
    """
    # Load dummy stores
    PersistentFactory.store = DummyPersistentStore()
    VolatileFactory.store = DummyVolatileStore()
    # Replace mocked classes
    sys.modules['ovs.extensions.storageserver.storagedriver'] = StorageDriverModule
    # Import required modules/classes after mocking is done
    from ovs.dal.hybrids.backendtype import BackendType
    from ovs.dal.hybrids.disk import Disk
    from ovs.dal.hybrids.diskpartition import DiskPartition
    from ovs.dal.hybrids.failuredomain import FailureDomain
    from ovs.dal.hybrids.pmachine import PMachine
    from ovs.dal.hybrids.storagerouter import StorageRouter
    from ovs.dal.hybrids.vdisk import VDisk
    from ovs.dal.hybrids.vmachine import VMachine
    from ovs.dal.hybrids.vpool import VPool
    from ovs.extensions.generic.volatilemutex import VolatileMutex
    from ovs.lib.vmachine import VMachineController
    from ovs.lib.vdisk import VDiskController
    from ovs.lib.scheduledtask import ScheduledTaskController
    # Globalize mocked classes
    global Disk
    global VDisk
    global VMachine
    global PMachine
    global VPool
    global BackendType
    global DiskPartition
    global FailureDomain
    global StorageRouter
    global VolatileMutex
    global VMachineController
    global VDiskController
    global ScheduledTaskController
    _ = VDisk(), VolatileMutex('dummy'), VMachine(), PMachine(), VPool(), BackendType(), FailureDomain(), \
        VMachineController, VDiskController, ScheduledTaskController, StorageRouter(), Disk(), DiskPartition()

    # Cleaning storage
    VolatileFactory.store.clean()
    PersistentFactory.store.clean()
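# The three setUpClass variants above all rely on the same trick: a stand-in
# module must be registered in sys.modules before the first import of the real
# dotted path, otherwise Python resolves (and caches) the genuine dependency.
# A minimal, self-contained sketch of that pattern; the module and class names
# here are illustrative, not the actual OVS mocks:
import sys
import types

fake = types.ModuleType('storagedriver_mock')
fake.StorageDriverClient = type('StorageDriverClient', (), {})
# The real tests register the full dotted path, e.g.
# sys.modules['ovs.extensions.storageserver.storagedriver'] = StorageDriverModule;
# a flat name keeps this sketch importable without the ovs package installed.
sys.modules['storagedriver_mock'] = fake

import storagedriver_mock
assert storagedriver_mock.StorageDriverClient is fake.StorageDriverClient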
def migrate(previous_version):
    """
    Migrates from a given version to the current version. It uses 'previous_version' to be smart
    wherever possible, but the code should be able to migrate any version towards the expected version.
    When this is not possible, the code can set a minimum version and raise when it is not met.
    :param previous_version: The previous version from which to start the migration
    :type previous_version: float
    """
    working_version = previous_version
    if working_version == 0:
        from ovs.dal.hybrids.servicetype import ServiceType
        # Initial version:
        # * Add any basic configuration or model entries

        # Add backends
        for backend_type_info in [('ALBA', 'alba')]:
            code = backend_type_info[1]
            backend_type = BackendTypeList.get_backend_type_by_code(code)
            if backend_type is None:
                backend_type = BackendType()
            backend_type.name = backend_type_info[0]
            backend_type.code = code
            backend_type.save()

        # Add service types
        for service_type_info in [ServiceType.SERVICE_TYPES.NS_MGR,
                                  ServiceType.SERVICE_TYPES.ALBA_MGR,
                                  ServiceType.SERVICE_TYPES.ALBA_S3_TRANSACTION]:
            service_type = ServiceType()
            service_type.name = service_type_info
            service_type.save()

    # From here on, all actual migration should happen to get to the expected state for THIS RELEASE
    elif working_version < DALMigrator.THIS_VERSION:
        import hashlib
        from ovs.dal.exceptions import ObjectNotFoundException
        from ovs.dal.helpers import HybridRunner, Descriptor
        from ovs.dal.hybrids.albaabmcluster import ABMCluster
        from ovs.dal.hybrids.albaosd import AlbaOSD
        from ovs.dal.hybrids.albansmcluster import NSMCluster
        from ovs.dal.hybrids.j_abmservice import ABMService
        from ovs.dal.hybrids.j_nsmservice import NSMService
        from ovs.dal.hybrids.service import Service
        from ovs.dal.hybrids.servicetype import ServiceType
        from ovs.dal.lists.albabackendlist import AlbaBackendList
        from ovs.dal.lists.albanodelist import AlbaNodeList
        from ovs.dal.lists.servicetypelist import ServiceTypeList
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        from ovs.extensions.db.arakooninstaller import ArakoonClusterConfig, ArakoonInstaller
        from ovs.extensions.generic.configuration import Configuration, NotFoundException
        from ovs_extensions.generic.toolbox import ExtensionsToolbox
        from ovs.extensions.plugins.albacli import AlbaCLI
        from ovs.extensions.storage.persistentfactory import PersistentFactory

        # Migrate unique constraints & indexes
        client = PersistentFactory.get_client()
        hybrid_structure = HybridRunner.get_hybrids()
        for class_descriptor in hybrid_structure.values():
            cls = Descriptor().load(class_descriptor).get_object()
            classname = cls.__name__.lower()
            unique_key = 'ovs_unique_{0}_{{0}}_'.format(classname)
            index_prefix = 'ovs_index_{0}|{{0}}|'.format(classname)
            index_key = 'ovs_index_{0}|{{0}}|{{1}}'.format(classname)
            uniques = []
            indexes = []
            # noinspection PyProtectedMember
            for prop in cls._properties:
                if prop.unique is True and len([k for k in client.prefix(unique_key.format(prop.name))]) == 0:
                    uniques.append(prop.name)
                if prop.indexed is True and len([k for k in client.prefix(index_prefix.format(prop.name))]) == 0:
                    indexes.append(prop.name)
            if len(uniques) > 0 or len(indexes) > 0:
                prefix = 'ovs_data_{0}_'.format(classname)
                for key, data in client.prefix_entries(prefix):
                    for property_name in uniques:
                        ukey = '{0}{1}'.format(unique_key.format(property_name),
                                               hashlib.sha1(str(data[property_name])).hexdigest())
                        client.set(ukey, key)
                    for property_name in indexes:
                        if property_name not in data:
                            continue  # This is the case when there's a new indexed property added.
                        ikey = index_key.format(property_name, hashlib.sha1(str(data[property_name])).hexdigest())
                        index = list(client.get_multi([ikey], must_exist=False))[0]
                        transaction = client.begin_transaction()
                        if index is None:
                            client.assert_value(ikey, None, transaction=transaction)
                            client.set(ikey, [key], transaction=transaction)
                        elif key not in index:
                            client.assert_value(ikey, index[:], transaction=transaction)
                            client.set(ikey, index + [key], transaction=transaction)
                        client.apply_transaction(transaction)

        #############################################
        # Introduction of ABMCluster and NSMCluster #
        #############################################

        # Verify presence of unchanged ALBA Backends
        alba_backends = AlbaBackendList.get_albabackends()
        changes_required = False
        for alba_backend in alba_backends:
            if alba_backend.abm_cluster is None or len(alba_backend.nsm_clusters) == 0:
                changes_required = True
                break

        if changes_required:
            # Retrieve ABM and NSM clusters
            abm_cluster_info = []
            nsm_cluster_info = []
            for cluster_name in Configuration.list('/ovs/arakoon'):
                try:
                    metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=cluster_name)
                    if metadata['cluster_type'] == ServiceType.ARAKOON_CLUSTER_TYPES.ABM:
                        abm_cluster_info.append(metadata)
                    elif metadata['cluster_type'] == ServiceType.ARAKOON_CLUSTER_TYPES.NSM:
                        nsm_cluster_info.append(metadata)
                except NotFoundException:
                    continue

            # Retrieve NSM Arakoon cluster information
            cluster_arakoon_map = {}
            for cluster_info in abm_cluster_info + nsm_cluster_info:
                cluster_name = cluster_info['cluster_name']
                arakoon_config = ArakoonClusterConfig(cluster_id=cluster_name)
                cluster_arakoon_map[cluster_name] = arakoon_config.export_dict()

            storagerouter_map = dict((storagerouter.machine_id, storagerouter)
                                     for storagerouter in StorageRouterList.get_storagerouters())
            alba_backend_id_map = dict((alba_backend.alba_id, alba_backend)
                                       for alba_backend in alba_backends)
            for cluster_info in abm_cluster_info:
                internal = cluster_info['internal']
                cluster_name = cluster_info['cluster_name']

                config_location = Configuration.get_configuration_path(key=ArakoonClusterConfig.CONFIG_KEY.format(cluster_name))
                try:
                    alba_id = AlbaCLI.run(command='get-alba-id', config=config_location, named_params={'attempts': 3})['id']
                    nsm_hosts = AlbaCLI.run(command='list-nsm-hosts', config=config_location, named_params={'attempts': 3})
                except RuntimeError:
                    continue

                alba_backend = alba_backend_id_map.get(alba_id)
                if alba_backend is None:  # ALBA Backend with ID not found in model
                    continue
                if alba_backend.abm_cluster is not None and len(alba_backend.nsm_clusters) > 0:  # Clusters already exist
                    continue

                # Create ABM Cluster
                if alba_backend.abm_cluster is None:
                    abm_cluster = ABMCluster()
                    abm_cluster.name = cluster_name
                    abm_cluster.alba_backend = alba_backend
                    abm_cluster.config_location = ArakoonClusterConfig.CONFIG_KEY.format(cluster_name)
                    abm_cluster.save()
                else:
                    abm_cluster = alba_backend.abm_cluster

                # Create ABM Services
                abm_arakoon_config = cluster_arakoon_map[cluster_name]
                abm_arakoon_config.pop('global')
                arakoon_nodes = abm_arakoon_config.keys()
                if internal is False:
                    services_to_create = 1
                else:
                    if set(arakoon_nodes).difference(set(storagerouter_map.keys())):
                        continue
                    services_to_create = len(arakoon_nodes)
                for index in range(services_to_create):
                    service = Service()
                    service.name = 'arakoon-{0}-abm'.format(alba_backend.name)
                    service.type = ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.ALBA_MGR)
                    if internal is True:
                        arakoon_node_config = abm_arakoon_config[arakoon_nodes[index]]
                        service.ports = [arakoon_node_config['client_port'],
                                         arakoon_node_config['messaging_port']]
                        service.storagerouter = storagerouter_map[arakoon_nodes[index]]
                    else:
                        service.ports = []
                        service.storagerouter = None
                    service.save()

                    abm_service = ABMService()
                    abm_service.service = service
                    abm_service.abm_cluster = abm_cluster
                    abm_service.save()

                # Create NSM Clusters
                for cluster_index, nsm_host in enumerate(sorted(nsm_hosts,
                                                                key=lambda host: ExtensionsToolbox.advanced_sort(host['cluster_id'], '_'))):
                    nsm_cluster_name = nsm_host['cluster_id']
                    nsm_arakoon_config = cluster_arakoon_map.get(nsm_cluster_name)
                    if nsm_arakoon_config is None:
                        continue

                    number = cluster_index if internal is False else int(nsm_cluster_name.split('_')[-1])
                    nsm_cluster = NSMCluster()
                    nsm_cluster.name = nsm_cluster_name
                    nsm_cluster.number = number
                    nsm_cluster.alba_backend = alba_backend
                    nsm_cluster.config_location = ArakoonClusterConfig.CONFIG_KEY.format(nsm_cluster_name)
                    nsm_cluster.save()

                    # Create NSM Services
                    nsm_arakoon_config.pop('global')
                    arakoon_nodes = nsm_arakoon_config.keys()
                    if internal is False:
                        services_to_create = 1
                    else:
                        if set(arakoon_nodes).difference(set(storagerouter_map.keys())):
                            continue
                        services_to_create = len(arakoon_nodes)
                    for service_index in range(services_to_create):
                        service = Service()
                        service.name = 'arakoon-{0}-nsm_{1}'.format(alba_backend.name, number)
                        service.type = ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.NS_MGR)
                        if internal is True:
                            arakoon_node_config = nsm_arakoon_config[arakoon_nodes[service_index]]
                            service.ports = [arakoon_node_config['client_port'],
                                             arakoon_node_config['messaging_port']]
                            service.storagerouter = storagerouter_map[arakoon_nodes[service_index]]
                        else:
                            service.ports = []
                            service.storagerouter = None
                        service.save()

                        nsm_service = NSMService()
                        nsm_service.service = service
                        nsm_service.nsm_cluster = nsm_cluster
                        nsm_service.save()

        # Clean up all junction services no longer linked to an ALBA Backend
        all_nsm_services = [service.nsm_service
                            for service in ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.NS_MGR).services
                            if service.nsm_service.nsm_cluster is None]
        all_abm_services = [service.abm_service
                            for service in ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.ALBA_MGR).services
                            if service.abm_service.abm_cluster is None]
        for abm_service in all_abm_services:
            abm_service.delete()
            abm_service.service.delete()
        for nsm_service in all_nsm_services:
            nsm_service.delete()
            nsm_service.service.delete()

        ################################
        # Introduction of Active Drive #
        ################################

        # Update slot_id and Alba Node relation for all OSDs
        client = PersistentFactory.get_client()
        disk_osd_map = {}
        for key, data in client.prefix_entries('ovs_data_albaosd_'):
            alba_disk_guid = data.get('alba_disk', {}).get('guid')
            if alba_disk_guid is not None:
                if alba_disk_guid not in disk_osd_map:
                    disk_osd_map[alba_disk_guid] = []
                disk_osd_map[alba_disk_guid].append(key.replace('ovs_data_albaosd_', ''))
            try:
                value = client.get(key)
                value.pop('alba_disk', None)
                client.set(key=key, value=value)
            except Exception:
                pass  # We don't care if we would have any leftover AlbaDisk information in _data, but it's cleaner not to

        alba_guid_node_map = dict((an.guid, an) for an in AlbaNodeList.get_albanodes())
        for key, data in client.prefix_entries('ovs_data_albadisk_'):
            alba_disk_guid = key.replace('ovs_data_albadisk_', '')
            alba_node_guid = data.get('alba_node', {}).get('guid')
            if alba_disk_guid in disk_osd_map and alba_node_guid in alba_guid_node_map and len(data.get('aliases', [])) > 0:
                slot_id = data['aliases'][0].split('/')[-1]
                for osd_guid in disk_osd_map[alba_disk_guid]:
                    try:
                        osd = AlbaOSD(osd_guid)
                    except ObjectNotFoundException:
                        continue
                    osd.slot_id = slot_id
                    osd.alba_node = alba_guid_node_map[alba_node_guid]
                    osd.save()
            client.delete(key=key, must_exist=False)

        # Remove unique constraints for AlbaNode IP
        for key in client.prefix('ovs_unique_albanode_ip_'):
            client.delete(key=key, must_exist=False)

        # Remove relation for all Alba Disks
        for key in client.prefix('ovs_reverseindex_albadisk_'):
            client.delete(key=key, must_exist=False)

        # Remove the relation between AlbaNode and AlbaDisk
        for key in client.prefix('ovs_reverseindex_albanode_'):
            if '|disks|' in key:
                client.delete(key=key, must_exist=False)

    return DALMigrator.THIS_VERSION
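# The unique-constraint bookkeeping above hashes the property value into the
# key itself: ovs_unique_<classname>_<property>_<sha1(value)> -> object key.
# A minimal sketch of that derivation; classname, property and value are
# illustrative, and note the original Python 2 code passes str(value) to sha1
# directly, while Python 3 needs the encode():
import hashlib

classname = 'albanode'    # illustrative
prop = 'ip'               # illustrative
value = '10.100.1.1'      # illustrative

unique_key = 'ovs_unique_{0}_{{0}}_'.format(classname)
ukey = '{0}{1}'.format(unique_key.format(prop),
                       hashlib.sha1(str(value).encode()).hexdigest())
print(ukey)  # ovs_unique_albanode_ip_<40-char hex digest>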
def migrate(previous_version):
    """
    Migrates from any version to any version, running all migrations required
    If previous_version is for example 0 and this script is at version 3 it will execute two steps:
      - 1 > 2
      - 2 > 3
    @param previous_version: The previous version from which to start the migration.
    """

    working_version = previous_version

    # Version 1 introduced:
    # - The datastore is still empty, add defaults
    if working_version < 1:
        from ovs.dal.hybrids.user import User
        from ovs.dal.hybrids.group import Group
        from ovs.dal.hybrids.role import Role
        from ovs.dal.hybrids.client import Client
        from ovs.dal.hybrids.failuredomain import FailureDomain
        from ovs.dal.hybrids.j_rolegroup import RoleGroup
        from ovs.dal.hybrids.j_roleclient import RoleClient
        from ovs.dal.hybrids.backendtype import BackendType
        from ovs.dal.hybrids.servicetype import ServiceType
        from ovs.dal.hybrids.branding import Branding
        from ovs.dal.lists.backendtypelist import BackendTypeList

        # Create groups
        admin_group = Group()
        admin_group.name = 'administrators'
        admin_group.description = 'Administrators'
        admin_group.save()
        viewers_group = Group()
        viewers_group.name = 'viewers'
        viewers_group.description = 'Viewers'
        viewers_group.save()

        # Create users
        admin = User()
        admin.username = '******'
        admin.password = hashlib.sha256('admin').hexdigest()
        admin.is_active = True
        admin.group = admin_group
        admin.save()

        # Create internal OAuth 2 clients
        admin_pw_client = Client()
        admin_pw_client.ovs_type = 'INTERNAL'
        admin_pw_client.grant_type = 'PASSWORD'
        admin_pw_client.user = admin
        admin_pw_client.save()
        admin_cc_client = Client()
        admin_cc_client.ovs_type = 'INTERNAL'
        admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
        admin_cc_client.client_secret = ''.join(random.choice(string.ascii_letters +
                                                              string.digits +
                                                              '|_=+*#@!/-[]{}<>.?,\'";:~')
                                                for _ in range(128))
        admin_cc_client.user = admin
        admin_cc_client.save()

        # Create roles
        read_role = Role()
        read_role.code = 'read'
        read_role.name = 'Read'
        read_role.description = 'Can read objects'
        read_role.save()
        write_role = Role()
        write_role.code = 'write'
        write_role.name = 'Write'
        write_role.description = 'Can write objects'
        write_role.save()
        manage_role = Role()
        manage_role.code = 'manage'
        manage_role.name = 'Manage'
        manage_role.description = 'Can manage the system'
        manage_role.save()

        # Attach groups to roles
        mapping = [(admin_group, [read_role, write_role, manage_role]),
                   (viewers_group, [read_role])]
        for setting in mapping:
            for role in setting[1]:
                rolegroup = RoleGroup()
                rolegroup.group = setting[0]
                rolegroup.role = role
                rolegroup.save()
            for user in setting[0].users:
                for role in setting[1]:
                    for client in user.clients:
                        roleclient = RoleClient()
                        roleclient.client = client
                        roleclient.role = role
                        roleclient.save()

        # Add backends
        for backend_type_info in [('Ceph', 'ceph_s3'), ('Amazon', 'amazon_s3'), ('Swift', 'swift_s3'),
                                  ('Local', 'local'), ('Distributed', 'distributed'), ('ALBA', 'alba')]:
            code = backend_type_info[1]
            backend_type = BackendTypeList.get_backend_type_by_code(code)
            if backend_type is None:
                backend_type = BackendType()
            backend_type.name = backend_type_info[0]
            backend_type.code = code
            backend_type.save()

        # Add service types
        for service_type_info in [ServiceType.SERVICE_TYPES.MD_SERVER,
                                  ServiceType.SERVICE_TYPES.ALBA_PROXY,
                                  ServiceType.SERVICE_TYPES.ARAKOON]:
            service_type = ServiceType()
            service_type.name = service_type_info
            service_type.save()

        # Branding
        branding = Branding()
        branding.name = 'Default'
        branding.description = 'Default bootstrap theme'
        branding.css = 'bootstrap-default.min.css'
        branding.productname = 'Open vStorage'
        branding.is_default = True
        branding.save()
        slate = Branding()
        slate.name = 'Slate'
        slate.description = 'Dark bootstrap theme'
        slate.css = 'bootstrap-slate.min.css'
        slate.productname = 'Open vStorage'
        slate.is_default = False
        slate.save()

        # Failure Domain
        failure_domain = FailureDomain()
        failure_domain.name = 'Default'
        failure_domain.save()

        # We're now at version 1
        working_version = 1

    # Version 2 introduced:
    # - new Descriptor format
    if working_version < 2:
        import imp
        from ovs.dal.helpers import Descriptor
        from ovs.extensions.storage.persistentfactory import PersistentFactory

        client = PersistentFactory.get_client()
        keys = client.prefix('ovs_data')
        for key in keys:
            data = client.get(key)
            modified = False
            for entry in data.keys():
                if isinstance(data[entry], dict) and 'source' in data[entry] and 'hybrids' in data[entry]['source']:
                    filename = data[entry]['source']
                    if not filename.startswith('/'):
                        filename = '/opt/OpenvStorage/ovs/dal/{0}'.format(filename)
                    module = imp.load_source(data[entry]['name'], filename)
                    cls = getattr(module, data[entry]['type'])
                    new_data = Descriptor(cls, cached=False).descriptor
                    if 'guid' in data[entry]:
                        new_data['guid'] = data[entry]['guid']
                    data[entry] = new_data
                    modified = True
            if modified is True:
                data['_version'] += 1
                client.set(key, data)

        # We're now at version 2
        working_version = 2

    # Version 3 introduced:
    # - new Descriptor format
    if working_version < 3:
        import imp
        from ovs.dal.helpers import Descriptor
        from ovs.extensions.storage.persistentfactory import PersistentFactory

        client = PersistentFactory.get_client()
        keys = client.prefix('ovs_data')
        for key in keys:
            data = client.get(key)
            modified = False
            for entry in data.keys():
                if isinstance(data[entry], dict) and 'source' in data[entry]:
                    module = imp.load_source(data[entry]['name'], data[entry]['source'])
                    cls = getattr(module, data[entry]['type'])
                    new_data = Descriptor(cls, cached=False).descriptor
                    if 'guid' in data[entry]:
                        new_data['guid'] = data[entry]['guid']
                    data[entry] = new_data
                    modified = True
            if modified is True:
                data['_version'] += 1
                client.set(key, data)

        working_version = 3

    # Version 4 introduced:
    # - Flexible SSD layout
    if working_version < 4:
        import os
        from ovs.dal.hybrids.diskpartition import DiskPartition
        from ovs.dal.hybrids.j_storagedriverpartition import StorageDriverPartition
        from ovs.dal.hybrids.servicetype import ServiceType
        from ovs.dal.lists.servicetypelist import ServiceTypeList
        from ovs.dal.lists.storagedriverlist import StorageDriverList
        from ovs.extensions.generic.remote import remote
        from ovs.extensions.generic.sshclient import SSHClient
        from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration

        for service in ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.MD_SERVER).services:
            mds_service = service.mds_service
            storagedriver = None
            for current_storagedriver in service.storagerouter.storagedrivers:
                if current_storagedriver.vpool_guid == mds_service.vpool_guid:
                    storagedriver = current_storagedriver
                    break
            tasks = {}
            if storagedriver._data.get('mountpoint_md'):
                tasks['{0}/mds_{1}_{2}'.format(storagedriver._data.get('mountpoint_md'),
                                               storagedriver.vpool.name,
                                               mds_service.number)] = (DiskPartition.ROLES.DB,
                                                                       StorageDriverPartition.SUBROLE.MDS)
            if storagedriver._data.get('mountpoint_temp'):
                tasks['{0}/mds_{1}_{2}'.format(storagedriver._data.get('mountpoint_temp'),
                                               storagedriver.vpool.name,
                                               mds_service.number)] = (DiskPartition.ROLES.SCRUB,
                                                                       StorageDriverPartition.SUBROLE.MDS)
            for disk in service.storagerouter.disks:
                for partition in disk.partitions:
                    for directory, (role, subrole) in tasks.iteritems():
                        with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                            stat_dir = directory
                            while not rem.os.path.exists(stat_dir) and stat_dir != '/':
                                stat_dir = stat_dir.rsplit('/', 1)[0]
                                if not stat_dir:
                                    stat_dir = '/'
                            inode = rem.os.stat(stat_dir).st_dev
                        if partition.inode == inode:
                            if role not in partition.roles:
                                partition.roles.append(role)
                                partition.save()
                            number = 0
                            migrated = False
                            for sd_partition in storagedriver.partitions:
                                if sd_partition.role == role and sd_partition.sub_role == subrole:
                                    if sd_partition.mds_service == mds_service:
                                        migrated = True
                                        break
                                    if sd_partition.partition_guid == partition.guid:
                                        number = max(sd_partition.number, number)
                            if migrated is False:
                                sd_partition = StorageDriverPartition()
                                sd_partition.role = role
                                sd_partition.sub_role = subrole
                                sd_partition.partition = partition
                                sd_partition.storagedriver = storagedriver
                                sd_partition.mds_service = mds_service
                                sd_partition.size = None
                                sd_partition.number = number + 1
                                sd_partition.save()
                                client = SSHClient(storagedriver.storagerouter, username='******')
                                path = sd_partition.path.rsplit('/', 1)[0]
                                if path:
                                    client.dir_create(path)
                                    client.dir_chown(path, 'ovs', 'ovs')
                                client.dir_create(directory)
                                client.dir_chown(directory, 'ovs', 'ovs')
                                client.symlink({sd_partition.path: directory})

        for storagedriver in StorageDriverList.get_storagedrivers():
            migrated_objects = {}
            for disk in storagedriver.storagerouter.disks:
                for partition in disk.partitions:
                    # Process all mountpoints that are unique and don't have a specified size
                    for key, (role, sr_info) in {'mountpoint_md': (DiskPartition.ROLES.DB, {'metadata_{0}': StorageDriverPartition.SUBROLE.MD,
                                                                                            'tlogs_{0}': StorageDriverPartition.SUBROLE.TLOG}),
                                                 'mountpoint_fragmentcache': (DiskPartition.ROLES.WRITE, {'fcache_{0}': StorageDriverPartition.SUBROLE.FCACHE}),
                                                 'mountpoint_foc': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                 'mountpoint_dtl': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                 'mountpoint_readcaches': (DiskPartition.ROLES.READ, {'': None}),
                                                 'mountpoint_writecaches': (DiskPartition.ROLES.WRITE, {'sco_{0}': StorageDriverPartition.SUBROLE.SCO})}.iteritems():
                        if key in storagedriver._data:
                            is_list = isinstance(storagedriver._data[key], list)
                            entries = storagedriver._data[key][:] if is_list is True else [storagedriver._data[key]]
                            for entry in entries:
                                if not entry:
                                    if is_list:
                                        storagedriver._data[key].remove(entry)
                                        if len(storagedriver._data[key]) == 0:
                                            del storagedriver._data[key]
                                    else:
                                        del storagedriver._data[key]
                                else:
                                    with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                        inode = rem.os.stat(entry).st_dev
                                    if partition.inode == inode:
                                        if role not in partition.roles:
                                            partition.roles.append(role)
                                            partition.save()
                                        for folder, subrole in sr_info.iteritems():
                                            number = 0
                                            migrated = False
                                            for sd_partition in storagedriver.partitions:
                                                if sd_partition.role == role and sd_partition.sub_role == subrole:
                                                    if sd_partition.partition_guid == partition.guid:
                                                        number = max(sd_partition.number, number)
                                            if migrated is False:
                                                sd_partition = StorageDriverPartition()
                                                sd_partition.role = role
                                                sd_partition.sub_role = subrole
                                                sd_partition.partition = partition
                                                sd_partition.storagedriver = storagedriver
                                                sd_partition.size = None
                                                sd_partition.number = number + 1
                                                sd_partition.save()
                                                if folder:
                                                    source = '{0}/{1}'.format(entry, folder.format(storagedriver.vpool.name))
                                                else:
                                                    source = entry
                                                client = SSHClient(storagedriver.storagerouter, username='******')
                                                path = sd_partition.path.rsplit('/', 1)[0]
                                                if path:
                                                    client.dir_create(path)
                                                    client.dir_chown(path, 'ovs', 'ovs')
                                                client.symlink({sd_partition.path: source})
                                                migrated_objects[source] = sd_partition
                                        if is_list:
                                            storagedriver._data[key].remove(entry)
                                            if len(storagedriver._data[key]) == 0:
                                                del storagedriver._data[key]
                                        else:
                                            del storagedriver._data[key]
                                        storagedriver.save()
            if 'mountpoint_bfs' in storagedriver._data:
                storagedriver.mountpoint_dfs = storagedriver._data['mountpoint_bfs']
                if not storagedriver.mountpoint_dfs:
                    storagedriver.mountpoint_dfs = None
                del storagedriver._data['mountpoint_bfs']
                storagedriver.save()
            if 'mountpoint_temp' in storagedriver._data:
                del storagedriver._data['mountpoint_temp']
                storagedriver.save()
            if migrated_objects:
                print 'Loading sizes'
                config = StorageDriverConfiguration('storagedriver',
                                                    storagedriver.vpool_guid,
                                                    storagedriver.storagedriver_id)
                config.load()
                for readcache in config.configuration.get('content_addressed_cache', {}).get('clustercache_mount_points', []):
                    path = readcache.get('path', '').rsplit('/', 1)[0]
                    size = int(readcache['size'].strip('KiB')) * 1024 if 'size' in readcache else None
                    if path in migrated_objects:
                        migrated_objects[path].size = long(size)
                        migrated_objects[path].save()
                for writecache in config.configuration.get('scocache', {}).get('scocache_mount_points', []):
                    path = writecache.get('path', '')
                    size = int(writecache['size'].strip('KiB')) * 1024 if 'size' in writecache else None
                    if path in migrated_objects:
                        migrated_objects[path].size = long(size)
                        migrated_objects[path].save()

        working_version = 4

    # Version 5 introduced:
    # - Failure Domains
    if working_version < 5:
        import os
        from ovs.dal.hybrids.failuredomain import FailureDomain
        from ovs.dal.lists.failuredomainlist import FailureDomainList
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        from ovs.extensions.generic.remote import remote
        from ovs.extensions.generic.sshclient import SSHClient

        failure_domains = FailureDomainList.get_failure_domains()
        if len(failure_domains) > 0:
            failure_domain = failure_domains[0]
        else:
            failure_domain = FailureDomain()
            failure_domain.name = 'Default'
            failure_domain.save()

        for storagerouter in StorageRouterList.get_storagerouters():
            change = False
            if storagerouter.primary_failure_domain is None:
                storagerouter.primary_failure_domain = failure_domain
                change = True
            if storagerouter.rdma_capable is None:
                client = SSHClient(storagerouter, username='******')
                rdma_capable = False
                with remote(client.ip, [os], username='******') as rem:
                    for root, dirs, files in rem.os.walk('/sys/class/infiniband'):
                        for directory in dirs:
                            ports_dir = '/'.join([root, directory, 'ports'])
                            if not rem.os.path.exists(ports_dir):
                                continue
                            for sub_root, sub_dirs, _ in rem.os.walk(ports_dir):
                                if sub_root != ports_dir:
                                    continue
                                for sub_directory in sub_dirs:
                                    state_file = '/'.join([sub_root, sub_directory, 'state'])
                                    if rem.os.path.exists(state_file):
                                        if 'ACTIVE' in client.run('cat {0}'.format(state_file)):
                                            rdma_capable = True
                storagerouter.rdma_capable = rdma_capable
                change = True
            if change is True:
                storagerouter.save()

        working_version = 5

    # Version 6 introduced:
    # - Distributed scrubbing
    if working_version < 6:
        from ovs.dal.hybrids.diskpartition import DiskPartition
        from ovs.dal.lists.storagedriverlist import StorageDriverList
        from ovs.extensions.generic.sshclient import SSHClient

        for storage_driver in StorageDriverList.get_storagedrivers():
            root_client = SSHClient(storage_driver.storagerouter, username='******')
            for partition in storage_driver.partitions:
                if partition.role == DiskPartition.ROLES.SCRUB:
                    old_path = partition.path
                    partition.sub_role = None
                    partition.save()
                    partition.invalidate_dynamics(['folder', 'path'])
                    if root_client.dir_exists(partition.path):
                        continue  # New directory already exists
                    if '_mds_' in old_path:
                        if root_client.dir_exists(old_path):
                            root_client.symlink({partition.path: old_path})
                    if not root_client.dir_exists(partition.path):
                        root_client.dir_create(partition.path)
                        root_client.dir_chmod(partition.path, 0777)

        working_version = 6

    # Version 7 introduced:
    # - vPool status
    if working_version < 7:
        from ovs.dal.hybrids import vpool
        reload(vpool)
        from ovs.dal.hybrids.vpool import VPool
        from ovs.dal.lists.vpoollist import VPoolList

        for _vpool in VPoolList.get_vpools():
            vpool = VPool(_vpool.guid)
            if hasattr(vpool, 'status') and vpool.status is None:
                vpool.status = VPool.STATUSES.RUNNING
                vpool.save()

        working_version = 7

    # Version 10 introduced:
    # - Reverse indexes are stored in persistent store
    # - Store more non-changing metadata on disk instead of using a dynamic property
    if working_version < 10:
        from ovs.dal.helpers import HybridRunner, Descriptor
        from ovs.dal.datalist import DataList
        from ovs.extensions.storage.persistentfactory import PersistentFactory
        from ovs.extensions.storage.volatilefactory import VolatileFactory

        persistent = PersistentFactory.get_client()
        for prefix in ['ovs_listcache', 'ovs_reverseindex']:
            for key in persistent.prefix(prefix):
                persistent.delete(key)
        for key in persistent.prefix('ovs_data_'):
            persistent.set(key, persistent.get(key))
        base_reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|{3}'
        hybrid_structure = HybridRunner.get_hybrids()
        for class_descriptor in hybrid_structure.values():
            cls = Descriptor().load(class_descriptor).get_object()
            all_objects = DataList(cls, {'type': DataList.where_operator.AND,
                                         'items': []})
            for item in all_objects:
                guid = item.guid
                for relation in item._relations:
                    if relation.foreign_type is None:
                        rcls = cls
                        rclsname = rcls.__name__.lower()
                    else:
                        rcls = relation.foreign_type
                        rclsname = rcls.__name__.lower()
                    key = relation.name
                    rguid = item._data[key]['guid']
                    if rguid is not None:
                        reverse_key = base_reverse_key.format(rclsname, rguid, relation.foreign_key, guid)
                        persistent.set(reverse_key, 0)
        volatile = VolatileFactory.get_client()
        try:
            volatile._client.flush_all()
        except:
            pass

        from ovs.dal.lists.vdisklist import VDiskList
        for vdisk in VDiskList.get_vdisks():
            try:
                vdisk.metadata = {'lba_size': vdisk.info['lba_size'],
                                  'cluster_multiplier': vdisk.info['cluster_multiplier']}
                vdisk.save()
            except:
                pass

        working_version = 10

    # Version 11 introduced:
    # - ALBA accelerated ALBA, meaning different vpool.metadata information
    if working_version < 11:
        from ovs.dal.lists.vpoollist import VPoolList
        for vpool in VPoolList.get_vpools():
            vpool.metadata = {'backend': vpool.metadata}
            if 'metadata' in vpool.metadata['backend']:
                vpool.metadata['backend']['arakoon_config'] = vpool.metadata['backend'].pop('metadata')
            if 'backend_info' in vpool.metadata['backend']:
                vpool.metadata['backend']['backend_info']['fragment_cache_on_read'] = True
                vpool.metadata['backend']['backend_info']['fragment_cache_on_write'] = False
            vpool.save()

        working_version = 11

    return working_version
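# Stripped to its skeleton, the migrator above is a chain of idempotent
# version gates: each block fires only when the starting version is below its
# target, so any starting point replays exactly the missing steps. A minimal
# sketch of the pattern (the step bodies are illustrative):
def _migrate_sketch(previous_version):
    working_version = previous_version
    if working_version < 1:
        # ... create initial model entries ...
        working_version = 1
    if working_version < 2:
        # ... transform data written by version 1 ...
        working_version = 2
    return working_version

assert _migrate_sketch(0) == 2
assert _migrate_sketch(1) == 2  # only the 1 > 2 step runs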
def test_happypath(self):
    """
    Validates the happy path; hourly snapshots are taken with a few manual consistent
    snapshots every now and then. The delete policy is executed every day
    """
    # Setup
    # There are 2 machines; one with two disks, one with one disk and a stand-alone additional disk
    failure_domain = FailureDomain()
    failure_domain.name = 'Test'
    failure_domain.save()
    backend_type = BackendType()
    backend_type.name = 'BackendType'
    backend_type.code = 'BT'
    backend_type.save()
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.status = 'RUNNING'
    vpool.backend_type = backend_type
    vpool.save()
    pmachine = PMachine()
    pmachine.name = 'PMachine'
    pmachine.username = '******'
    pmachine.ip = '127.0.0.1'
    pmachine.hvtype = 'VMWARE'
    pmachine.save()
    storage_router = StorageRouter()
    storage_router.name = 'storage_router'
    storage_router.ip = '127.0.0.1'
    storage_router.pmachine = pmachine
    storage_router.machine_id = System.get_my_machine_id()
    storage_router.rdma_capable = False
    storage_router.primary_failure_domain = failure_domain
    storage_router.save()
    disk = Disk()
    disk.name = 'physical_disk_1'
    disk.path = '/dev/non-existent'
    disk.size = 500 * 1024 ** 3
    disk.state = 'OK'
    disk.is_ssd = True
    disk.storagerouter = storage_router
    disk.save()
    disk_partition = DiskPartition()
    disk_partition.id = 'disk_partition_id'
    disk_partition.disk = disk
    disk_partition.path = '/dev/disk/non-existent'
    disk_partition.size = 400 * 1024 ** 3
    disk_partition.state = 'OK'
    disk_partition.offset = 1024
    disk_partition.roles = [DiskPartition.ROLES.SCRUB]
    disk_partition.mountpoint = '/var/tmp'
    disk_partition.save()
    vmachine_1 = VMachine()
    vmachine_1.name = 'vmachine_1'
    vmachine_1.devicename = 'dummy'
    vmachine_1.pmachine = pmachine
    vmachine_1.save()
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vmachine = vmachine_1
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client()
    vdisk_1_2 = VDisk()
    vdisk_1_2.name = 'vdisk_1_2'
    vdisk_1_2.volume_id = 'vdisk_1_2'
    vdisk_1_2.vmachine = vmachine_1
    vdisk_1_2.vpool = vpool
    vdisk_1_2.devicename = 'dummy'
    vdisk_1_2.size = 0
    vdisk_1_2.save()
    vdisk_1_2.reload_client()
    vmachine_2 = VMachine()
    vmachine_2.name = 'vmachine_2'
    vmachine_2.devicename = 'dummy'
    vmachine_2.pmachine = pmachine
    vmachine_2.save()
    vdisk_2_1 = VDisk()
    vdisk_2_1.name = 'vdisk_2_1'
    vdisk_2_1.volume_id = 'vdisk_2_1'
    vdisk_2_1.vmachine = vmachine_2
    vdisk_2_1.vpool = vpool
    vdisk_2_1.devicename = 'dummy'
    vdisk_2_1.size = 0
    vdisk_2_1.save()
    vdisk_2_1.reload_client()
    vdisk_3 = VDisk()
    vdisk_3.name = 'vdisk_3'
    vdisk_3.volume_id = 'vdisk_3'
    vdisk_3.vpool = vpool
    vdisk_3.devicename = 'dummy'
    vdisk_3.size = 0
    vdisk_3.save()
    vdisk_3.reload_client()
    for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
        [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

    # Run the testing scenario
    travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
    if travis is True:
        print 'Running in Travis, reducing output.'
    debug = not travis
    amount_of_days = 50
    base = datetime.datetime.now().date()
    day = datetime.timedelta(1)
    minute = 60
    hour = minute * 60

    for d in xrange(0, amount_of_days):
        base_timestamp = self._make_timestamp(base, day * d)
        print ''
        print 'Day cycle: {0}: {1}'.format(d, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))

        # At the start of the day, the delete snapshot policy runs at 00:30
        print '- Deleting snapshots'
        ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))

        # Validate snapshots
        print '- Validating snapshots'
        for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            self._validate(vdisk, d, base, amount_of_days, debug)

        # During the day, snapshots are taken
        # - Create non consistent snapshot every hour, between 2:00 and 22:00
        # - Create consistent snapshot at 6:30, 12:30, 18:30
        print '- Creating snapshots'
        for h in xrange(2, 23):
            timestamp = base_timestamp + (hour * h)
            for vm in [vmachine_1, vmachine_2]:
                VMachineController.snapshot(machineguid=vm.guid,
                                            label='ss_i_{0}:00'.format(str(h)),
                                            is_consistent=False,
                                            timestamp=timestamp)
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VMachineController.snapshot(machineguid=vm.guid,
                                                label='ss_c_{0}:30'.format(str(h)),
                                                is_consistent=True,
                                                timestamp=ts)
            VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                            metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                      'is_consistent': False,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})
            if h in [6, 12, 18]:
                ts = (timestamp + (minute * 30))
                VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                          'is_consistent': True,
                                                          'timestamp': str(ts),
                                                          'machineguid': None})
def test_happypath(self):
    """
    Validates the happy path; hourly snapshots are taken with a few manual consistent
    snapshots every now and then. The delete policy is executed every day
    """
    # Setup
    # There are 2 machines; one with two disks, one with one disk and an additional disk
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.backend_type = BackendType()
    vpool.save()
    vmachine_1 = VMachine()
    vmachine_1.name = 'vmachine_1'
    vmachine_1.devicename = 'dummy'
    vmachine_1.pmachine = PMachine()
    vmachine_1.save()
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vmachine = vmachine_1
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client()
    vdisk_1_2 = VDisk()
    vdisk_1_2.name = 'vdisk_1_2'
    vdisk_1_2.volume_id = 'vdisk_1_2'
    vdisk_1_2.vmachine = vmachine_1
    vdisk_1_2.vpool = vpool
    vdisk_1_2.devicename = 'dummy'
    vdisk_1_2.size = 0
    vdisk_1_2.save()
    vdisk_1_2.reload_client()
    vmachine_2 = VMachine()
    vmachine_2.name = 'vmachine_2'
    vmachine_2.devicename = 'dummy'
    vmachine_2.pmachine = PMachine()
    vmachine_2.save()
    vdisk_2_1 = VDisk()
    vdisk_2_1.name = 'vdisk_2_1'
    vdisk_2_1.volume_id = 'vdisk_2_1'
    vdisk_2_1.vmachine = vmachine_2
    vdisk_2_1.vpool = vpool
    vdisk_2_1.devicename = 'dummy'
    vdisk_2_1.size = 0
    vdisk_2_1.save()
    vdisk_2_1.reload_client()
    vdisk_3 = VDisk()
    vdisk_3.name = 'vdisk_3'
    vdisk_3.volume_id = 'vdisk_3'
    vdisk_3.vpool = vpool
    vdisk_3.devicename = 'dummy'
    vdisk_3.size = 0
    vdisk_3.save()
    vdisk_3.reload_client()
    for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
        [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

    # Run the testing scenario
    debug = True
    amount_of_days = 50
    now = int(mktime(datetime.now().date().timetuple()))  # Last night
    minute = 60
    hour = minute * 60
    day = hour * 24
    for d in xrange(0, amount_of_days):
        base_timestamp = now + (day * d)
        print ''
        print 'Day cycle: {}: {}'.format(d, datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))

        # At the start of the day, the delete snapshot policy runs at 00:30
        print '- Deleting snapshots'
        ScheduledTaskController.deletescrubsnapshots(timestamp=base_timestamp + (minute * 30))

        # Validate snapshots
        print '- Validating snapshots'
        for vdisk in [vdisk_3]:  # [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            self._validate(vdisk, d, now, amount_of_days, debug)

        # During the day, snapshots are taken
        # - Create non consistent snapshot every hour, between 2:00 and 22:00
        # - Create consistent snapshot at 6:30, 12:30, 18:30
        print '- Creating snapshots'
        for h in xrange(2, 23):
            timestamp = base_timestamp + (hour * h)
            for vm in [vmachine_1, vmachine_2]:
                VMachineController.snapshot(machineguid=vm.guid,
                                            label='ss_i_{}:00'.format(str(h)),
                                            is_consistent=False,
                                            timestamp=timestamp)
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VMachineController.snapshot(machineguid=vm.guid,
                                                label='ss_c_{}:30'.format(str(h)),
                                                is_consistent=True,
                                                timestamp=ts)
            VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                            metadata={'label': 'ss_i_{}:00'.format(str(h)),
                                                      'is_consistent': False,
                                                      'timestamp': timestamp,
                                                      'machineguid': None})
            if h in [6, 12, 18]:
                ts = (timestamp + (minute * 30))
                VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                metadata={'label': 'ss_c_{}:30'.format(str(h)),
                                                          'is_consistent': True,
                                                          'timestamp': ts,
                                                          'machineguid': None})
def _build_service_structure(self, structure):
    """
    Builds an MDS service structure
    """
    vpools = {}
    storagerouters = {}
    storagedrivers = {}
    services = {}
    mds_services = {}
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()
    for vpool_id in structure['vpools']:
        vpool = VPool()
        vpool.name = str(vpool_id)
        vpool.backend_type = BackendType()
        vpool.save()
        vpools[vpool_id] = vpool
    for sr_id in structure['storagerouters']:
        storagerouter = StorageRouter()
        storagerouter.name = str(sr_id)
        storagerouter.ip = '10.0.0.{0}'.format(sr_id)
        storagerouter.pmachine = PMachine()
        storagerouter.save()
        storagerouters[sr_id] = storagerouter
    for sd_info in structure['storagedrivers']:
        sd_id, vpool_id, sr_id = sd_info
        storagedriver = StorageDriver()
        storagedriver.vpool = vpools[vpool_id]
        storagedriver.storagerouter = storagerouters[sr_id]
        storagedriver.name = str(sd_id)
        storagedriver.mountpoint_temp = '/'
        storagedriver.mountpoint_foc = '/'
        storagedriver.mountpoint_readcaches = ['/']
        storagedriver.mountpoint_writecaches = ['/']
        storagedriver.mountpoint_md = '/'
        storagedriver.mountpoint_bfs = '/'
        storagedriver.mountpoint_fragmentcache = '/'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouters[sr_id].ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = str(sd_id)
        storagedriver.ports = [1, 2, 3]
        storagedriver.save()
        storagedrivers[sd_id] = storagedriver
    for mds_info in structure['mds_services']:
        mds_id, sd_id = mds_info
        sd = storagedrivers[sd_id]
        s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
        service = Service()
        service.name = s_id
        service.storagerouter = sd.storagerouter
        service.ports = [mds_id]
        service.type = service_type
        service.save()
        services[s_id] = service
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = sd.vpool
        mds_service.save()
        mds_services[mds_id] = mds_service
    return vpools, storagerouters, storagedrivers, services, mds_services, service_type
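# A hedged usage sketch of the structure dict this helper expects, inferred
# from the loops above: 'storagedrivers' entries are (sd_id, vpool_id, sr_id)
# tuples and 'mds_services' entries are (mds_id, sd_id) tuples; the concrete
# IDs are illustrative. (Called from within the test case, hence self.)
structure = {'vpools': [1],
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (sd_id, vpool_id, sr_id)
             'mds_services': [(1, 1), (2, 2)]}          # (mds_id, sd_id)
vpools, storagerouters, storagedrivers, services, mds_services, service_type = \
    self._build_service_structure(structure)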
def migrate(previous_version):
    """
    Migrates from any version to any version, running all migrations required
    If previous_version is for example 0 and this script is at version 3 it will execute two steps:
      - 1 > 2
      - 2 > 3
    @param previous_version: The previous version from which to start the migration.
    """

    working_version = previous_version

    # Version 1 introduced:
    # - The datastore is still empty, add defaults
    if working_version < 1:
        from ovs.dal.hybrids.user import User
        from ovs.dal.hybrids.group import Group
        from ovs.dal.hybrids.role import Role
        from ovs.dal.hybrids.client import Client
        from ovs.dal.hybrids.j_rolegroup import RoleGroup
        from ovs.dal.hybrids.j_roleclient import RoleClient
        from ovs.dal.hybrids.backendtype import BackendType
        from ovs.dal.hybrids.servicetype import ServiceType
        from ovs.dal.hybrids.branding import Branding
        from ovs.dal.lists.backendtypelist import BackendTypeList

        # Create groups
        admin_group = Group()
        admin_group.name = 'administrators'
        admin_group.description = 'Administrators'
        admin_group.save()
        viewers_group = Group()
        viewers_group.name = 'viewers'
        viewers_group.description = 'Viewers'
        viewers_group.save()

        # Create users
        admin = User()
        admin.username = '******'
        admin.password = hashlib.sha256('admin').hexdigest()
        admin.is_active = True
        admin.group = admin_group
        admin.save()

        # Create internal OAuth 2 clients
        admin_pw_client = Client()
        admin_pw_client.ovs_type = 'INTERNAL'
        admin_pw_client.grant_type = 'PASSWORD'
        admin_pw_client.user = admin
        admin_pw_client.save()
        admin_cc_client = Client()
        admin_cc_client.ovs_type = 'INTERNAL'
        admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
        admin_cc_client.client_secret = ''.join(random.choice(string.ascii_letters +
                                                              string.digits +
                                                              '|_=+*#@!/-[]{}<>.?,\'";:~')
                                                for _ in range(128))
        admin_cc_client.user = admin
        admin_cc_client.save()

        # Create roles
        read_role = Role()
        read_role.code = 'read'
        read_role.name = 'Read'
        read_role.description = 'Can read objects'
        read_role.save()
        write_role = Role()
        write_role.code = 'write'
        write_role.name = 'Write'
        write_role.description = 'Can write objects'
        write_role.save()
        manage_role = Role()
        manage_role.code = 'manage'
        manage_role.name = 'Manage'
        manage_role.description = 'Can manage the system'
        manage_role.save()

        # Attach groups to roles
        mapping = [(admin_group, [read_role, write_role, manage_role]),
                   (viewers_group, [read_role])]
        for setting in mapping:
            for role in setting[1]:
                rolegroup = RoleGroup()
                rolegroup.group = setting[0]
                rolegroup.role = role
                rolegroup.save()
            for user in setting[0].users:
                for role in setting[1]:
                    for client in user.clients:
                        roleclient = RoleClient()
                        roleclient.client = client
                        roleclient.role = role
                        roleclient.save()

        # Add backends
        for backend_type_info in [('Ceph', 'ceph_s3'), ('Amazon', 'amazon_s3'), ('Swift', 'swift_s3'),
                                  ('Local', 'local'), ('Distributed', 'distributed'), ('ALBA', 'alba')]:
            code = backend_type_info[1]
            backend_type = BackendTypeList.get_backend_type_by_code(code)
            if backend_type is None:
                backend_type = BackendType()
            backend_type.name = backend_type_info[0]
            backend_type.code = code
            backend_type.save()

        # Add service types
        for service_type_info in ['MetadataServer', 'AlbaProxy', 'Arakoon']:
            service_type = ServiceType()
            service_type.name = service_type_info
            service_type.save()

        # Branding
        branding = Branding()
        branding.name = 'Default'
        branding.description = 'Default bootstrap theme'
        branding.css = 'bootstrap-default.min.css'
        branding.productname = 'Open vStorage'
        branding.is_default = True
        branding.save()
        slate = Branding()
        slate.name = 'Slate'
        slate.description = 'Dark bootstrap theme'
        slate.css = 'bootstrap-slate.min.css'
        slate.productname = 'Open vStorage'
        slate.is_default = False
        slate.save()

        # We're now at version 1
        working_version = 1

    # Version 2 introduced:
    # - new Descriptor format
    if working_version < 2:
        import imp
        from ovs.dal.helpers import Descriptor
        from ovs.extensions.storage.persistentfactory import PersistentFactory

        client = PersistentFactory.get_client()
        keys = client.prefix('ovs_data', max_elements=-1)
        for key in keys:
            data = client.get(key)
            modified = False
            for entry in data.keys():
                if isinstance(data[entry], dict) and 'source' in data[entry] and 'hybrids' in data[entry]['source']:
                    filename = data[entry]['source']
                    if not filename.startswith('/'):
                        filename = '/opt/OpenvStorage/ovs/dal/{0}'.format(filename)
                    module = imp.load_source(data[entry]['name'], filename)
                    cls = getattr(module, data[entry]['type'])
                    new_data = Descriptor(cls, cached=False).descriptor
                    if 'guid' in data[entry]:
                        new_data['guid'] = data[entry]['guid']
                    data[entry] = new_data
                    modified = True
            if modified is True:
                data['_version'] += 1
                client.set(key, data)

        # We're now at version 2
        working_version = 2

    # Version 3 introduced:
    # - new Descriptor format
    if working_version < 3:
        import imp
        from ovs.dal.helpers import Descriptor
        from ovs.extensions.storage.persistentfactory import PersistentFactory

        client = PersistentFactory.get_client()
        keys = client.prefix('ovs_data', max_elements=-1)
        for key in keys:
            data = client.get(key)
            modified = False
            for entry in data.keys():
                if isinstance(data[entry], dict) and 'source' in data[entry]:
                    module = imp.load_source(data[entry]['name'], data[entry]['source'])
                    cls = getattr(module, data[entry]['type'])
                    new_data = Descriptor(cls, cached=False).descriptor
                    if 'guid' in data[entry]:
                        new_data['guid'] = data[entry]['guid']
                    data[entry] = new_data
                    modified = True
            if modified is True:
                data['_version'] += 1
                client.set(key, data)

        working_version = 3

    return working_version
def _prepare(self):
    # Setup
    failure_domain = FailureDomain()
    failure_domain.name = 'Test'
    failure_domain.save()
    backend_type = BackendType()
    backend_type.name = 'BackendType'
    backend_type.code = 'BT'
    backend_type.save()
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.backend_type = backend_type
    vpool.save()
    pmachine = PMachine()
    pmachine.name = 'PMachine'
    pmachine.username = '******'
    pmachine.ip = '127.0.0.1'
    pmachine.hvtype = 'KVM'
    pmachine.save()
    vmachine_1 = VMachine()
    vmachine_1.name = 'vmachine_1'
    vmachine_1.devicename = 'dummy'
    vmachine_1.pmachine = pmachine
    vmachine_1.is_vtemplate = True
    vmachine_1.save()
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vmachine = vmachine_1
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client()
    storage_router = StorageRouter()
    storage_router.name = 'storage_router'
    storage_router.ip = '127.0.0.1'
    storage_router.pmachine = pmachine
    storage_router.machine_id = System.get_my_machine_id()
    storage_router.rdma_capable = False
    storage_router.primary_failure_domain = failure_domain
    storage_router.save()
    storagedriver = StorageDriver()
    storagedriver.vpool = vpool
    storagedriver.storagerouter = storage_router
    storagedriver.name = '1'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storage_router.ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '1'
    storagedriver.ports = [1, 2, 3]
    storagedriver.save()
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()
    s_id = '{0}-{1}'.format(storagedriver.storagerouter.name, '1')
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [1]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()

    def ensure_safety(vdisk):
        pass

    class Dtl_Checkup():
        @staticmethod
        def delay(vpool_guid=None, vdisk_guid=None, storagerouters_to_exclude=None):
            pass

    MDSServiceController.ensure_safety = staticmethod(ensure_safety)
    VDiskController.dtl_checkup = Dtl_Checkup
    return vdisk_1_1, pmachine
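# _prepare patches class attributes in place (ensure_safety, dtl_checkup).
# When reusing that pattern it is safer to keep a handle on the original and
# restore it afterwards, so other tests see the real behaviour again. A
# minimal sketch with a hypothetical Controller class:
class Controller(object):
    @staticmethod
    def ensure_safety(vdisk):
        raise RuntimeError('talks to real services')

_original = Controller.__dict__['ensure_safety']  # the staticmethod object itself

def _noop(vdisk):  # test stub
    pass

Controller.ensure_safety = staticmethod(_noop)
try:
    Controller.ensure_safety('some-vdisk')  # runs the stub, no side effects
finally:
    Controller.ensure_safety = _original    # restore for subsequent tests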
def migrate(previous_version):
    """
    Migrates from any version to any version, running all migrations required
    If previous_version is for example 0 (0.0.1) and this script is at version 3 (0.0.3) it will
    execute two steps:
      - 0.0.1 > 0.0.2
      - 0.0.2 > 0.0.3
    @param previous_version: The previous version from which to start the migration.
    """

    working_version = previous_version

    # Version 0.0.1 introduced:
    if working_version < 1:
        # Create groups
        admin_group = Group()
        admin_group.name = 'administrators'
        admin_group.description = 'Administrators'
        admin_group.save()
        viewers_group = Group()
        viewers_group.name = 'viewers'
        viewers_group.description = 'Viewers'
        viewers_group.save()

        # Create users
        admin = User()
        admin.username = '******'
        admin.password = hashlib.sha256('admin').hexdigest()
        admin.is_active = True
        admin.group = admin_group
        admin.save()

        # Create internal OAuth 2 clients
        admin_client = Client()
        admin_client.ovs_type = 'FRONTEND'
        admin_client.grant_type = 'PASSWORD'
        admin_client.user = admin
        admin_client.save()

        # Create roles
        read_role = Role()
        read_role.code = 'read'
        read_role.name = 'Read'
        read_role.description = 'Can read objects'
        read_role.save()
        write_role = Role()
        write_role.code = 'write'
        write_role.name = 'Write'
        write_role.description = 'Can write objects'
        write_role.save()
        manage_role = Role()
        manage_role.code = 'manage'
        manage_role.name = 'Manage'
        manage_role.description = 'Can manage the system'
        manage_role.save()

        # Attach groups to roles
        mapping = [(admin_group, [read_role, write_role, manage_role]),
                   (viewers_group, [read_role])]
        for setting in mapping:
            for role in setting[1]:
                rolegroup = RoleGroup()
                rolegroup.group = setting[0]
                rolegroup.role = role
                rolegroup.save()
            for user in setting[0].users:
                for role in setting[1]:
                    roleclient = RoleClient()
                    roleclient.client = user.clients[0]
                    roleclient.role = role
                    roleclient.save()

        # Add backends
        for backend_type_info in [('Ceph', 'ceph_s3'), ('Amazon', 'amazon_s3'), ('Swift', 'swift_s3'),
                                  ('Local', 'local'), ('Distributed', 'distributed'), ('ALBA', 'alba')]:
            backend_type = BackendType()
            backend_type.name = backend_type_info[0]
            backend_type.code = backend_type_info[1]
            backend_type.save()

        # We're now at version 0.0.1
        working_version = 1

    # Version 0.0.2 introduced:
    if working_version < 2:
        # Execute some code that upgrades to version 2
        # working_version = 2
        pass

    return working_version
def build_dal_structure(structure, previous_structure=None):
    """
    Builds a service structure
    Example:
        structure = AlbaDalHelper.build_dal_structure({
            'alba_backends': [(1, 'LOCAL')],
            'alba_nodes': [1]
        })
    """
    if previous_structure is None:
        previous_structure = {}
    alba_osds = previous_structure.get('alba_osds', {})
    alba_nodes = previous_structure.get('alba_nodes', {})
    backend_types = previous_structure.get('backend_types', {})
    service_types = previous_structure.get('service_types', {})
    alba_backends = previous_structure.get('alba_backends', {})
    alba_abm_clusters = previous_structure.get('alba_abm_clusters', {})
    alba_nsm_clusters = previous_structure.get('alba_nsm_clusters', {})

    if 1 not in backend_types:
        backend_type = BackendType()
        backend_type.code = 'alba'
        backend_type.name = 'ALBA'
        backend_type.save()
        backend_types[1] = backend_type
    if 'AlbaManager' not in service_types:
        service_type = ServiceTypeList.get_by_name('AlbaManager')
        if service_type is None:
            service_type = ServiceType()
            service_type.name = 'AlbaManager'
            service_type.save()
        service_types['AlbaManager'] = service_type
    if 'NamespaceManager' not in service_types:
        service_type = ServiceTypeList.get_by_name('NamespaceManager')
        if service_type is None:
            service_type = ServiceType()
            service_type.name = 'NamespaceManager'
            service_type.save()
        service_types['NamespaceManager'] = service_type

    for ab_id, scaling in structure.get('alba_backends', ()):
        if ab_id not in alba_backends:
            backend = Backend()
            backend.name = 'backend_{0}'.format(ab_id)
            backend.backend_type = backend_types[1]
            backend.save()
            alba_backend = AlbaBackend()
            alba_backend.backend = backend
            alba_backend.scaling = getattr(AlbaBackend.SCALINGS, scaling)
            alba_backend.alba_id = str(ab_id)
            alba_backend.save()
            alba_backends[ab_id] = alba_backend

    for ab_id in structure.get('alba_abm_clusters', ()):
        if ab_id not in alba_abm_clusters:
            if ab_id not in alba_backends:
                raise ValueError('Non-existing ALBA Backend ID provided')
            alba_backend = alba_backends[ab_id]
            abm_cluster = ABMCluster()
            abm_cluster.name = '{0}-abm'.format(alba_backend.name)
            abm_cluster.alba_backend = alba_backend
            abm_cluster.config_location = '/ovs/arakoon/{0}-abm/config'.format(alba_backend.name)
            abm_cluster.save()
            abm_service = Service()
            abm_service.name = 'arakoon-{0}-abm'.format(alba_backend.name)
            abm_service.type = service_types['AlbaManager']
            abm_service.ports = []
            abm_service.storagerouter = None
            abm_service.save()
            abm_junction_service = ABMService()
            abm_junction_service.service = abm_service
            abm_junction_service.abm_cluster = abm_cluster
            abm_junction_service.save()
            alba_abm_clusters[ab_id] = abm_cluster

    for ab_id, amount in structure.get('alba_nsm_clusters', ()):
        if ab_id not in alba_nsm_clusters or amount != len(alba_nsm_clusters[ab_id]):
            if ab_id not in alba_backends:
                raise ValueError('Non-existing ALBA Backend ID provided')
            alba_backend = alba_backends[ab_id]
            alba_nsm_clusters[ab_id] = []
            nsm_clusters = dict((nsm_cluster.number, nsm_cluster)
                                for nsm_cluster in alba_backend.nsm_clusters)
            for number in range(amount):
                if number in nsm_clusters:
                    alba_nsm_clusters[ab_id].append(nsm_clusters[number])
                    continue
                nsm_cluster = NSMCluster()
                nsm_cluster.name = '{0}-nsm_{1}'.format(alba_backend.name, number)
                nsm_cluster.number = number
                nsm_cluster.alba_backend = alba_backend
                nsm_cluster.config_location = '/ovs/arakoon/{0}-nsm_{1}/config'.format(alba_backend.name, number)
                nsm_cluster.save()
                nsm_service = Service()
                nsm_service.name = 'arakoon-{0}-nsm_{1}'.format(alba_backend.name, number)
                nsm_service.type = service_types['NamespaceManager']
                nsm_service.ports = []
                nsm_service.storagerouter = None
                nsm_service.save()
                nsm_junction_service = NSMService()
                nsm_junction_service.service = nsm_service
                nsm_junction_service.nsm_cluster = nsm_cluster
                nsm_junction_service.save()
                alba_nsm_clusters[ab_id].append(nsm_cluster)

    for an_id in structure.get('alba_nodes', []):
        if an_id not in alba_nodes:
            alba_node = AlbaNode()
            alba_node.ip = '10.1.0.{0}'.format(an_id)
            alba_node.port = 8500
            alba_node.username = str(an_id)
            alba_node.password = str(an_id)
            alba_node.node_id = 'node_{0}'.format(an_id)
            alba_node.save()
            alba_nodes[an_id] = alba_node
            if alba_node in ManagerClientMockup.test_results:
                ManagerClientMockup.test_results[alba_node].update({'get_metadata': {'_version': 3}})
            else:
                ManagerClientMockup.test_results[alba_node] = {'get_metadata': {'_version': 3}}

    for ao_id, ab_id, an_id, slot_id in structure.get('alba_osds', ()):
        if ao_id not in alba_osds:
            osd = AlbaOSD()
            osd.osd_id = 'alba_osd_{0}'.format(ao_id)
            osd.osd_type = AlbaOSD.OSD_TYPES.ASD
            osd.alba_backend = alba_backends[ab_id]
            osd.alba_node = alba_nodes[an_id]
            osd.slot_id = 'alba_slot_{0}'.format(slot_id)
            osd.ips = ['127.0.0.{0}'.format(ao_id)]
            osd.port = 35000 + ao_id
            osd.save()
            alba_osds[ao_id] = osd

    return {'alba_osds': alba_osds,
            'alba_nodes': alba_nodes,
            'backend_types': backend_types,
            'service_types': service_types,
            'alba_backends': alba_backends,
            'alba_abm_clusters': alba_abm_clusters,
            'alba_nsm_clusters': alba_nsm_clusters}
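# Hedged usage sketch, inferred from the loops above (all IDs illustrative):
# one LOCAL-scaling backend with its ABM cluster and two NSM clusters, plus
# one node carrying one OSD; 'alba_osds' entries are
# (osd_id, backend_id, node_id, slot_id) tuples.
structure = build_dal_structure({'alba_backends': [(1, 'LOCAL')],
                                 'alba_abm_clusters': [1],
                                 'alba_nsm_clusters': [(1, 2)],
                                 'alba_nodes': [1],
                                 'alba_osds': [(1, 1, 1, 1)]})
alba_backend = structure['alba_backends'][1]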