def update_storagedrivers(self, vpool, storagedriver_guid, storagerouter_guids=None, storagedriver_guids=None):
    """
    Update Storage Drivers for a given vPool (both adding and removing Storage Drivers)

    :param vpool: vPool whose Storage Drivers are updated
    :param storagedriver_guid: Guid of the Storage Driver used as a template for the add-parameters
    :param storagerouter_guids: Comma-separated guids of StorageRouters on which to add a link
    :param storagedriver_guids: Comma-separated guids of Storage Drivers to remove (must belong to vpool)
    :return: Async result of StorageRouterController.update_storagedrivers
    :raises NotAcceptable: when a Storage Driver to remove does not belong to this vPool
    """
    storagerouters = []
    if storagerouter_guids is not None:
        if storagerouter_guids.strip() != '':
            for sr_guid in storagerouter_guids.strip().split(','):
                storagerouter = StorageRouter(sr_guid)
                storagerouters.append((storagerouter.ip, storagerouter.machine_id))
    valid_storagedriver_guids = []
    if storagedriver_guids is not None:
        if storagedriver_guids.strip() != '':
            # BUG FIX: the original loop variable was named 'storagedriver_guid', shadowing
            # the method parameter, so the template StorageDriver below was built from the
            # last driver-to-remove instead of the caller-supplied one.
            for sd_guid in storagedriver_guids.strip().split(','):
                candidate = StorageDriver(sd_guid)
                if candidate.vpool_guid != vpool.guid:
                    raise NotAcceptable('Given Storage Driver does not belong to this vPool')
                valid_storagedriver_guids.append(candidate.guid)
    storagedriver = StorageDriver(storagedriver_guid)
    parameters = {'connection_host': None if vpool.connection is None else vpool.connection.split(':')[0],
                  'connection_port': None if vpool.connection is None else int(vpool.connection.split(':')[1]),
                  'connection_username': vpool.login,
                  'connection_password': vpool.password,
                  'storage_ip': storagedriver.storage_ip,
                  'type': vpool.backend_type.code,
                  'vpool_name': vpool.name}
    # Normalize any unicode strings to plain str before handing off to Celery
    for field in parameters:
        if isinstance(parameters[field], basestring):
            parameters[field] = str(parameters[field])
    return StorageRouterController.update_storagedrivers.delay(valid_storagedriver_guids, storagerouters, parameters)
def get_storagedriver_by_guid(storagedriver_guid):
    """
    Fetch the StorageDriver hybrid object for a given guid.
    :param storagedriver_guid: guid of the storagedriver
    :type storagedriver_guid: str
    :return: The storagedriver DAL object
    :rtype: ovs.dal.hybrids.storagedriver.StorageDriver
    """
    storagedriver = StorageDriver(storagedriver_guid)
    return storagedriver
def get_by_storagedriver(storagedriver_guid):
    """
    Returns the MDSService hosted on the StorageDriver's StorageRouter for the
    StorageDriver's vPool, or None when no such service exists
    * Walks object relations instead of running queries for better performance
    """
    mds_type = ServiceTypeList.get_by_name('MetadataServer')
    storagedriver = StorageDriver(storagedriver_guid)
    candidates = (service.mds_service
                  for service in storagedriver.storagerouter.services
                  if service.type_guid == mds_type.guid
                  and service.mds_service.vpool_guid == storagedriver.vpool_guid)
    # First match wins, mirroring the early-return of a plain loop
    return next(candidates, None)
def from_dict(data):
    # type: (Dict[str, Union[str, int, List[str]]]) -> VDiskBalance
    """
    Build a VDiskBalance from its dict form (see to_dict for the expected shape).
    The 'storagedriver_guid' entry is resolved into a StorageDriver object.
    :param data: Data dict
    :return:
    """
    init_kwargs = dict(data)
    driver_guid = init_kwargs.pop('storagedriver_guid')
    init_kwargs['storagedriver'] = StorageDriver(driver_guid)
    return VDiskBalance(**init_kwargs)
def setUpClass(cls): """ Sets up the unittest, mocking a certain set of 3rd party libraries and extensions. This makes sure the unittests can be executed without those libraries installed """ # Load dummy stores PersistentFactory.store = DummyPersistentStore() VolatileFactory.store = DummyVolatileStore() # Replace mocked classes sys.modules[ 'ovs.extensions.storageserver.storagedriver'] = StorageDriverModule # Import required modules/classes after mocking is done from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.hybrids.service import Service from ovs.dal.hybrids.vpool import VPool from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.hybrids.pmachine import PMachine from ovs.dal.hybrids.servicetype import ServiceType from ovs.dal.hybrids.storagedriver import StorageDriver from ovs.dal.hybrids.backendtype import BackendType from ovs.dal.hybrids.j_mdsservice import MDSService from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk from ovs.extensions.generic.volatilemutex import VolatileMutex from ovs.lib.mdsservice import MDSServiceController # Globalize mocked classes global VDisk global VPool global Service global StorageRouter global StorageDriver global BackendType global PMachine global MDSService global ServiceType global MDSServiceVDisk global VolatileMutex global MDSServiceController _ = VDisk(), VPool(), Service(), MDSService(), MDSServiceVDisk(), ServiceType(), \ StorageRouter(), StorageDriver(), BackendType(), PMachine(), \ VolatileMutex('dummy'), MDSServiceController # Configuration def _get(key): c = PersistentFactory.get_client() if c.exists(key): return c.get(key) return None Configuration.get = staticmethod(_get) # Cleaning storage VolatileFactory.store.clean() PersistentFactory.store.clean()
def update_storagedrivers(storagedriver_guids, storagerouters, parameters):
    """
    Add/remove multiple vPools
    @param storagedriver_guids: Storage Drivers to be removed
    @param storagerouters: StorageRouters (ip, machine_id) tuples on which to add a new link
    @param parameters: Settings for new links
    @return: True when every add/remove succeeded, False otherwise
    """
    success = True
    # Add Storage Drivers
    for storagerouter_ip, storagerouter_machine_id in storagerouters:
        try:
            new_parameters = copy.copy(parameters)
            new_parameters['storagerouter_ip'] = storagerouter_ip
            local_machineid = System.get_my_machine_id()
            if local_machineid == storagerouter_machine_id:
                # Inline execution, since it's on the same node (preventing deadlocks)
                StorageRouterController.add_vpool(new_parameters)
            else:
                # Async execution, since it has to be executed on another node
                # @TODO: Will break in Celery 3.2, need to find another solution
                # Requirements:
                # - This code cannot continue until this new task is completed (as all these Storage Routers
                #   need to be handled sequentially)
                # - The wait() or get() method are not allowed anymore from within a task to prevent deadlocks
                result = StorageRouterController.add_vpool.s(new_parameters).apply_async(
                    routing_key='sr.{0}'.format(storagerouter_machine_id)
                )
                result.wait()
        except Exception:
            # BUG FIX: was a bare 'except:', which also swallowed SystemExit/KeyboardInterrupt.
            # Only regular errors should mark the run as failed.
            success = False
    # Remove Storage Drivers
    for storagedriver_guid in storagedriver_guids:
        try:
            storagedriver = StorageDriver(storagedriver_guid)
            storagerouter_machineid = storagedriver.storagerouter.machine_id
            local_machineid = System.get_my_machine_id()
            if local_machineid == storagerouter_machineid:
                # Inline execution, since it's on the same node (preventing deadlocks)
                StorageRouterController.remove_storagedriver(storagedriver_guid)
            else:
                # Async execution, since it has to be executed on another node
                # @TODO: Will break in Celery 3.2, need to find another solution
                # Requirements:
                # - This code cannot continue until this new task is completed (as all these VSAs need to be
                #   handled sequentially)
                # - The wait() or get() method are not allowed anymore from within a task to prevent deadlocks
                result = StorageRouterController.remove_storagedriver.s(storagedriver_guid).apply_async(
                    routing_key='sr.{0}'.format(storagerouter_machineid)
                )
                result.wait()
        except Exception:
            # BUG FIX: was a bare 'except:' — see note above; narrowed to Exception
            success = False
    return success
def update_storagedrivers(self, vpool, storagedriver_guid, storagerouter_guids=None, storagedriver_guids=None):
    """
    Update Storage Drivers for a given vPool (both adding and removing Storage Drivers)

    :param vpool: vPool whose Storage Drivers are updated
    :param storagedriver_guid: Guid of the Storage Driver used as a template for the add-parameters
    :param storagerouter_guids: Comma-separated guids of StorageRouters on which to add a link
    :param storagedriver_guids: Comma-separated guids of Storage Drivers to remove (must belong to vpool)
    :return: Async result of StorageRouterController.update_storagedrivers
    :raises NotAcceptable: when a Storage Driver to remove does not belong to this vPool
    """
    storagerouters = []
    if storagerouter_guids is not None:
        if storagerouter_guids.strip() != '':
            for sr_guid in storagerouter_guids.strip().split(','):
                storagerouter = StorageRouter(sr_guid)
                storagerouters.append((storagerouter.ip, storagerouter.machine_id))
    valid_storagedriver_guids = []
    if storagedriver_guids is not None:
        if storagedriver_guids.strip() != '':
            # BUG FIX: the original loop variable was named 'storagedriver_guid', shadowing
            # the method parameter, so the template StorageDriver below was built from the
            # last driver-to-remove instead of the caller-supplied one.
            for sd_guid in storagedriver_guids.strip().split(','):
                candidate = StorageDriver(sd_guid)
                if candidate.vpool_guid != vpool.guid:
                    raise NotAcceptable('Given Storage Driver does not belong to this vPool')
                valid_storagedriver_guids.append(candidate.guid)
    storagedriver = StorageDriver(storagedriver_guid)
    parameters = {'vpool_name': vpool.name,
                  'type': vpool.backend_type.code,
                  'connection_host': None if vpool.connection is None else vpool.connection.split(':')[0],
                  'connection_port': None if vpool.connection is None else int(vpool.connection.split(':')[1]),
                  'connection_timeout': 0,  # Not in use anyway
                  'connection_username': vpool.login,
                  'connection_password': vpool.password,
                  'mountpoint_bfs': storagedriver.mountpoint_bfs,
                  'mountpoint_temp': storagedriver.mountpoint_temp,
                  'mountpoint_md': storagedriver.mountpoint_md,
                  'mountpoint_readcache1': storagedriver.mountpoint_readcache1,
                  'mountpoint_readcache2': storagedriver.mountpoint_readcache2,
                  'mountpoint_writecache': storagedriver.mountpoint_writecache,
                  'mountpoint_foc': storagedriver.mountpoint_foc,
                  'storage_ip': storagedriver.storage_ip}
    for field in parameters:
        # BUG FIX: the original condition was 'if not parameters[field] is int:', which
        # compares the VALUE against the int type object with identity — true for every
        # actual value — so integers (ports, timeout) were stringified as well.
        # The intent is: keep integers as-is, stringify everything else.
        if not isinstance(parameters[field], int):
            parameters[field] = str(parameters[field])
    return StorageRouterController.update_storagedrivers.delay(valid_storagedriver_guids, storagerouters, parameters)
def calculate_update_impact(storagedriver_guid, requested_config):
    """
    Calculate the impact of the update for a StorageDriver with a given config
    :param storagedriver_guid: Guid of the storage driver
    :type storagedriver_guid: str
    :param requested_config: requested configuration
    :type requested_config: dict
    :return: dict indicating what will be needed to be done
    :rtype: dict
    """
    # TODO: Implement for reconfiguring purposes
    _storagedriver = StorageDriver(storagedriver_guid)
    # Get a difference between the current config and the requested one
    impact = {}
    return impact
def refresh_configuration(storagedriver_guid):
    """
    Refresh the StorageDriver's configuration (Configuration must have been updated manually)
    :param storagedriver_guid: Guid of the StorageDriver
    :type storagedriver_guid: str
    :return: Amount of changes the volumedriver detected
    :rtype: int
    """
    storagedriver = StorageDriver(storagedriver_guid)
    try:
        client = SSHClient(endpoint=storagedriver.storagerouter)
    except UnableToConnectException:
        # The node hosting this StorageDriver must be reachable to push the config
        message = 'StorageRouter with IP {0} is not reachable. Cannot refresh the configuration'
        raise Exception(message.format(storagedriver.storagerouter.ip))
    config = StorageDriverConfiguration(vpool_guid=storagedriver.vpool_guid,
                                        storagedriver_id=storagedriver.storagedriver_id)
    changes = config.save(client=client, force_reload=True)
    return len(changes)
def build_service_structure(structure, previous_structure=None):
    """
    Builds an MDS service structure
    Example: structure = Helper.build_service_structure(
        {'vpools': [1],
         'domains': [],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouter_domains': []}  # (<id>, <storagerouter_id>, <domain_id>)
    )
    """
    # NOTE(review): the docstring documents 'storagerouter_domains' as 3-tuples, but the
    # loop below unpacks 4 values (<id>, <storagerouter_id>, <domain_id>, <backup>) — confirm
    if previous_structure is None:
        previous_structure = {}
    # Start from previously built objects so structures can be extended incrementally
    vdisks = previous_structure.get("vdisks", {})
    vpools = previous_structure.get("vpools", {})
    domains = previous_structure.get("domains", {})
    services = previous_structure.get("services", {})
    mds_services = previous_structure.get("mds_services", {})
    storagerouters = previous_structure.get("storagerouters", {})
    storagedrivers = previous_structure.get("storagedrivers", {})
    storagerouter_domains = previous_structure.get("storagerouter_domains", {})
    service_type = ServiceTypeList.get_by_name("MetadataServer")
    if service_type is None:
        service_type = ServiceType()
        service_type.name = "MetadataServer"
        service_type.save()
    srclients = {}
    for domain_id in structure.get("domains", []):
        if domain_id not in domains:
            domain = Domain()
            domain.name = "domain_{0}".format(domain_id)
            domain.save()
            domains[domain_id] = domain
    for vpool_id in structure.get("vpools", []):
        if vpool_id not in vpools:
            vpool = VPool()
            vpool.name = str(vpool_id)
            vpool.status = "RUNNING"
            vpool.save()
            vpools[vpool_id] = vpool
        else:
            vpool = vpools[vpool_id]
        srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
    for sr_id in structure.get("storagerouters", []):
        if sr_id not in storagerouters:
            storagerouter = StorageRouter()
            storagerouter.name = str(sr_id)
            storagerouter.ip = "10.0.0.{0}".format(sr_id)
            storagerouter.rdma_capable = False
            storagerouter.node_type = "MASTER"
            storagerouter.machine_id = str(sr_id)
            storagerouter.save()
            storagerouters[sr_id] = storagerouter
            # Each new StorageRouter gets one fake disk with a single DB/SCRUB partition
            disk = Disk()
            disk.storagerouter = storagerouter
            disk.state = "OK"
            disk.name = "/dev/uda"
            disk.size = 1 * 1024 ** 4
            disk.is_ssd = True
            disk.aliases = ["/dev/uda"]
            disk.save()
            partition = DiskPartition()
            partition.offset = 0
            partition.size = disk.size
            partition.aliases = ["/dev/uda-1"]
            partition.state = "OK"
            partition.mountpoint = "/tmp/unittest/sr_{0}/disk_1/partition_1".format(sr_id)
            partition.disk = disk
            partition.roles = [DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB]
            partition.save()
    for sd_id, vpool_id, sr_id in structure.get("storagedrivers", ()):
        if sd_id not in storagedrivers:
            storagedriver = StorageDriver()
            storagedriver.vpool = vpools[vpool_id]
            storagedriver.storagerouter = storagerouters[sr_id]
            storagedriver.name = str(sd_id)
            storagedriver.mountpoint = "/"
            storagedriver.cluster_ip = storagerouters[sr_id].ip
            storagedriver.storage_ip = "10.0.1.{0}".format(sr_id)
            storagedriver.storagedriver_id = str(sd_id)
            storagedriver.ports = {"management": 1, "xmlrpc": 2, "dtl": 3, "edge": 4}
            storagedriver.save()
            storagedrivers[sd_id] = storagedriver
            Helper._set_vpool_storage_driver_configuration(vpool=vpools[vpool_id], storagedriver=storagedriver)
    for mds_id, sd_id in structure.get("mds_services", ()):
        if mds_id not in mds_services:
            sd = storagedrivers[sd_id]
            s_id = "{0}-{1}".format(sd.storagerouter.name, mds_id)
            service = Service()
            service.name = s_id
            service.storagerouter = sd.storagerouter
            service.ports = [mds_id]
            service.type = service_type
            service.save()
            services[s_id] = service
            mds_service = MDSService()
            mds_service.service = service
            mds_service.number = 0
            mds_service.capacity = 10
            mds_service.vpool = sd.vpool
            mds_service.save()
            mds_services[mds_id] = mds_service
            # The MDS lives on the first partition of the StorageRouter's first disk
            StorageDriverController.add_storagedriverpartition(
                sd,
                {
                    "size": None,
                    "role": DiskPartition.ROLES.DB,
                    "sub_role": StorageDriverPartition.SUBROLE.MDS,
                    "partition": sd.storagerouter.disks[0].partitions[0],
                    "mds_service": mds_service,
                },
            )
    for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get("vdisks", ()):
        if vdisk_id not in vdisks:
            vpool = vpools[vpool_id]
            devicename = "vdisk_{0}".format(vdisk_id)
            mds_backend_config = Helper._generate_mdsmetadatabackendconfig(
                [] if mds_id is None else [mds_services[mds_id]]
            )
            volume_id = srclients[vpool_id].create_volume(devicename, mds_backend_config, 0, str(storage_driver_id))
            vdisk = VDisk()
            vdisk.name = str(vdisk_id)
            vdisk.devicename = devicename
            vdisk.volume_id = volume_id
            vdisk.vpool = vpool
            vdisk.size = 0
            vdisk.save()
            vdisk.reload_client("storagedriver")
            vdisks[vdisk_id] = vdisk
    for srd_id, sr_id, domain_id, backup in structure.get("storagerouter_domains", ()):
        if srd_id not in storagerouter_domains:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = backup
            sr_domain.domain = domains[domain_id]
            sr_domain.storagerouter = storagerouters[sr_id]
            sr_domain.save()
            storagerouter_domains[srd_id] = sr_domain
    return {
        "vdisks": vdisks,
        "vpools": vpools,
        "domains": domains,
        "services": services,
        "service_type": service_type,
        "mds_services": mds_services,
        "storagerouters": storagerouters,
        "storagedrivers": storagedrivers,
        "storagerouter_domains": storagerouter_domains,
    }
def remove_storagedriver(storagedriver_guid):
    """
    Removes a Storage Driver (and, if it was the last Storage Driver for a vPool,
    the vPool is removed as well)
    :param storagedriver_guid: Guid of the Storage Driver to remove
    :raises RuntimeError: when vMachines or vDisks are still served from this Storage Driver
    """
    # Get objects & Make some checks
    storagedriver = StorageDriver(storagedriver_guid)
    storagerouter = storagedriver.storagerouter
    ip = storagerouter.ip
    pmachine = storagerouter.pmachine
    vmachines = VMachineList.get_customer_vmachines()
    pmachine_guids = [vm.pmachine_guid for vm in vmachines]
    vpools_guids = [vm.vpool_guid for vm in vmachines if vm.vpool_guid is not None]
    vpool = storagedriver.vpool
    if pmachine.guid in pmachine_guids and vpool.guid in vpools_guids:
        raise RuntimeError('There are still vMachines served from the given Storage Driver')
    if any(vdisk for vdisk in vpool.vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id):
        raise RuntimeError('There are still vDisks served from the given Storage Driver')
    services = ['volumedriver_{0}'.format(vpool.name), 'failovercache_{0}'.format(vpool.name)]
    storagedrivers_left = False
    # Stop services on every node of the vPool; remember whether other drivers remain
    for current_storagedriver in vpool.storagedrivers:
        if current_storagedriver.guid != storagedriver_guid:
            storagedrivers_left = True
        client = SSHClient.load(current_storagedriver.storagerouter.ip)
        for service in services:
            System.exec_remote_python(client, """
from ovs.plugin.provider.service import Service
if Service.has_service('{0}'):
    Service.disable_service('{0}')
""".format(service))
            System.exec_remote_python(client, """
from ovs.plugin.provider.service import Service
if Service.has_service('{0}'):
    Service.stop_service('{0}')
""".format(service))
    # Unconfigure Cinder
    ovsdb = PersistentFactory.get_client()
    key = str('ovs_openstack_cinder_%s' % storagedriver.vpool_guid)
    if ovsdb.exists(key):
        cinder_password, cinder_user, tenant_name, controller_ip, _ = ovsdb.get(key)
        client = SSHClient.load(ip)
        # NOTE(review): the '******' literals look like redacted credential placeholders;
        # format arguments {0} (cinder_password) and {1} (cinder_user) are never consumed,
        # so these were presumably '{0}' and '{1}' originally — confirm against VCS history
        System.exec_remote_python(client, """
from ovs.extensions.openstack.cinder import OpenStackCinder
osc = OpenStackCinder(cinder_password = '******', cinder_user = '******', tenant_name = '{2}', controller_ip = '{3}')
osc.unconfigure_vpool('{4}', '{5}', {6})
""".format(cinder_password, cinder_user, tenant_name, controller_ip, vpool.name, storagedriver.mountpoint, not storagedrivers_left))
        if not storagedrivers_left:
            ovsdb.delete(key)
    # KVM pool
    client = SSHClient.load(ip)
    if pmachine.hvtype == 'KVM':
        if vpool.name in client.run('virsh pool-list'):
            client.run('virsh pool-destroy {0}'.format(vpool.name))
        try:
            client.run('virsh pool-undefine {0}'.format(vpool.name))
        except:
            pass  # Ignore undefine errors, since that can happen on re-entrance
    # Remove services
    client = SSHClient.load(ip)
    for service in services:
        System.exec_remote_python(client, """
from ovs.plugin.provider.service import Service
if Service.has_service('{0}'):
    Service.remove_service(domain='openvstorage', name='{0}')
""".format(service))
    configuration_dir = System.read_remote_config(client, 'ovs.core.cfgdir')
    voldrv_arakoon_cluster_id = str(System.read_remote_config(client, 'volumedriver.arakoon.clusterid'))
    voldrv_arakoon_cluster = ArakoonManagementEx().getCluster(voldrv_arakoon_cluster_id)
    voldrv_arakoon_client_config = voldrv_arakoon_cluster.getClientConfig()
    arakoon_node_configs = []
    for arakoon_node in voldrv_arakoon_client_config.keys():
        arakoon_node_configs.append(ArakoonNodeConfig(arakoon_node,
                                                      voldrv_arakoon_client_config[arakoon_node][0][0],
                                                      voldrv_arakoon_client_config[arakoon_node][1]))
    vrouter_clusterregistry = ClusterRegistry(str(vpool.guid), voldrv_arakoon_cluster_id, arakoon_node_configs)
    # Reconfigure volumedriver
    if storagedrivers_left:
        # Other drivers remain: re-publish the node configs without the removed one
        node_configs = []
        for current_storagedriver in vpool.storagedrivers:
            if current_storagedriver.guid != storagedriver_guid:
                node_configs.append(ClusterNodeConfig(str(current_storagedriver.storagedriver_id),
                                                      str(current_storagedriver.cluster_ip),
                                                      current_storagedriver.ports[0],
                                                      current_storagedriver.ports[1],
                                                      current_storagedriver.ports[2]))
        vrouter_clusterregistry.set_node_configs(node_configs)
    else:
        # Last driver of the vPool: tear down the filesystem and registry entirely
        try:
            storagedriver_client = LocalStorageRouterClient('{0}/voldrv_vpools/{1}.json'.format(configuration_dir, vpool.name))
            storagedriver_client.destroy_filesystem()
            vrouter_clusterregistry.erase_node_configs()
        except RuntimeError as ex:
            print('Could not destroy filesystem or erase node configs due to error: {}'.format(ex))
    # Cleanup directories
    client = SSHClient.load(ip)
    client.run('rm -rf {}/read1_{}'.format(storagedriver.mountpoint_readcache1, vpool.name))
    if storagedriver.mountpoint_readcache2:
        client.run('rm -rf {}/read2_{}'.format(storagedriver.mountpoint_readcache2, vpool.name))
    client.run('rm -rf {}/sco_{}'.format(storagedriver.mountpoint_writecache, vpool.name))
    client.run('rm -rf {}/foc_{}'.format(storagedriver.mountpoint_foc, vpool.name))
    client.run('rm -rf {}/fd_{}'.format(storagedriver.mountpoint_writecache, vpool.name))
    client.run('rm -rf {}/metadata_{}'.format(storagedriver.mountpoint_md, vpool.name))
    client.run('rm -rf {}/tlogs_{}'.format(storagedriver.mountpoint_md, vpool.name))
    client.run('rm -rf /var/rsp/{}'.format(vpool.name))
    # Remove files
    client.run('rm -f {0}/voldrv_vpools/{1}.json'.format(configuration_dir, vpool.name))
    # Remove top directories (only when they exist and are empty)
    client.run('if [ -d {0} ] && [ ! "$(ls -A {0})" ]; then rmdir {0}; fi'.format(storagedriver.mountpoint_readcache1))
    if storagedriver.mountpoint_readcache2:
        client.run('if [ -d {0} ] && [ ! "$(ls -A {0})" ]; then rmdir {0}; fi'.format(storagedriver.mountpoint_readcache2))
    client.run('if [ -d {0} ] && [ ! "$(ls -A {0})" ]; then rmdir {0}; fi'.format(storagedriver.mountpoint_writecache))
    client.run('if [ -d {0} ] && [ ! "$(ls -A {0})" ]; then rmdir {0}; fi'.format(storagedriver.mountpoint_foc))
    client.run('if [ -d {0} ] && [ ! "$(ls -A {0})" ]; then rmdir {0}; fi'.format(storagedriver.mountpoint_md))
    client.run('if [ -d {0} ] && [ ! "$(ls -A {0})" ]; then rmdir {0}; fi'.format(storagedriver.mountpoint))
    # First model cleanup
    storagedriver.delete(abandon=True)  # Detach from the log entries
    if storagedrivers_left:
        # Restart leftover services
        for current_storagedriver in vpool.storagedrivers:
            if current_storagedriver.guid != storagedriver_guid:
                client = SSHClient.load(current_storagedriver.storagerouter.ip)
                for service in services:
                    System.exec_remote_python(client, """
from ovs.plugin.provider.service import Service
if Service.has_service('{0}'):
    Service.enable_service('{0}')
""".format(service))
                    System.exec_remote_python(client, """
from ovs.plugin.provider.service import Service
if Service.has_service('{0}'):
    Service.start_service('{0}')
""".format(service))
    else:
        # Final model cleanup
        vpool.delete()
def test_clone_snapshot(self):
    """
    Validates that a snapshot that has clones will not be deleted while other snapshots will be deleted
    """
    # Setup
    # There are 2 disks, second one cloned from a snapshot of the first
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.status = 'RUNNING'
    vpool.save()
    storage_router = StorageRouter()
    storage_router.name = 'storage_router'
    storage_router.ip = '127.0.0.1'
    storage_router.machine_id = System.get_my_machine_id()
    storage_router.rdma_capable = False
    storage_router.save()
    disk = Disk()
    disk.name = 'physical_disk_1'
    disk.aliases = ['/dev/non-existent']
    disk.size = 500 * 1024 ** 3
    disk.state = 'OK'
    disk.is_ssd = True
    disk.storagerouter = storage_router
    disk.save()
    disk_partition = DiskPartition()
    disk_partition.disk = disk
    disk_partition.aliases = ['/dev/disk/non-existent']
    disk_partition.size = 400 * 1024 ** 3
    disk_partition.state = 'OK'
    disk_partition.offset = 1024
    disk_partition.roles = [DiskPartition.ROLES.SCRUB]
    disk_partition.mountpoint = '/var/tmp'
    disk_partition.save()
    storage_driver = StorageDriver()
    storage_driver.vpool = vpool
    storage_driver.storagerouter = storage_router
    storage_driver.name = 'storage_driver_1'
    storage_driver.mountpoint = '/'
    storage_driver.cluster_ip = storage_router.ip
    storage_driver.storage_ip = '127.0.0.1'
    storage_driver.storagedriver_id = 'storage_driver_1'
    storage_driver.ports = {'management': 1, 'xmlrpc': 2, 'dtl': 3, 'edge': 4}
    storage_driver.save()
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()
    service = Service()
    service.name = 'service_1'
    service.storagerouter = storage_driver.storagerouter
    service.ports = [1]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storage_driver.vpool
    mds_service.save()
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client('storagedriver')
    # Set the 'snapshots' dynamic's cache timeout to 0 so every read reflects the latest state
    [dynamic for dynamic in vdisk_1_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0
    travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
    if travis is True:
        print 'Running in Travis, reducing output.'
    base = datetime.datetime.now().date()
    day = datetime.timedelta(1)
    base_timestamp = self._make_timestamp(base, day)
    minute = 60
    hour = minute * 60
    # Three consistent snapshots on the source disk
    for h in [6, 12, 18]:
        timestamp = base_timestamp + (hour * h)
        VDiskController.create_snapshot(vdisk_guid=vdisk_1_1.guid,
                                        metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                  'is_consistent': True,
                                                  'timestamp': str(timestamp),
                                                  'machineguid': None})
    base_snapshot_guid = vdisk_1_1.snapshots[0]['guid']  # Oldest
    # Clone from the oldest snapshot, then snapshot the clone as well
    clone_vdisk = VDisk()
    clone_vdisk.name = 'clone_vdisk'
    clone_vdisk.volume_id = 'clone_vdisk'
    clone_vdisk.vpool = vpool
    clone_vdisk.devicename = 'dummy'
    clone_vdisk.parentsnapshot = base_snapshot_guid
    clone_vdisk.size = 0
    clone_vdisk.save()
    clone_vdisk.reload_client('storagedriver')
    for h in [6, 12, 18]:
        timestamp = base_timestamp + (hour * h)
        VDiskController.create_snapshot(vdisk_guid=clone_vdisk.guid,
                                        metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                  'is_consistent': True,
                                                  'timestamp': str(timestamp),
                                                  'machineguid': None})
    # Run the cleanup one day later; the cloned-from snapshot must survive
    base_timestamp = self._make_timestamp(base, day * 2)
    ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))
    self.assertIn(base_snapshot_guid, [snap['guid'] for snap in vdisk_1_1.snapshots],
                  'Snapshot was deleted while there are still clones of it')
def setUpClass(cls): """ Sets up the unittest, mocking a certain set of 3rd party libraries and extensions. This makes sure the unittests can be executed without those libraries installed """ # Load dummy stores PersistentFactory.store = DummyPersistentStore() VolatileFactory.store = DummyVolatileStore() # Replace mocked classes sys.modules[ 'ovs.extensions.storageserver.storagedriver'] = StorageDriverModule sys.modules['ovs.extensions.hypervisor.hypervisors.kvm'] = KVMModule # Import required modules/classes after mocking is done from ovs.dal.hybrids.backendtype import BackendType from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.hybrids.j_mdsservice import MDSService from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk from ovs.lib.vdisk import VDiskController from ovs.dal.hybrids.pmachine import PMachine from ovs.dal.hybrids.vmachine import VMachine from ovs.dal.hybrids.vpool import VPool from ovs.dal.hybrids.storagedriver import StorageDriver from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.hybrids.failuredomain import FailureDomain from ovs.dal.hybrids.service import Service from ovs.dal.hybrids.servicetype import ServiceType from ovs.dal.lists.vdisklist import VDiskList from ovs.lib.mdsservice import MDSServiceController # Globalize mocked classes global VDisk global VDiskController global PMachine global VMachine global BackendType global VPool global StorageDriver global StorageRouter global FailureDomain global MDSService global MDSServiceVDisk global Service global ServiceType global VDiskList global MDSServiceController _ = VDisk(), PMachine(), VMachine(), VDiskController, VPool(), BackendType(), StorageDriver(), StorageRouter(), \ FailureDomain(), MDSService(), MDSServiceVDisk(), Service(), ServiceType(), VDiskList, MDSServiceController # Cleaning storage VolatileFactory.store.clean() PersistentFactory.store.clean()
def _prepare(self):
    # Setup: build the minimal model graph (template vMachine + vDisk + one
    # StorageRouter/StorageDriver with an MDS service) used by the tests
    failure_domain = FailureDomain()
    failure_domain.name = 'Test'
    failure_domain.save()
    backend_type = BackendType()
    backend_type.name = 'BackendType'
    backend_type.code = 'BT'
    backend_type.save()
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.backend_type = backend_type
    vpool.save()
    pmachine = PMachine()
    pmachine.name = 'PMachine'
    pmachine.username = '******'
    pmachine.ip = '127.0.0.1'
    pmachine.hvtype = 'KVM'
    pmachine.save()
    # vmachine_1 is a template; vdisk_1_1 belongs to it
    vmachine_1 = VMachine()
    vmachine_1.name = 'vmachine_1'
    vmachine_1.devicename = 'dummy'
    vmachine_1.pmachine = pmachine
    vmachine_1.is_vtemplate = True
    vmachine_1.save()
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vmachine = vmachine_1
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client()
    storage_router = StorageRouter()
    storage_router.name = 'storage_router'
    storage_router.ip = '127.0.0.1'
    storage_router.pmachine = pmachine
    storage_router.machine_id = System.get_my_machine_id()
    storage_router.rdma_capable = False
    storage_router.primary_failure_domain = failure_domain
    storage_router.save()
    storagedriver = StorageDriver()
    storagedriver.vpool = vpool
    storagedriver.storagerouter = storage_router
    storagedriver.name = '1'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storage_router.ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '1'
    storagedriver.ports = [1, 2, 3]
    storagedriver.save()
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()
    s_id = '{0}-{1}'.format(storagedriver.storagerouter.name, '1')
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [1]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()
    # Monkeypatch the heavy/async entry points with no-ops so the tests stay local
    def ensure_safety(vdisk):
        pass
    class Dtl_Checkup():
        @staticmethod
        def delay(vpool_guid=None, vdisk_guid=None, storagerouters_to_exclude=None):
            pass
    MDSServiceController.ensure_safety = staticmethod(ensure_safety)
    VDiskController.dtl_checkup = Dtl_Checkup
    return vdisk_1_1, pmachine
def _build_service_structure(self, structure):
    """
    Builds an MDS service structure
    """
    vpools, storagerouters, storagedrivers = {}, {}, {}
    services, mds_services = {}, {}

    # Shared fixtures referenced by everything below
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()

    backend_type = BackendType()
    backend_type.name = 'BackendType'
    backend_type.code = 'BT'
    backend_type.save()

    pmachine = PMachine()
    pmachine.name = 'PMachine'
    pmachine.username = '******'
    pmachine.ip = '127.0.0.1'
    pmachine.hvtype = 'VMWARE'
    pmachine.save()

    for vpool_id in structure['vpools']:
        vpool = VPool()
        vpool.name = str(vpool_id)
        vpool.backend_type = backend_type
        vpool.save()
        vpools[vpool_id] = vpool

    for sr_id in structure['storagerouters']:
        router = StorageRouter()
        router.name = str(sr_id)
        router.ip = '10.0.0.{0}'.format(sr_id)
        router.pmachine = pmachine
        router.save()
        storagerouters[sr_id] = router

    # Each entry links an id to a previously built vPool and StorageRouter
    for sd_id, vpool_id, sr_id in structure['storagedrivers']:
        driver = StorageDriver()
        driver.vpool = vpools[vpool_id]
        driver.storagerouter = storagerouters[sr_id]
        driver.name = str(sd_id)
        driver.mountpoint = '/'
        driver.cluster_ip = storagerouters[sr_id].ip
        driver.storage_ip = '127.0.0.1'
        driver.storagedriver_id = str(sd_id)
        driver.ports = [1, 2, 3]
        driver.save()
        storagedrivers[sd_id] = driver

    # One MDS service per entry, hosted on the StorageDriver's StorageRouter
    for mds_id, sd_id in structure['mds_services']:
        driver = storagedrivers[sd_id]
        service_name = '{0}-{1}'.format(driver.storagerouter.name, mds_id)
        service = Service()
        service.name = service_name
        service.storagerouter = driver.storagerouter
        service.ports = [mds_id]
        service.type = service_type
        service.save()
        services[service_name] = service
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = driver.vpool
        mds_service.save()
        mds_services[mds_id] = mds_service

    return vpools, storagerouters, storagedrivers, services, mds_services, service_type
def test_clone_snapshot(self):
    """
    Validates that a snapshot that has clones will not be deleted while other snapshots will be deleted
    """
    # Setup
    # There are 2 disks, second one cloned from a snapshot of the first
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.status = 'RUNNING'
    vpool.save()
    storage_router = StorageRouter()
    storage_router.name = 'storage_router'
    storage_router.ip = '127.0.0.1'
    storage_router.machine_id = System.get_my_machine_id()
    storage_router.rdma_capable = False
    storage_router.save()
    # Physical disk + partition carrying the SCRUB role, required by the
    # snapshot-deletion logic under test
    disk = Disk()
    disk.name = 'physical_disk_1'
    disk.aliases = ['/dev/non-existent']
    disk.size = 500 * 1024**3
    disk.state = 'OK'
    disk.is_ssd = True
    disk.storagerouter = storage_router
    disk.save()
    disk_partition = DiskPartition()
    disk_partition.disk = disk
    disk_partition.aliases = ['/dev/disk/non-existent']
    disk_partition.size = 400 * 1024**3
    disk_partition.state = 'OK'
    disk_partition.offset = 1024
    disk_partition.roles = [DiskPartition.ROLES.SCRUB]
    disk_partition.mountpoint = '/var/tmp'
    disk_partition.save()
    storage_driver = StorageDriver()
    storage_driver.vpool = vpool
    storage_driver.storagerouter = storage_router
    storage_driver.name = 'storage_driver_1'
    storage_driver.mountpoint = '/'
    storage_driver.cluster_ip = storage_router.ip
    storage_driver.storage_ip = '127.0.0.1'
    storage_driver.storagedriver_id = 'storage_driver_1'
    storage_driver.ports = {
        'management': 1,
        'xmlrpc': 2,
        'dtl': 3,
        'edge': 4
    }
    storage_driver.save()
    # Minimal MDS setup so vdisks can be created on this vPool
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()
    service = Service()
    service.name = 'service_1'
    service.storagerouter = storage_driver.storagerouter
    service.ports = [1]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storage_driver.vpool
    mds_service.save()
    # The source vdisk that will be snapshotted and cloned
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client('storagedriver')
    # Disable caching on the 'snapshots' dynamic property so every read
    # reflects the current model state
    [
        dynamic for dynamic in vdisk_1_1._dynamics
        if dynamic.name == 'snapshots'
    ][0].timeout = 0
    travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
    if travis is True:
        print 'Running in Travis, reducing output.'
    base = datetime.datetime.now().date()
    day = datetime.timedelta(1)
    base_timestamp = self._make_timestamp(base, day)
    minute = 60
    hour = minute * 60
    # Three consistent snapshots on the source disk, at 06:00, 12:00, 18:00
    for h in [6, 12, 18]:
        timestamp = base_timestamp + (hour * h)
        VDiskController.create_snapshot(vdisk_guid=vdisk_1_1.guid,
                                        metadata={
                                            'label':
                                            'snapshot_{0}:30'.format(
                                                str(h)),
                                            'is_consistent': True,
                                            'timestamp': str(timestamp),
                                            'machineguid': None
                                        })
    base_snapshot_guid = vdisk_1_1.snapshots[0]['guid']  # Oldest
    # Clone created from the oldest snapshot: that snapshot must survive cleanup
    clone_vdisk = VDisk()
    clone_vdisk.name = 'clone_vdisk'
    clone_vdisk.volume_id = 'clone_vdisk'
    clone_vdisk.vpool = vpool
    clone_vdisk.devicename = 'dummy'
    clone_vdisk.parentsnapshot = base_snapshot_guid
    clone_vdisk.size = 0
    clone_vdisk.save()
    clone_vdisk.reload_client('storagedriver')
    for h in [6, 12, 18]:
        timestamp = base_timestamp + (hour * h)
        VDiskController.create_snapshot(vdisk_guid=clone_vdisk.guid,
                                        metadata={
                                            'label':
                                            'snapshot_{0}:30'.format(
                                                str(h)),
                                            'is_consistent': True,
                                            'timestamp': str(timestamp),
                                            'machineguid': None
                                        })
    # Run the scheduled cleanup one day later; the cloned-from snapshot
    # must not be removed even though it is old enough
    base_timestamp = self._make_timestamp(base, day * 2)
    ScheduledTaskController.delete_snapshots(timestamp=base_timestamp +
                                            (minute * 30))
    self.assertIn(
        base_snapshot_guid,
        [snap['guid'] for snap in vdisk_1_1.snapshots],
        'Snapshot was deleted while there are still clones of it')
def test_from_single_node_to_multi_node(self):
    """
    Verify DTL behaviour when a cluster grows from 1 to 2 nodes.
    With a single node no DTL can be configured; once a second
    StorageRouter (with a StorageDriver for the same vPool) is modelled,
    the DTL checkup should target that new node asynchronously.
    """
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |       sr 1     |   1    |                |                  |             |
    layout = {'vpools': [1],
              'vdisks': [(1, 1, 1, 1)],       # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
              'mds_services': [(1, 1)],       # (<id>, <storagedriver_id>)
              'storagerouters': [1],
              'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
    structure = Helper.build_service_structure(layout)
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
    # Single node: nowhere to place a DTL, so no config expected
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])

    # Add a Storage Router
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |       sr 1     |   1    |                |                  |             |
    # |       sr 2     |        |                |                  |      1      |
    extra_sr = StorageRouter()
    extra_sr.name = '2'
    extra_sr.ip = '10.0.0.2'
    extra_sr.rdma_capable = False
    extra_sr.save()
    storagerouters[2] = extra_sr
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    extra_sd = StorageDriver()
    extra_sd.vpool = vpool
    extra_sd.storagerouter = extra_sr
    extra_sd.name = '2'
    extra_sd.mountpoint = '/'
    extra_sd.cluster_ip = extra_sr.ip
    extra_sd.storage_ip = '10.0.1.2'
    extra_sd.storagedriver_id = '2'
    extra_sd.ports = {'management': 1,
                      'xmlrpc': 2,
                      'dtl': 3,
                      'edge': 4}
    extra_sd.save()
    # DTL must now point at the second node's storage IP on the DTL port
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def build_service_structure(structure, previous_structure=None):
    """
    Builds an MDS service structure. Can be called repeatedly: pass the dict
    returned by a previous call as ``previous_structure`` and only the ids
    not yet present will be created (existing objects are reused).

    Example: structure = Helper.build_service_structure(
        {'vpools': [1],
         'domains': [],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'storagerouter_domains': []}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
    )
    """
    if previous_structure is None:
        previous_structure = {}
    # Start from whatever a previous invocation already modelled
    vdisks = previous_structure.get('vdisks', {})
    vpools = previous_structure.get('vpools', {})
    domains = previous_structure.get('domains', {})
    services = previous_structure.get('services', {})
    mds_services = previous_structure.get('mds_services', {})
    storagerouters = previous_structure.get('storagerouters', {})
    storagedrivers = previous_structure.get('storagedrivers', {})
    storagerouter_domains = previous_structure.get('storagerouter_domains', {})
    # Reuse the 'MetadataServer' service type if it was already modelled
    service_type = ServiceTypeList.get_by_name('MetadataServer')
    if service_type is None:
        service_type = ServiceType()
        service_type.name = 'MetadataServer'
        service_type.save()
    srclients = {}  # vpool_id -> StorageRouterClient, used for volume creation
    for domain_id in structure.get('domains', []):
        if domain_id not in domains:
            domain = Domain()
            domain.name = 'domain_{0}'.format(domain_id)
            domain.save()
            domains[domain_id] = domain
    for vpool_id in structure.get('vpools', []):
        if vpool_id not in vpools:
            vpool = VPool()
            vpool.name = str(vpool_id)
            vpool.status = 'RUNNING'
            vpool.save()
            vpools[vpool_id] = vpool
        else:
            vpool = vpools[vpool_id]
        srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
    for sr_id in structure.get('storagerouters', []):
        if sr_id not in storagerouters:
            storagerouter = StorageRouter()
            storagerouter.name = str(sr_id)
            storagerouter.ip = '10.0.0.{0}'.format(sr_id)
            storagerouter.rdma_capable = False
            storagerouter.node_type = 'MASTER'
            storagerouter.machine_id = str(sr_id)
            storagerouter.save()
            storagerouters[sr_id] = storagerouter
            # Every new StorageRouter gets one disk with a DB+SCRUB partition,
            # which the MDS partition setup below relies on
            disk = Disk()
            disk.storagerouter = storagerouter
            disk.state = 'OK'
            disk.name = '/dev/uda'
            disk.size = 1 * 1024**4
            disk.is_ssd = True
            disk.aliases = ['/dev/uda']
            disk.save()
            partition = DiskPartition()
            partition.offset = 0
            partition.size = disk.size
            partition.aliases = ['/dev/uda-1']
            partition.state = 'OK'
            partition.mountpoint = '/tmp/unittest/sr_{0}/disk_1/partition_1'.format(
                sr_id)
            partition.disk = disk
            partition.roles = [
                DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB
            ]
            partition.save()
    for sd_id, vpool_id, sr_id in structure.get('storagedrivers', ()):
        if sd_id not in storagedrivers:
            storagedriver = StorageDriver()
            storagedriver.vpool = vpools[vpool_id]
            storagedriver.storagerouter = storagerouters[sr_id]
            storagedriver.name = str(sd_id)
            storagedriver.mountpoint = '/'
            storagedriver.cluster_ip = storagerouters[sr_id].ip
            storagedriver.storage_ip = '10.0.1.{0}'.format(sr_id)
            storagedriver.storagedriver_id = str(sd_id)
            storagedriver.ports = {
                'management': 1,
                'xmlrpc': 2,
                'dtl': 3,
                'edge': 4
            }
            storagedriver.save()
            storagedrivers[sd_id] = storagedriver
            Helper._set_vpool_storage_driver_configuration(
                vpool=vpools[vpool_id], storagedriver=storagedriver)
    for mds_id, sd_id in structure.get('mds_services', ()):
        if mds_id not in mds_services:
            sd = storagedrivers[sd_id]
            # Services are keyed on '<storagerouter name>-<mds id>'
            s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
            service = Service()
            service.name = s_id
            service.storagerouter = sd.storagerouter
            service.ports = [mds_id]
            service.type = service_type
            service.save()
            services[s_id] = service
            mds_service = MDSService()
            mds_service.service = service
            mds_service.number = 0
            mds_service.capacity = 10
            mds_service.vpool = sd.vpool
            mds_service.save()
            mds_services[mds_id] = mds_service
            # Back the MDS service with the DB partition created above
            StorageDriverController.add_storagedriverpartition(
                sd, {
                    'size': None,
                    'role': DiskPartition.ROLES.DB,
                    'sub_role': StorageDriverPartition.SUBROLE.MDS,
                    'partition': sd.storagerouter.disks[0].partitions[0],
                    'mds_service': mds_service
                })
    for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get(
            'vdisks', ()):
        if vdisk_id not in vdisks:
            vpool = vpools[vpool_id]
            devicename = 'vdisk_{0}'.format(vdisk_id)
            # mds_id may be None -> volume gets an empty MDS backend config
            mds_backend_config = Helper._generate_mdsmetadatabackendconfig(
                [] if mds_id is None else [mds_services[mds_id]])
            volume_id = srclients[vpool_id].create_volume(
                devicename, mds_backend_config, 0, str(storage_driver_id))
            vdisk = VDisk()
            vdisk.name = str(vdisk_id)
            vdisk.devicename = devicename
            vdisk.volume_id = volume_id
            vdisk.vpool = vpool
            vdisk.size = 0
            vdisk.save()
            vdisk.reload_client('storagedriver')
            vdisks[vdisk_id] = vdisk
    for srd_id, sr_id, domain_id, backup in structure.get(
            'storagerouter_domains', ()):
        if srd_id not in storagerouter_domains:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = backup
            sr_domain.domain = domains[domain_id]
            sr_domain.storagerouter = storagerouters[sr_id]
            sr_domain.save()
            storagerouter_domains[srd_id] = sr_domain
    return {
        'vdisks': vdisks,
        'vpools': vpools,
        'domains': domains,
        'services': services,
        'service_type': service_type,
        'mds_services': mds_services,
        'storagerouters': storagerouters,
        'storagedrivers': storagedrivers,
        'storagerouter_domains': storagerouter_domains
    }
def create(self):
    """
    Model a new StorageDriver (plus its ALBA proxy services) for the
    vPool/StorageRouter combination held by the installers.

    Port reservation happens under a volatile mutex so concurrent vPool
    additions on the same host cannot claim the same ports.

    :return: None
    :rtype: NoneType
    """
    if self.sr_installer is None:
        raise RuntimeError('No StorageRouterInstaller instance found')

    root_client = self.sr_installer.root_client
    machine_id = System.get_my_machine_id(client=root_client)
    port_range = Configuration.get('/ovs/framework/hosts/{0}/ports|storagedriver'.format(machine_id))
    storagerouter = self.sr_installer.storagerouter

    # Collect every port already modelled on this StorageRouter and request
    # free ones outside that set: 4 for the StorageDriver itself plus one
    # per requested ALBA proxy.
    with volatile_mutex('add_vpool_get_free_ports_{0}'.format(machine_id), wait=30):
        claimed = []
        for existing in StorageDriverList.get_storagedrivers():
            if existing.storagerouter_guid != storagerouter.guid:
                continue
            claimed.extend(existing.ports.values())
            for proxy in existing.alba_proxies:
                claimed.append(proxy.service.ports[0])
        ports = System.get_free_ports(selected_range=port_range,
                                      exclude=claimed,
                                      amount=4 + self.sr_installer.requested_proxies,
                                      client=root_client)

    vpool = self.vp_installer.vpool
    vrouter_id = '{0}{1}'.format(vpool.name, machine_id)
    new_sd = StorageDriver()
    new_sd.name = vrouter_id.replace('_', ' ')
    new_sd.ports = {'management': ports[0],
                    'xmlrpc': ports[1],
                    'dtl': ports[2],
                    'edge': ports[3]}
    new_sd.vpool = vpool
    new_sd.cluster_ip = Configuration.get('/ovs/framework/hosts/{0}/ip'.format(machine_id))
    new_sd.storage_ip = self.storage_ip
    new_sd.mountpoint = '/mnt/{0}'.format(vpool.name)
    new_sd.description = new_sd.name
    new_sd.storagerouter = storagerouter
    new_sd.storagedriver_id = vrouter_id
    new_sd.save()

    # ALBA Proxies: one Service + AlbaProxy pair per requested proxy,
    # each on its own reserved port
    proxy_service_type = ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.ALBA_PROXY)
    for proxy_id in xrange(self.sr_installer.requested_proxies):
        proxy_service = Service()
        proxy_service.storagerouter = storagerouter
        proxy_service.ports = [ports[4 + proxy_id]]
        proxy_service.name = 'albaproxy_{0}_{1}'.format(vpool.name, proxy_id)
        proxy_service.type = proxy_service_type
        proxy_service.save()
        alba_proxy = AlbaProxy()
        alba_proxy.service = proxy_service
        alba_proxy.storagedriver = new_sd
        alba_proxy.save()

    self.storagedriver = new_sd
def create_from_template(diskguid, machinename, devicename, pmachineguid, machineguid=None, storagedriver_guid=None):
    """
    Create a disk from a template
    @param diskguid: guid of the template vdisk to clone from
    @param machinename: name of the machine, used to build the target disk path
    @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
    @param pmachineguid: guid of the PMachine whose hypervisor determines the disk path
    @param machineguid: guid of the machine to assign disk to
    @param storagedriver_guid: optional guid of the StorageDriver to create the
                               clone on; defaults to the template's own StorageDriver
    @return: dict with 'diskguid', 'name' and 'backingdevice' of the new disk
    """
    pmachine = PMachine(pmachineguid)
    hypervisor = Factory.get(pmachine)
    disk_path = hypervisor.get_disk_path(machinename, devicename)
    description = '{} {}'.format(machinename, devicename)
    # Properties copied verbatim from the template onto the clone
    properties_to_clone = [
        'description', 'size', 'type', 'retentionpolicyid',
        'snapshotpolicyid', 'vmachine', 'vpool'
    ]
    vdisk = VDisk(diskguid)
    if vdisk.vmachine and not vdisk.vmachine.is_vtemplate:
        # Disk might not be attached to a vmachine, but still be a template
        raise RuntimeError('The given vdisk does not belong to a template')
    # Resolve which StorageDriver the clone will live on
    if storagedriver_guid is not None:
        storagedriver_id = StorageDriver(
            storagedriver_guid).storagedriver_id
    else:
        storagedriver_id = vdisk.storagedriver_id
    storagedriver = StorageDriverList.get_by_storagedriver_id(
        storagedriver_id)
    if storagedriver is None:
        raise RuntimeError(
            'Could not find StorageDriver with id {0}'.format(
                storagedriver_id))
    # Model the clone first; the volumedriver volume is created afterwards
    new_vdisk = VDisk()
    new_vdisk.copy(vdisk, include=properties_to_clone)
    new_vdisk.vpool = vdisk.vpool
    new_vdisk.devicename = hypervisor.clean_backing_disk_filename(
        disk_path)
    new_vdisk.parent_vdisk = vdisk
    new_vdisk.name = '{}-clone'.format(vdisk.name)
    new_vdisk.description = description
    new_vdisk.vmachine = VMachine(
        machineguid) if machineguid else vdisk.vmachine
    new_vdisk.save()
    mds_service = MDSServiceController.get_preferred_mds(
        storagedriver.storagerouter, vdisk.vpool)
    if mds_service is None:
        raise RuntimeError('Could not find a MDS service')
    logger.info(
        'Create disk from template {} to new disk {} to location {}'.
        format(vdisk.name, new_vdisk.name, disk_path))
    try:
        volume_id = vdisk.storagedriver_client.create_clone_from_template(
            target_path=disk_path,
            metadata_backend_config=MDSMetaDataBackendConfig([
                MDSNodeConfig(address=str(
                    mds_service.service.storagerouter.ip),
                              port=mds_service.service.ports[0])
            ]),
            parent_volume_id=str(vdisk.volume_id),
            node_id=str(storagedriver_id))
        new_vdisk.volume_id = volume_id
        new_vdisk.save()
        MDSServiceController.ensure_safety(new_vdisk)
    except Exception as ex:
        # Roll back the model object when the volumedriver-level clone fails
        logger.error(
            'Clone disk on volumedriver level failed with exception: {0}'.
            format(str(ex)))
        new_vdisk.delete()
        raise
    return {
        'diskguid': new_vdisk.guid,
        'name': new_vdisk.name,
        'backingdevice': disk_path
    }
def add_vpool(parameters): """ Add a vPool to the machine this task is running on """ parameters = {} if parameters is None else parameters ip = parameters['storagerouter_ip'] vpool_name = parameters['vpool_name'] if StorageRouterController._validate_ip(ip) is False: raise ValueError('The entered ip address is invalid') if not re.match('^[0-9a-z]+(\-+[0-9a-z]+)*$', vpool_name): raise ValueError('Invalid vpool_name given') client = SSHClient.load(ip) # Make sure to ALWAYS reload the client, as Fabric seems to be singleton-ish unique_id = System.get_my_machine_id(client) storagerouter = None for current_storagerouter in StorageRouterList.get_storagerouters(): if current_storagerouter.ip == ip and current_storagerouter.machine_id == unique_id: storagerouter = current_storagerouter break if storagerouter is None: raise RuntimeError('Could not find Storage Router with given ip address') vpool = VPoolList.get_vpool_by_name(vpool_name) storagedriver = None if vpool is not None: if vpool.backend_type.code == 'local': # Might be an issue, investigating whether it's on the same not or not if len(vpool.storagedrivers) == 1 and vpool.storagedrivers[0].storagerouter.machine_id != unique_id: raise RuntimeError('A local vPool with name {0} already exists'.format(vpool_name)) for vpool_storagedriver in vpool.storagedrivers: if vpool_storagedriver.storagerouter_guid == storagerouter.guid: storagedriver = vpool_storagedriver # The vPool is already added to this Storage Router and this might be a cleanup/recovery # Check whether there are running machines on this vPool machine_guids = [] for vdisk in vpool.vdisks: if vdisk.vmachine_guid not in machine_guids: machine_guids.append(vdisk.vmachine_guid) if vdisk.vmachine.hypervisor_status in ['RUNNING', 'PAUSED']: raise RuntimeError( 'At least one vMachine using this vPool is still running or paused. 
Make sure there are no active vMachines' ) nodes = {ip} # Set comprehension if vpool is not None: for vpool_storagedriver in vpool.storagedrivers: nodes.add(vpool_storagedriver.storagerouter.ip) nodes = list(nodes) services = ['volumedriver_{0}'.format(vpool_name), 'failovercache_{0}'.format(vpool_name)] # Stop services for node in nodes: node_client = SSHClient.load(node) for service in services: System.exec_remote_python(node_client, """ from ovs.plugin.provider.service import Service if Service.has_service('{0}'): Service.disable_service('{0}') """.format(service)) System.exec_remote_python(node_client, """ from ovs.plugin.provider.service import Service if Service.has_service('{0}'): Service.stop_service('{0}') """.format(service)) # Keep in mind that if the Storage Driver exists, the vPool does as well client = SSHClient.load(ip) mountpoint_bfs = '' directories_to_create = [] if vpool is None: vpool = VPool() supported_backends = System.read_remote_config(client, 'volumedriver.supported.backends').split(',') if 'rest' in supported_backends: supported_backends.remove('rest') # REST is not supported for now backend_type = BackendTypeList.get_backend_type_by_code(parameters['type']) vpool.backend_type = backend_type connection_host = connection_port = connection_username = connection_password = None if vpool.backend_type.code in ['local', 'distributed']: vpool.metadata = {'backend_type': 'LOCAL'} mountpoint_bfs = parameters['mountpoint_bfs'] directories_to_create.append(mountpoint_bfs) vpool.metadata['local_connection_path'] = mountpoint_bfs if vpool.backend_type.code == 'rest': connection_host = parameters['connection_host'] connection_port = parameters['connection_port'] rest_connection_timeout_secs = parameters['connection_timeout'] vpool.metadata = {'rest_connection_host': connection_host, 'rest_connection_port': connection_port, 'buchla_connection_log_level': "0", 'rest_connection_verbose_logging': rest_connection_timeout_secs, 
'rest_connection_metadata_format': "JSON", 'backend_type': 'REST'} elif vpool.backend_type.code in ('ceph_s3', 'amazon_s3', 'swift_s3'): connection_host = parameters['connection_host'] connection_port = parameters['connection_port'] connection_username = parameters['connection_username'] connection_password = parameters['connection_password'] if vpool.backend_type.code in ['swift_s3']: strict_consistency = 'false' s3_connection_flavour = 'SWIFT' else: strict_consistency = 'true' s3_connection_flavour = 'S3' vpool.metadata = {'s3_connection_host': connection_host, 's3_connection_port': connection_port, 's3_connection_username': connection_username, 's3_connection_password': connection_password, 's3_connection_flavour': s3_connection_flavour, 's3_connection_strict_consistency': strict_consistency, 's3_connection_verbose_logging': 1, 'backend_type': 'S3'} vpool.name = vpool_name vpool.description = "{} {}".format(vpool.backend_type.code, vpool_name) vpool.login = connection_username vpool.password = connection_password if not connection_host: vpool.connection = None else: vpool.connection = '{}:{}'.format(connection_host, connection_port) vpool.save() # Connection information is Storage Driver related information new_storagedriver = False if storagedriver is None: storagedriver = StorageDriver() new_storagedriver = True mountpoint_temp = parameters['mountpoint_temp'] mountpoint_md = parameters['mountpoint_md'] mountpoint_readcache1 = parameters['mountpoint_readcache1'] mountpoint_readcache2 = parameters.get('mountpoint_readcache2', '') mountpoint_writecache = parameters['mountpoint_writecache'] mountpoint_foc = parameters['mountpoint_foc'] directories_to_create.append(mountpoint_temp) directories_to_create.append(mountpoint_md) directories_to_create.append(mountpoint_readcache1) if mountpoint_readcache2: directories_to_create.append(mountpoint_readcache2) directories_to_create.append(mountpoint_writecache) directories_to_create.append(mountpoint_foc) client = 
SSHClient.load(ip) dir_create_script = """ import os for directory in {0}: if not os.path.exists(directory): os.makedirs(directory) """.format(directories_to_create) System.exec_remote_python(client, dir_create_script) read_cache1_fs = os.statvfs(mountpoint_readcache1) read_cache2_fs = None if mountpoint_readcache2: read_cache2_fs = os.statvfs(mountpoint_readcache2) write_cache_fs = os.statvfs(mountpoint_writecache) fdcache = '{}/fd_{}'.format(mountpoint_writecache, vpool_name) scocache = '{}/sco_{}'.format(mountpoint_writecache, vpool_name) readcache1 = '{}/read1_{}'.format(mountpoint_readcache1, vpool_name) files2create = [readcache1] if mountpoint_readcache2 and mountpoint_readcache1 != mountpoint_readcache2: readcache2 = '{}/read2_{}'.format(mountpoint_readcache2, vpool_name) files2create.append(readcache2) else: readcache2 = '' failovercache = '{}/foc_{}'.format(mountpoint_foc, vpool_name) metadatapath = '{}/metadata_{}'.format(mountpoint_md, vpool_name) tlogpath = '{}/tlogs_{}'.format(mountpoint_md, vpool_name) rsppath = '/var/rsp/{}'.format(vpool_name) dirs2create = [scocache, failovercache, metadatapath, tlogpath, rsppath, System.read_remote_config(client, 'volumedriver.readcache.serialization.path')] cmd = "cat /etc/mtab | grep ^/dev/ | cut -d ' ' -f 2" mountpoints = [device.strip() for device in client.run(cmd).strip().split('\n')] mountpoints.remove('/') def is_partition(directory): for mountpoint in mountpoints: if directory == mountpoint: return True return False # Cache sizes # 20% = scocache # 20% = failovercache (@TODO: check if this can possibly consume more than 20%) # 60% = readcache # safety values: readcache1_factor = 0.2 readcache2_factor = 0.2 writecache_factor = 0.1 if (mountpoint_readcache1 == mountpoint_readcache2) or not mountpoint_readcache2: delta = set() delta.add(mountpoint_readcache1 if is_partition(mountpoint_readcache1) else '/dummy') delta.add(mountpoint_writecache if is_partition(mountpoint_writecache) else '/dummy') 
delta.add(mountpoint_foc if is_partition(mountpoint_foc) else '/dummy') if len(delta) == 1: readcache1_factor = 0.49 writecache_factor = 0.2 elif len(delta) == 2: if mountpoint_writecache == mountpoint_foc: readcache1_factor = 0.98 writecache_factor = 0.49 else: readcache1_factor = 0.49 if mountpoint_readcache1 == mountpoint_writecache: writecache_factor = 0.49 else: writecache_factor = 0.98 elif len(delta) == 3: readcache1_factor = 0.98 writecache_factor = 0.98 else: delta = set() delta.add(mountpoint_readcache1 if is_partition(mountpoint_readcache1) else '/dummy') delta.add(mountpoint_readcache2 if is_partition(mountpoint_readcache2) else '/dummy') delta.add(mountpoint_writecache if is_partition(mountpoint_writecache) else '/dummy') delta.add(mountpoint_foc if is_partition(mountpoint_foc) else '/dummy') if len(delta) == 1: # consider them all to be directories readcache1_factor = 0.24 readcache2_factor = 0.24 writecache_factor = 0.24 elif len(delta) == 2: if mountpoint_writecache == mountpoint_foc: writecache_factor = 0.24 if mountpoint_readcache1 == mountpoint_writecache: readcache1_factor = 0.49 readcache2_factor = 0.98 else: readcache1_factor = 0.98 readcache2_factor = 0.49 else: readcache1_factor = readcache2_factor = 0.49 writecache_factor = 0.49 elif len(delta) == 3: if mountpoint_writecache == mountpoint_foc: readcache1_factor = 0.98 readcache2_factor = 0.98 writecache_factor = 0.49 elif mountpoint_readcache1 == mountpoint_writecache: readcache1_factor = 0.49 readcache2_factor = 0.98 writecache_factor = 0.49 elif mountpoint_readcache1 == mountpoint_foc: readcache1_factor = 0.49 readcache2_factor = 0.98 writecache_factor = 0.98 elif mountpoint_readcache2 == mountpoint_writecache: readcache1_factor = 0.98 readcache2_factor = 0.49 writecache_factor = 0.49 elif mountpoint_readcache2 == mountpoint_foc: readcache1_factor = 0.98 readcache2_factor = 0.49 writecache_factor = 0.98 elif len(delta) == 4: readcache1_factor = 0.98 readcache2_factor = 0.98 
writecache_factor = 0.98 # summarize caching on root partition (directory only) root_assigned = dict() if not is_partition(mountpoint_readcache1): root_assigned['readcache1_factor'] = readcache1_factor if not is_partition(mountpoint_readcache2): root_assigned['readcache2_factor'] = readcache2_factor if not is_partition(mountpoint_writecache): root_assigned['writecache_factor'] = writecache_factor if not is_partition(mountpoint_foc): root_assigned['foc_factor'] = min(readcache1_factor, readcache2_factor, writecache_factor) # always leave at least 20% of free space division_factor = 1.0 total_size = sum(root_assigned.values()) + .02 * len(root_assigned) if 0.8 < total_size < 1.6: division_factor = 2.0 elif 1.6 < total_size < 3.2: division_factor = 4.0 elif total_size >= 3.2: division_factor = 8.0 if 'readcache1_factor' in root_assigned.keys(): readcache1_factor /= division_factor if 'readcache2_factor' in root_assigned.keys(): readcache2_factor /= division_factor if 'writecache_factor' in root_assigned.keys(): writecache_factor /= division_factor scocache_size = '{0}KiB'.format((int(write_cache_fs.f_bavail * writecache_factor / 4096) * 4096) * 4) if (mountpoint_readcache1 and not mountpoint_readcache2) or (mountpoint_readcache1 == mountpoint_readcache2): mountpoint_readcache2 = '' readcache1_size = '{0}KiB'.format((int(read_cache1_fs.f_bavail * readcache1_factor / 4096) * 4096) * 4) readcache2 = '' readcache2_size = '0KiB' else: readcache1_size = '{0}KiB'.format((int(read_cache1_fs.f_bavail * readcache1_factor / 4096) * 4096) * 4) readcache2_size = '{0}KiB'.format((int(read_cache2_fs.f_bavail * readcache2_factor / 4096) * 4096) * 4) if new_storagedriver: ports_in_use = System.ports_in_use(client) ports_reserved = [] ports_in_use_model = {} for port_storagedriver in StorageDriverList.get_storagedrivers(): if port_storagedriver.vpool_guid not in ports_in_use_model: ports_in_use_model[port_storagedriver.vpool_guid] = port_storagedriver.ports ports_reserved += 
port_storagedriver.ports if vpool.guid in ports_in_use_model: # The vPool is extended to another StorageRouter. We need to use these ports. ports = ports_in_use_model[vpool.guid] if any(port in ports_in_use for port in ports): raise RuntimeError('The required ports are in use') else: # First StorageDriver for this vPool, so generating new ports ports = [] for port_range in System.read_remote_config(client, 'volumedriver.filesystem.ports').split(','): port_range = port_range.strip() if '-' in port_range: current_range = (int(port_range.split('-')[0]), int(port_range.split('-')[1])) else: current_range = (int(port_range), 65536) current_port = current_range[0] while len(ports) < 3: if current_port not in ports_in_use and current_port not in ports_reserved: ports.append(current_port) current_port += 1 if current_port > current_range[1]: break if len(ports) != 3: raise RuntimeError('Could not find enough free ports') else: ports = storagedriver.ports ip_path = Configuration.get('ovs.core.ip.path') if ip_path is None: ip_path = "`which ip`" cmd = "{0} a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1".format(ip_path) ipaddresses = client.run(cmd).strip().split('\n') ipaddresses = [ipaddr.strip() for ipaddr in ipaddresses] grid_ip = System.read_remote_config(client, 'ovs.grid.ip') if grid_ip in ipaddresses: ipaddresses.remove(grid_ip) if not ipaddresses: raise RuntimeError('No available ip addresses found suitable for Storage Router storage ip') if storagerouter.pmachine.hvtype == 'KVM': volumedriver_storageip = '127.0.0.1' else: volumedriver_storageip = parameters['storage_ip'] vrouter_id = '{0}{1}'.format(vpool_name, unique_id) vrouter_config = {'vrouter_id': vrouter_id, 'vrouter_redirect_timeout_ms': '5000', 'vrouter_routing_retries': 10, 'vrouter_volume_read_threshold': 1024, 'vrouter_volume_write_threshold': 1024, 'vrouter_file_read_threshold': 1024, 'vrouter_file_write_threshold': 1024, 'vrouter_min_workers': 4, 'vrouter_max_workers': 16} 
voldrv_arakoon_cluster_id = str(System.read_remote_config(client, 'volumedriver.arakoon.clusterid')) voldrv_arakoon_cluster = ArakoonManagementEx().getCluster(voldrv_arakoon_cluster_id) voldrv_arakoon_client_config = voldrv_arakoon_cluster.getClientConfig() arakoon_node_configs = [] for arakoon_node in voldrv_arakoon_client_config.keys(): arakoon_node_configs.append(ArakoonNodeConfig(arakoon_node, voldrv_arakoon_client_config[arakoon_node][0][0], voldrv_arakoon_client_config[arakoon_node][1])) vrouter_clusterregistry = ClusterRegistry(str(vpool.guid), voldrv_arakoon_cluster_id, arakoon_node_configs) node_configs = [] for existing_storagedriver in StorageDriverList.get_storagedrivers(): if existing_storagedriver.vpool_guid == vpool.guid: node_configs.append(ClusterNodeConfig(str(existing_storagedriver.storagedriver_id), str(existing_storagedriver.cluster_ip), existing_storagedriver.ports[0], existing_storagedriver.ports[1], existing_storagedriver.ports[2])) if new_storagedriver: node_configs.append(ClusterNodeConfig(vrouter_id, grid_ip, ports[0], ports[1], ports[2])) vrouter_clusterregistry.set_node_configs(node_configs) readcaches = [{'path': readcache1, 'size': readcache1_size}] if readcache2: readcaches.append({'path': readcache2, 'size': readcache2_size}) scocaches = [{'path': scocache, 'size': scocache_size}] filesystem_config = {'fs_backend_path': mountpoint_bfs} volumemanager_config = {'metadata_path': metadatapath, 'tlog_path': tlogpath} storagedriver_config_script = """ from ovs.plugin.provider.configuration import Configuration from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration fd_config = {{'fd_cache_path': '{11}', 'fd_extent_cache_capacity': '1024', 'fd_namespace' : 'fd-{0}-{12}'}} storagedriver_configuration = StorageDriverConfiguration('{0}') storagedriver_configuration.configure_backend({1}) storagedriver_configuration.configure_readcache({2}, Configuration.get('volumedriver.readcache.serialization.path') + '/{0}') 
storagedriver_configuration.configure_scocache({3}, '1GB', '2GB') storagedriver_configuration.configure_failovercache('{4}') storagedriver_configuration.configure_filesystem({5}) storagedriver_configuration.configure_volumemanager({6}) storagedriver_configuration.configure_volumerouter('{12}', {7}) storagedriver_configuration.configure_arakoon_cluster('{8}', {9}) storagedriver_configuration.configure_hypervisor('{10}') storagedriver_configuration.configure_filedriver(fd_config) """.format(vpool_name, vpool.metadata, readcaches, scocaches, failovercache, filesystem_config, volumemanager_config, vrouter_config, voldrv_arakoon_cluster_id, voldrv_arakoon_client_config, storagerouter.pmachine.hvtype, fdcache, vpool.guid) System.exec_remote_python(client, storagedriver_config_script) remote_script = """ import os from configobj import ConfigObj from ovs.plugin.provider.configuration import Configuration protocol = Configuration.get('ovs.core.broker.protocol') login = Configuration.get('ovs.core.broker.login') password = Configuration.get('ovs.core.broker.password') vpool_name = {0} uris = [] cfg = ConfigObj('/opt/OpenvStorage/config/rabbitmqclient.cfg') main_section = cfg.get('main') nodes = main_section['nodes'] if type(main_section['nodes']) == list else [main_section['nodes']] for node in nodes: uris.append({{'amqp_uri': '{{0}}://{{1}}:{{2}}@{{3}}'.format(protocol, login, password, cfg.get(node)['location'])}}) from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration queue_config = {{'events_amqp_routing_key': Configuration.get('ovs.core.broker.volumerouter.queue'), 'events_amqp_uris': uris}} for config_file in os.listdir('/opt/OpenvStorage/config/voldrv_vpools'): this_vpool_name = config_file.replace('.json', '') if config_file.endswith('.json') and (vpool_name is None or vpool_name == this_vpool_name): storagedriver_configuration = StorageDriverConfiguration(this_vpool_name) storagedriver_configuration.configure_event_publisher(queue_config) 
""".format(vpool_name if vpool_name is None else "'{0}'".format(vpool_name)) System.exec_remote_python(client, remote_script) # Updating the model storagedriver.storagedriver_id = vrouter_id storagedriver.name = vrouter_id.replace('_', ' ') storagedriver.description = storagedriver.name storagedriver.storage_ip = volumedriver_storageip storagedriver.cluster_ip = grid_ip storagedriver.ports = ports storagedriver.mountpoint = '/mnt/{0}'.format(vpool_name) storagedriver.mountpoint_temp = mountpoint_temp storagedriver.mountpoint_readcache1 = mountpoint_readcache1 storagedriver.mountpoint_readcache2 = mountpoint_readcache2 storagedriver.mountpoint_writecache = mountpoint_writecache storagedriver.mountpoint_foc = mountpoint_foc storagedriver.mountpoint_bfs = mountpoint_bfs storagedriver.mountpoint_md = mountpoint_md storagedriver.storagerouter = storagerouter storagedriver.vpool = vpool storagedriver.save() dirs2create.append(storagedriver.mountpoint) dirs2create.append(mountpoint_writecache + '/' + '/fd_' + vpool_name) dirs2create.append('{0}/fd_{1}'.format(mountpoint_writecache, vpool_name)) file_create_script = """ import os for directory in {0}: if not os.path.exists(directory): os.makedirs(directory) for filename in {1}: if not os.path.exists(filename): open(filename, 'a').close() """.format(dirs2create, files2create) System.exec_remote_python(client, file_create_script) voldrv_config_file = '{0}/voldrv_vpools/{1}.json'.format(System.read_remote_config(client, 'ovs.core.cfgdir'), vpool_name) log_file = '/var/log/ovs/volumedriver/{0}.log'.format(vpool_name) vd_cmd = '/usr/bin/volumedriver_fs -f --config-file={0} --mountpoint {1} --logrotation --logfile {2} -o big_writes -o sync_read -o allow_other'.format( voldrv_config_file, storagedriver.mountpoint, log_file) if storagerouter.pmachine.hvtype == 'KVM': vd_stopcmd = 'umount {0}'.format(storagedriver.mountpoint) else: vd_stopcmd = 'exportfs -u *:{0}; umount {0}'.format(storagedriver.mountpoint) vd_name = 
'volumedriver_{}'.format(vpool_name) log_file = '/var/log/ovs/volumedriver/foc_{0}.log'.format(vpool_name) fc_cmd = '/usr/bin/failovercachehelper --config-file={0} --logfile={1}'.format(voldrv_config_file, log_file) fc_name = 'failovercache_{0}'.format(vpool_name) params = {'<VPOOL_MOUNTPOINT>': storagedriver.mountpoint, '<HYPERVISOR_TYPE>': storagerouter.pmachine.hvtype, '<VPOOL_NAME>': vpool_name, '<UUID>': str(uuid.uuid4())} if Osdist.is_ubuntu(client): if client.file_exists('/opt/OpenvStorage/config/templates/upstart/ovs-volumedriver.conf'): client.run('cp -f /opt/OpenvStorage/config/templates/upstart/ovs-volumedriver.conf /opt/OpenvStorage/config/templates/upstart/ovs-volumedriver_{0}.conf'.format(vpool_name)) client.run('cp -f /opt/OpenvStorage/config/templates/upstart/ovs-failovercache.conf /opt/OpenvStorage/config/templates/upstart/ovs-failovercache_{0}.conf'.format(vpool_name)) else: if client.file_exists('/opt/OpenvStorage/config/templates/systemd/ovs-volumedriver.service'): client.run('cp -f /opt/OpenvStorage/config/templates/systemd/ovs-volumedriver.service /opt/OpenvStorage/config/templates/systemd/ovs-volumedriver_{0}.service'.format(vpool_name)) client.run('cp -f /opt/OpenvStorage/config/templates/systemd/ovs-failovercache.service /opt/OpenvStorage/config/templates/systemd/ovs-failovercache_{0}.service'.format(vpool_name)) service_script = """ from ovs.plugin.provider.service import Service Service.add_service(package=('openvstorage', 'volumedriver'), name='{0}', command='{1}', stop_command='{2}', params={5}) Service.add_service(package=('openvstorage', 'failovercache'), name='{3}', command='{4}', stop_command=None, params={5}) """.format( vd_name, vd_cmd, vd_stopcmd, fc_name, fc_cmd, params ) System.exec_remote_python(client, service_script) if storagerouter.pmachine.hvtype == 'VMWARE': client.run("grep -q '/tmp localhost(ro,no_subtree_check)' /etc/exports || echo '/tmp localhost(ro,no_subtree_check)' >> /etc/exports") if Osdist.is_ubuntu(client): 
client.run('service nfs-kernel-server start') else: client.run('service nfs start') if storagerouter.pmachine.hvtype == 'KVM': client.run('virsh pool-define-as {0} dir - - - - {1}'.format(vpool_name, storagedriver.mountpoint)) client.run('virsh pool-build {0}'.format(vpool_name)) client.run('virsh pool-start {0}'.format(vpool_name)) client.run('virsh pool-autostart {0}'.format(vpool_name)) # Start services for node in nodes: node_client = SSHClient.load(node) for service in services: System.exec_remote_python(node_client, """ from ovs.plugin.provider.service import Service Service.enable_service('{0}') """.format(service)) System.exec_remote_python(node_client, """ from ovs.plugin.provider.service import Service Service.start_service('{0}') """.format(service)) # Fill vPool size vfs_info = os.statvfs('/mnt/{0}'.format(vpool_name)) vpool.size = vfs_info.f_blocks * vfs_info.f_bsize vpool.save() # Configure Cinder ovsdb = PersistentFactory.get_client() vpool_config_key = str('ovs_openstack_cinder_%s' % storagedriver.vpool_guid) if ovsdb.exists(vpool_config_key): # Second node gets values saved by first node cinder_password, cinder_user, tenant_name, controller_ip, config_cinder = ovsdb.get(vpool_config_key) else: config_cinder = parameters.get('config_cinder', False) cinder_password = '' cinder_user = '' tenant_name = '' controller_ip = '' if config_cinder: cinder_password = parameters.get('cinder_pass', cinder_password) cinder_user = parameters.get('cinder_user', cinder_user) tenant_name = parameters.get('cinder_tenant', tenant_name) controller_ip = parameters.get('cinder_controller', controller_ip) # Keystone host if cinder_password: osc = OpenStackCinder(cinder_password = cinder_password, cinder_user = cinder_user, tenant_name = tenant_name, controller_ip = controller_ip) osc.configure_vpool(vpool_name, storagedriver.mountpoint) # Save values for first node to use ovsdb.set(vpool_config_key, [cinder_password, cinder_user, tenant_name, controller_ip, config_cinder])
def test_clone(self):
    """
    Test the clone functionality
    - Create a vDisk with name 'clone1'
    - Clone the vDisk and make some assertions
    - Attempt to clone again using same name and same devicename
    - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
    - Attempt to clone on Storage Driver without MDS service
    - Attempt to clone from snapshot which is not yet completely synced to backend
    - Attempt to delete the snapshot from which a clone was made
    - Clone the vDisk on another Storage Router
    - Clone another vDisk with name 'clone1' linked to another vPool
    """
    # Model: 2 vPools, 3 Storage Routers; 1 StorageDriver per vPool, both on Storage Router 1
    structure = Helper.build_service_structure(
        {'vpools': [1, 2],
         'storagerouters': [1, 2, 3],
         'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    vpools = structure['vpools']
    mds_services = structure['mds_services']
    service_type = structure['service_type']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
    self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

    # Basic clone scenario
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')
    clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshots[0]['guid'])
    self.assertTrue(expr=len(clones) == 1, msg='Expected to find 1 vDisk with parent snapshot')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 1, msg='Expected to find 1 child vDisk')
    for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
        self.assertTrue(expr=expected_key in clone1_info, msg='Expected to find key "{0}" in clone_info'.format(expected_key))
    self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'], msg='Guids do not match')
    self.assertTrue(expr=clones[0].name == clone1_info['name'], msg='Names do not match')
    self.assertTrue(expr=clones[0].devicename == clone1_info['backingdevice'], msg='Device names do not match')

    # Attempt to clone again with same name
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 1')

    # Attempt to clone again with a name which will have identical devicename
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1%')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 2')

    # Attempt to clone on Storage Router on which vPool is not extended
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', storagerouter_guid=storagerouters[2].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 3')

    # Attempt to clone on non-existing Storage Driver
    storagedrivers[1].storagedriver_id = 'non-existing'
    storagedrivers[1].save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 4')
    # Restore the valid storagedriver_id for the remaining scenarios
    storagedrivers[1].storagedriver_id = '1'
    storagedrivers[1].save()

    # Attempt to clone on Storage Driver without MDS service
    mds_services[1].service.storagerouter = storagerouters[3]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 5')
    # Move the MDS service back so cloning can succeed again
    mds_services[1].service.storagerouter = storagerouters[1]
    mds_services[1].service.save()

    # Attempt to clone by providing snapshot_id not synced to backend
    self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find only 1 snapshot before cloning')
    metadata = {'label': 'label1',
                'timestamp': int(time.time()),
                'is_sticky': False,
                'in_backend': False,
                'is_automatic': True,
                'is_consistent': True}
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata=metadata)
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots')
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 6')

    # Update backend synced flag and retry
    vdisk1.storagedriver_client._set_snapshot_in_backend(vdisk1.volume_id, snapshot_id, True)
    vdisk1.invalidate_dynamics('snapshots')
    VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    vdisk1.invalidate_dynamics()
    self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 2, msg='Expected to find 2 child vDisks')
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots after cloning from a specified snapshot')

    # Attempt to delete the snapshot that has clones
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid, snapshot_id=snapshot_id)

    # Clone on specific Storage Router: extend vPool 1 to Storage Router 2 (StorageDriver + MDS service) first
    storagedriver = StorageDriver()
    storagedriver.vpool = vpools[1]
    storagedriver.storagerouter = storagerouters[2]
    storagedriver.name = '3'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storagerouters[2].ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '3'
    storagedriver.ports = {'management': 1, 'xmlrpc': 2, 'dtl': 3, 'edge': 4}
    storagedriver.save()
    s_id = '{0}-1'.format(storagedriver.storagerouter.name)
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [3]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()
    clone3 = VDisk(VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone3', storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
    self.assertTrue(expr=clone3.storagerouter_guid == storagerouters[2].guid, msg='Incorrect Storage Router on which the clone is attached')

    # Clone vDisk with existing name on another vPool
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))
    clone_vdisk2 = VDisk(VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone1')['vdisk_guid'])
    self.assertTrue(expr=clone_vdisk2.vpool == vpools[2], msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
    self.assertTrue(expr=len([vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1']) == 2, msg='Expected to find 2 vDisks with name "clone1"')

    # Attempt to clone without specifying snapshot and snapshot fails to sync to backend
    StorageRouterClient.synced = False
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_2', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone4')
    vdisk2.invalidate_dynamics()
    self.assertTrue(expr=len(vdisk2.snapshots) == 0, msg='Expected to find 0 snapshots after clone failure')
    self.assertTrue(expr=len(vdisk2.child_vdisks) == 0, msg='Expected to find 0 children after clone failure')
    StorageRouterClient.synced = True
def build_dal_structure(structure, previous_structure=None):
    """
    Builds a model structure
    Example:
        structure = DalHelper.build_service_structure(
            {'vpools': [1],
             'domains': [],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouter_domains': []}  # (<id>, <storagerouter_id>, <domain_id>)
        )

    :param structure: Dict describing which objects to create, keyed by object kind (ids / id-tuples, see example)
    :param previous_structure: Result of an earlier invocation; objects already present there are reused
                               instead of recreated, so structures can be built up incrementally
    :return: Dict mapping object kind to a dict of id -> created DAL object
    """
    Configuration.set(key=Configuration.EDITION_KEY, value=PackageFactory.EDITION_ENTERPRISE)
    if previous_structure is None:
        previous_structure = {}
    # Start from the objects of the previous invocation (if any)
    vdisks = previous_structure.get('vdisks', {})
    vpools = previous_structure.get('vpools', {})
    domains = previous_structure.get('domains', {})
    services = previous_structure.get('services', {})
    mds_services = previous_structure.get('mds_services', {})
    storagerouters = previous_structure.get('storagerouters', {})
    storagedrivers = previous_structure.get('storagedrivers', {})
    storagerouter_domains = previous_structure.get('storagerouter_domains', {})

    # Ensure every known ServiceType exists in the model
    service_types = {}
    for service_type_name in ServiceType.SERVICE_TYPES.values():
        service_type = ServiceTypeList.get_by_name(service_type_name)
        if service_type is None:
            service_type = ServiceType()
            service_type.name = service_type_name
            service_type.save()
        service_types[service_type_name] = service_type
    srclients = {}  # vpool_id -> StorageRouterClient, used below to create volumes for the requested vDisks
    for domain_id in structure.get('domains', []):
        if domain_id not in domains:
            domain = Domain()
            domain.name = 'domain_{0}'.format(domain_id)
            domain.save()
            domains[domain_id] = domain
    for vpool_id in structure.get('vpools', []):
        if vpool_id not in vpools:
            vpool = VPool()
            vpool.name = str(vpool_id)
            vpool.status = 'RUNNING'
            vpool.metadata = {'backend': {}, 'caching_info': {}}
            vpool.metadata_store_bits = 5
            vpool.save()
            vpools[vpool_id] = vpool
        else:
            vpool = vpools[vpool_id]
        srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
        # Default MDS configuration for the vPool
        Configuration.set('/ovs/vpools/{0}/mds_config|mds_tlogs'.format(vpool.guid), 100)
        Configuration.set('/ovs/vpools/{0}/mds_config|mds_safety'.format(vpool.guid), 2)
        Configuration.set('/ovs/vpools/{0}/mds_config|mds_maxload'.format(vpool.guid), 75)
        Configuration.set('/ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(vpool.guid), json.dumps({}, indent=4), raw=True)
    for sr_id in structure.get('storagerouters', []):
        if sr_id not in storagerouters:
            storagerouter = StorageRouter()
            storagerouter.name = str(sr_id)
            storagerouter.ip = '10.0.0.{0}'.format(sr_id)
            storagerouter.rdma_capable = False
            storagerouter.node_type = 'MASTER'
            storagerouter.machine_id = str(sr_id)
            storagerouter.save()
            storagerouters[sr_id] = storagerouter
            # Every new StorageRouter gets 1 disk with a single partition carrying the DB and SCRUB roles
            disk = Disk()
            disk.storagerouter = storagerouter
            disk.state = 'OK'
            disk.name = '/dev/uda'
            disk.size = 1 * 1024**4
            disk.is_ssd = True
            disk.aliases = ['/dev/uda']
            disk.save()
            partition = DiskPartition()
            partition.offset = 0
            partition.size = disk.size
            partition.aliases = ['/dev/uda-1']
            partition.state = 'OK'
            partition.mountpoint = '/tmp/unittest/sr_{0}/disk_1/partition_1'.format(sr_id)
            partition.disk = disk
            partition.roles = [DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB]
            partition.save()
        else:
            storagerouter = storagerouters[sr_id]
        # noinspection PyProtectedMember
        System._machine_id[storagerouter.ip] = str(sr_id)
        # Reserve non-overlapping port ranges per StorageRouter (100 ports each)
        mds_start = 10000 + 100 * (sr_id - 1)
        mds_end = 10000 + 100 * sr_id - 1
        arakoon_start = 20000 + 100 * (sr_id - 1)
        storagedriver_start = 30000 + 100 * (sr_id - 1)
        storagedriver_end = 30000 + 100 * sr_id - 1
        Configuration.initialize_host(host_id=sr_id,
                                      port_info={'mds': [mds_start, mds_end],
                                                 'arakoon': arakoon_start,
                                                 'storagedriver': [storagedriver_start, storagedriver_end]})
    for sd_id, vpool_id, sr_id in structure.get('storagedrivers', ()):
        if sd_id not in storagedrivers:
            storagedriver = StorageDriver()
            storagedriver.vpool = vpools[vpool_id]
            storagedriver.storagerouter = storagerouters[sr_id]
            storagedriver.name = str(sd_id)
            storagedriver.mountpoint = '/'
            storagedriver.cluster_ip = storagerouters[sr_id].ip
            storagedriver.storage_ip = '10.0.1.{0}'.format(sr_id)
            storagedriver.storagedriver_id = str(sd_id)
            storagedriver.ports = {'management': 1,
                                   'xmlrpc': 2,
                                   'dtl': 3,
                                   'edge': 4}
            storagedriver.save()
            storagedrivers[sd_id] = storagedriver
            DalHelper.set_vpool_storage_driver_configuration(vpool=vpools[vpool_id], storagedriver=storagedriver)
    for mds_id, sd_id in structure.get('mds_services', ()):
        if mds_id not in mds_services:
            sd = storagedrivers[sd_id]
            s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
            service = Service()
            service.name = s_id
            service.storagerouter = sd.storagerouter
            service.ports = [mds_id]
            service.type = service_types['MetadataServer']
            service.save()
            services[s_id] = service
            mds_service = MDSService()
            mds_service.service = service
            mds_service.number = 0
            mds_service.capacity = 10
            mds_service.vpool = sd.vpool
            mds_service.save()
            mds_services[mds_id] = mds_service
            StorageDriverController.add_storagedriverpartition(sd, {'size': None,
                                                                   'role': DiskPartition.ROLES.DB,
                                                                   'sub_role': StorageDriverPartition.SUBROLE.MDS,
                                                                   'partition': sd.storagerouter.disks[0].partitions[0],
                                                                   'mds_service': mds_service})
    for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get('vdisks', ()):
        if vdisk_id not in vdisks:
            vpool = vpools[vpool_id]
            devicename = 'vdisk_{0}'.format(vdisk_id)
            mds_backend_config = DalHelper.generate_mds_metadata_backend_config([] if mds_id is None else [mds_services[mds_id]])
            # Create the actual volume via the (mocked) StorageRouterClient before modeling the vDisk
            volume_id = srclients[vpool_id].create_volume(devicename, mds_backend_config, 0, str(storage_driver_id))
            vdisk = VDisk()
            vdisk.name = str(vdisk_id)
            vdisk.devicename = devicename
            vdisk.volume_id = volume_id
            vdisk.vpool = vpool
            vdisk.size = 0
            vdisk.save()
            vdisk.reload_client('storagedriver')
            vdisks[vdisk_id] = vdisk
    for srd_id, sr_id, domain_id, backup in structure.get('storagerouter_domains', ()):
        if srd_id not in storagerouter_domains:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = backup
            sr_domain.domain = domains[domain_id]
            sr_domain.storagerouter = storagerouters[sr_id]
            sr_domain.save()
            storagerouter_domains[srd_id] = sr_domain
    return {'vdisks': vdisks,
            'vpools': vpools,
            'domains': domains,
            'services': services,
            'mds_services': mds_services,
            'service_types': service_types,
            'storagerouters': storagerouters,
            'storagedrivers': storagedrivers,
            'storagerouter_domains': storagerouter_domains}
def _build_service_structure(self, structure):
    """
    Builds an MDS service structure

    :param structure: dict with keys 'vpools' (list of ids), 'storagerouters' (list of ids),
                      'storagedrivers' (list of (<id>, <vpool_id>, <storagerouter_id>) tuples) and
                      'mds_services' (list of (<id>, <storagedriver_id>) tuples)
    :return: tuple (vpools, storagerouters, storagedrivers, services, mds_services, service_type)
             where each element except service_type is a dict keyed on the ids from ``structure``
    """
    vpools = {}
    storagerouters = {}
    storagedrivers = {}
    services = {}
    mds_services = {}
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()
    for vpool_id in structure['vpools']:
        vpool = VPool()
        vpool.name = str(vpool_id)
        vpool.backend_type = BackendType()
        vpool.save()
        vpools[vpool_id] = vpool
    for sr_id in structure['storagerouters']:
        storagerouter = StorageRouter()
        storagerouter.name = str(sr_id)
        storagerouter.ip = '10.0.0.{0}'.format(sr_id)
        storagerouter.pmachine = PMachine()
        storagerouter.save()
        storagerouters[sr_id] = storagerouter
    for sd_id, vpool_id, sr_id in structure['storagedrivers']:
        storagedriver = StorageDriver()
        storagedriver.vpool = vpools[vpool_id]
        storagedriver.storagerouter = storagerouters[sr_id]
        storagedriver.name = str(sd_id)
        # All mount points point to '/' since unit tests never touch an actual filesystem
        # (original code assigned mountpoint_temp twice; the duplicate has been removed)
        storagedriver.mountpoint_temp = '/'
        storagedriver.mountpoint_foc = '/'
        storagedriver.mountpoint_readcaches = ['/']
        storagedriver.mountpoint_writecaches = ['/']
        storagedriver.mountpoint_md = '/'
        storagedriver.mountpoint_bfs = '/'
        storagedriver.mountpoint_fragmentcache = '/'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouters[sr_id].ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = str(sd_id)
        storagedriver.ports = [1, 2, 3]
        storagedriver.save()
        storagedrivers[sd_id] = storagedriver
    for mds_id, sd_id in structure['mds_services']:
        sd = storagedrivers[sd_id]
        s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
        service = Service()
        service.name = s_id
        service.storagerouter = sd.storagerouter
        service.ports = [mds_id]
        service.type = service_type
        service.save()
        services[s_id] = service
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = sd.vpool
        mds_service.save()
        mds_services[mds_id] = mds_service
    return vpools, storagerouters, storagedrivers, services, mds_services, service_type
def test_from_single_node_to_multi_node(self): """ Deploy a vDisk on a single node --> This should result in no DTL configured Add an additional node and verify DTL will be set """ # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target || # | sr 1 | 1 | | | | structure = DalHelper.build_dal_structure( {'vpools': [1], 'vdisks': [(1, 1, 1, 1)], # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>) 'mds_services': [(1, 1)], # (<id>, <storagedriver_id>) 'storagerouters': [1], 'storagedrivers': [(1, 1, 1)]} # (<id>, <vpool_id>, <sr_id>) ) vpool = structure['vpools'][1] vdisk = structure['vdisks'][1] storagerouters = structure['storagerouters'] self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters) self._run_and_validate_dtl_checkup(vdisk=vdisk, validations=[{'key': 'config', 'value': None}]) # Add a Storage Router # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target || # | sr 1 | 1 | | | | # | sr 2 | | | | 1 | storagerouter = StorageRouter() storagerouter.name = '2' storagerouter.ip = '10.0.0.2' storagerouter.rdma_capable = False storagerouter.save() storagerouters[2] = storagerouter self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters) storagedriver = StorageDriver() storagedriver.vpool = vpool storagedriver.storagerouter = storagerouter storagedriver.name = '2' storagedriver.mountpoint = '/' storagedriver.cluster_ip = storagerouter.ip storagedriver.storage_ip = '10.0.1.2' storagedriver.storagedriver_id = '2' storagedriver.ports = {'management': 1, 'xmlrpc': 2, 'dtl': 3, 'edge': 4} storagedriver.save() self._run_and_validate_dtl_checkup(vdisk=vdisk, validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip}, {'key': 'port', 'value': 3}, {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def shrink_vpool(cls, storagedriver_guid, offline_storage_router_guids=None):
    """
    Removes a StorageDriver (if its the last StorageDriver for a vPool, the vPool is removed as well)
    :param storagedriver_guid: Guid of the StorageDriver to remove
    :type storagedriver_guid: str
    :param offline_storage_router_guids: Guids of StorageRouters which are offline and will be removed from cluster.
                                         WHETHER VPOOL WILL BE DELETED DEPENDS ON THIS
                                         Defaults to an empty list
    :type offline_storage_router_guids: list
    :return: None
    :rtype: NoneType
    """
    # TODO: Add logging
    # TODO: Unit test individual pieces of code
    # Validations
    if offline_storage_router_guids is None:  # Avoid the shared mutable default argument pitfall
        offline_storage_router_guids = []
    storagedriver = StorageDriver(storagedriver_guid)
    storagerouter = storagedriver.storagerouter
    cls._logger.info('StorageDriver {0} - Deleting StorageDriver {1}'.format(storagedriver.guid, storagedriver.name))

    vp_installer = VPoolInstaller(name=storagedriver.vpool.name)
    vp_installer.validate(storagedriver=storagedriver)

    sd_installer = StorageDriverInstaller(vp_installer=vp_installer, storagedriver=storagedriver)

    cls._logger.info('StorageDriver {0} - Checking availability of related StorageRouters'.format(storagedriver.guid, storagedriver.name))
    sr_client_map = SSHClient.get_clients(endpoints=[sd.storagerouter for sd in vp_installer.vpool.storagedrivers], user_names=['root'])
    sr_installer = StorageRouterInstaller(root_client=sr_client_map.get(storagerouter, {}).get('root'),
                                          storagerouter=storagerouter,
                                          vp_installer=vp_installer,
                                          sd_installer=sd_installer)

    # The caller must have declared exactly the unreachable StorageRouters as offline
    offline_srs = sr_client_map.pop('offline')
    if sorted([sr.guid for sr in offline_srs]) != sorted(offline_storage_router_guids):
        raise RuntimeError('Not all StorageRouters are reachable')

    if storagerouter not in offline_srs:
        # Refuse to remove while any process holds the vPool mount point open
        mtpt_pids = sr_installer.root_client.run("lsof -t +D '/mnt/{0}' || true".format(vp_installer.name.replace(r"'", r"'\''")), allow_insecure=True).splitlines()
        if len(mtpt_pids) > 0:
            raise RuntimeError('vPool cannot be deleted. Following processes keep the vPool mount point occupied: {0}'.format(', '.join(mtpt_pids)))

    # Retrieve reachable StorageDrivers
    reachable_storagedrivers = []
    for sd in vp_installer.vpool.storagedrivers:
        if sd.storagerouter not in sr_client_map:
            # StorageRouter is offline
            continue

        sd_key = '/ovs/vpools/{0}/hosts/{1}/config'.format(vp_installer.vpool.guid, sd.storagedriver_id)
        if Configuration.exists(sd_key) is True:
            path = Configuration.get_configuration_path(sd_key)
            with remote(sd.storagerouter.ip, [LocalStorageRouterClient]) as rem:
                try:
                    lsrc = rem.LocalStorageRouterClient(path)
                    lsrc.server_revision()  # 'Cheap' call to verify whether volumedriver is responsive
                    cls._logger.info('StorageDriver {0} - Responsive StorageDriver {1} on node with IP {2}'.format(storagedriver.guid, sd.name, sd.storagerouter.ip))
                    reachable_storagedrivers.append(sd)
                except Exception as exception:
                    if not is_connection_failure(exception):
                        raise

    if len(reachable_storagedrivers) == 0:
        raise RuntimeError('Could not find any responsive node in the cluster')

    # Start removal
    if vp_installer.storagedriver_amount > 1:
        vp_installer.update_status(status=VPool.STATUSES.SHRINKING)
    else:
        vp_installer.update_status(status=VPool.STATUSES.DELETING)

    # Clean up stale vDisks
    cls._logger.info('StorageDriver {0} - Removing stale vDisks'.format(storagedriver.guid))
    VDiskController.remove_stale_vdisks(vpool=vp_installer.vpool)

    # Reconfigure the MDSes
    cls._logger.info('StorageDriver {0} - Reconfiguring MDSes'.format(storagedriver.guid))
    for vdisk_guid in storagerouter.vdisks_guids:
        try:
            MDSServiceController.ensure_safety(vdisk_guid=vdisk_guid, excluded_storagerouter_guids=[storagerouter.guid] + offline_storage_router_guids)
        except Exception:
            cls._logger.exception('StorageDriver {0} - vDisk {1} - Ensuring MDS safety failed'.format(storagedriver.guid, vdisk_guid))

    # Validate that all MDSes on current StorageRouter have been moved away
    # Ensure safety does not always throw an error, that's why we perform this check here instead of in the Exception clause of above code
    vdisks = []
    for mds in vp_installer.mds_services:
        for junction in mds.vdisks:
            vdisk = junction.vdisk
            if vdisk in vdisks:
                continue
            vdisks.append(vdisk)
            cls._logger.critical('StorageDriver {0} - vDisk {1} {2} - MDS Services have not been migrated away'.format(storagedriver.guid, vdisk.guid, vdisk.name))
    if len(vdisks) > 0:
        # Put back in RUNNING, so it can be used again. Errors keep on displaying in GUI now anyway
        vp_installer.update_status(status=VPool.STATUSES.RUNNING)
        raise RuntimeError('Not all MDS Services have been successfully migrated away')

    # Start with actual removal
    # NOTE(review): the cleanup helpers below report errors through their boolean return value, so we must
    # accumulate with |=. The previous code used &= starting from False, which can never flip errors_found
    # to True and silently discarded every reported error — confirm the helpers' True-means-error convention.
    errors_found = False
    if storagerouter not in offline_srs:
        errors_found |= sd_installer.stop_services()

    errors_found |= vp_installer.configure_cluster_registry(exclude=[storagedriver], apply_on=reachable_storagedrivers)
    errors_found |= vp_installer.update_node_distance_map()
    errors_found |= vp_installer.remove_mds_services()
    errors_found |= sd_installer.clean_config_management()
    errors_found |= sd_installer.clean_model()

    if storagerouter not in offline_srs:
        errors_found |= sd_installer.clean_directories(mountpoints=StorageRouterController.get_mountpoints(client=sr_installer.root_client))

        try:
            DiskController.sync_with_reality(storagerouter_guid=storagerouter.guid)
        except Exception:
            cls._logger.exception('StorageDriver {0} - Synchronizing disks with reality failed'.format(storagedriver.guid))
            errors_found = True

    if vp_installer.storagedriver_amount > 1:
        # Update the vPool metadata and run DTL checkup
        vp_installer.vpool.metadata['caching_info'].pop(sr_installer.storagerouter.guid, None)
        vp_installer.vpool.save()

        try:
            VDiskController.dtl_checkup(vpool_guid=vp_installer.vpool.guid, ensure_single_timeout=600)
        except Exception:
            cls._logger.exception('StorageDriver {0} - DTL checkup failed for vPool {1} with guid {2}'.format(storagedriver.guid, vp_installer.name, vp_installer.vpool.guid))
    else:
        cls._logger.info('StorageDriver {0} - Removing vPool from model'.format(storagedriver.guid))
        # Clean up model
        try:
            vp_installer.vpool.delete()
        except Exception:
            errors_found = True
            cls._logger.exception('StorageDriver {0} - Cleaning up vPool from the model failed'.format(storagedriver.guid))
        Configuration.delete('/ovs/vpools/{0}'.format(vp_installer.vpool.guid))

    cls._logger.info('StorageDriver {0} - Running MDS checkup'.format(storagedriver.guid))
    try:
        MDSServiceController.mds_checkup()
    except Exception:
        cls._logger.exception('StorageDriver {0} - MDS checkup failed'.format(storagedriver.guid))

    # Update vPool status
    if errors_found is True:
        if vp_installer.storagedriver_amount > 1:
            vp_installer.update_status(status=VPool.STATUSES.FAILURE)
        raise RuntimeError('1 or more errors occurred while trying to remove the StorageDriver. Please check the logs for more information')

    if vp_installer.storagedriver_amount > 1:
        vp_installer.update_status(status=VPool.STATUSES.RUNNING)
    cls._logger.info('StorageDriver {0} - Deleted StorageDriver {1}'.format(storagedriver.guid, storagedriver.name))

    if len(VPoolList.get_vpools()) == 0:
        # Last vPool removed --> tear down the internal voldrv Arakoon cluster as well
        cluster_name = ArakoonInstaller.get_cluster_name('voldrv')
        if ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=cluster_name)['internal'] is True:
            cls._logger.debug('StorageDriver {0} - Removing Arakoon cluster {1}'.format(storagedriver.guid, cluster_name))
            try:
                installer = ArakoonInstaller(cluster_name=cluster_name)
                installer.load()
                installer.delete_cluster()
            except Exception:
                cls._logger.exception('StorageDriver {0} - Delete voldrv Arakoon cluster failed'.format(storagedriver.guid))
            service_type = ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.ARAKOON)
            service_name = ArakoonInstaller.get_service_name_for_cluster(cluster_name=cluster_name)
            for service in list(service_type.services):
                if service.name == service_name:
                    service.delete()

    # Remove watcher volumedriver service if last StorageDriver on current StorageRouter
    if len(storagerouter.storagedrivers) == 0 and storagerouter not in offline_srs:  # ensure client is initialized for StorageRouter
        try:
            if cls._service_manager.has_service(ServiceFactory.SERVICE_WATCHER_VOLDRV, client=sr_installer.root_client):
                cls._service_manager.stop_service(ServiceFactory.SERVICE_WATCHER_VOLDRV, client=sr_installer.root_client)
                cls._service_manager.remove_service(ServiceFactory.SERVICE_WATCHER_VOLDRV, client=sr_installer.root_client)
        except Exception:
            cls._logger.exception('StorageDriver {0} - {1} service deletion failed'.format(storagedriver.guid, ServiceFactory.SERVICE_WATCHER_VOLDRV))
def create_from_template(diskguid, machinename, devicename, pmachineguid, machineguid=None, storagedriver_guid=None):
    """
    Create a new vDisk by cloning a template vDisk on the volumedriver.

    :param diskguid: Guid of the template vDisk to clone from
    :param machinename: Name of the machine, used by the hypervisor to build the backing device path
    :param devicename: Device file name for the disk (eg: mydisk-flat.vmdk)
    :param pmachineguid: Guid of the PMachine whose hypervisor type determines the disk path layout
    :param machineguid: Optional guid of the vMachine to assign the new disk to;
                        defaults to the template disk's own vMachine
    :param storagedriver_guid: Optional guid of the StorageDriver to create the clone on;
                               defaults to the template disk's StorageDriver
    :return: Dict with keys 'diskguid', 'name' and 'backingdevice' of the new disk
    :raises RuntimeError: When the given disk is attached to a vMachine that is not a template
    """
    pmachine = PMachine(pmachineguid)
    hypervisor = Factory.get(pmachine)
    # Hypervisor-specific location of the new backing device (eg: myVM/mydisk-flat.vmdk)
    disk_path = hypervisor.get_disk_path(machinename, devicename)
    description = '{} {}'.format(machinename, devicename)
    # Only these attributes are copied from the template onto the new vDisk
    properties_to_clone = [
        'description', 'size', 'type', 'retentionpolicyid', 'snapshotpolicyid',
        'vmachine', 'vpool'
    ]
    disk = VDisk(diskguid)
    if disk.vmachine and not disk.vmachine.is_vtemplate:
        # Disk might not be attached to a vmachine, but still be a template
        raise RuntimeError('The given disk does not belong to a template')
    # Determine on which StorageDriver the clone must be created
    if storagedriver_guid is not None:
        storagedriver_id = StorageDriver(
            storagedriver_guid).storagedriver_id
    else:
        storagedriver_id = disk.storagedriver_id
    new_disk = VDisk()
    new_disk.copy(disk, include=properties_to_clone)
    new_disk.vpool = disk.vpool
    new_disk.devicename = hypervisor.clean_backing_disk_filename(disk_path)
    new_disk.parent_vdisk = disk
    # NOTE(review): suffix is '-clone' although this creates from a template — confirm intended
    new_disk.name = '{}-clone'.format(disk.name)
    new_disk.description = description
    new_disk.vmachine = VMachine(
        machineguid) if machineguid else disk.vmachine
    # Persist the model object before creating the volume on the volumedriver
    new_disk.save()

    logger.info(
        'Create disk from template {} to new disk {} to location {}'.
        format(disk.name, new_disk.name, disk_path))
    try:
        volume_id = disk.storagedriver_client.create_clone_from_template(
            disk_path, str(disk.volume_id), node_id=str(storagedriver_id))
        new_disk.volume_id = volume_id
        new_disk.save()
    except Exception as ex:
        logger.error(
            'Clone disk on volumedriver level failed with exception: {0}'.
            format(str(ex)))
        # Roll back the model entry when the volumedriver clone failed
        new_disk.delete()
        raise
    return {
        'diskguid': new_disk.guid,
        'name': new_disk.name,
        'backingdevice': disk_path
    }
def test_clone(self):
    """
    Test the clone functionality
        - Create a vDisk with name 'clone1'
        - Clone the vDisk and make some assertions
        - Attempt to clone again using same name and same devicename
        - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
        - Attempt to clone on Storage Driver without MDS service
        - Attempt to clone from snapshot which is not yet completely synced to backend
        - Attempt to delete the snapshot from which a clone was made
        - Clone the vDisk on another Storage Router
        - Clone another vDisk with name 'clone1' linked to another vPool
    """
    # Build a minimal DAL model: 2 vPools, 3 StorageRouters, 2 StorageDrivers and 2 MDS services
    structure = DalHelper.build_dal_structure({
        'vpools': [1, 2],
        'storagerouters': [1, 2, 3],
        'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
        'mds_services': [(1, 1), (2, 2)]  # (<id>, <storagedriver_id>)
    })
    vpools = structure['vpools']
    mds_services = structure['mds_services']
    service_type = structure['service_types']['MetadataServer']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
    self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

    # Basic clone scenario: cloning implicitly creates a snapshot on the source vDisk
    vdisk1 = VDisk(
        VDiskController.create_new(
            volume_name='vdisk_1',
            volume_size=1024 ** 3,
            storagedriver_guid=storagedrivers[1].guid))
    clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')
    clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshot_ids[0])
    self.assertTrue(expr=len(clones) == 1,
                    msg='Expected to find 1 vDisk with parent snapshot')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 1,
                    msg='Expected to find 1 child vDisk')
    # Validate the structure of the clone() return value
    for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
        self.assertTrue(
            expr=expected_key in clone1_info,
            msg='Expected to find key "{0}" in clone_info'.format(expected_key))
    self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'],
                    msg='Guids do not match')
    self.assertTrue(expr=clones[0].name == clone1_info['name'],
                    msg='Names do not match')
    self.assertTrue(expr=clones[0].devicename == clone1_info['backingdevice'],
                    msg='Device names do not match')

    # Attempt to clone again with same name
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(
        expr=len(vdisks) == 2,
        msg='Expected to find 2 vDisks after failed clone attempt 1')

    # Attempt to clone again with a name which will have identical devicename
    # ('%' is stripped when the devicename is generated, so this collides with 'clone1')
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1%')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(
        expr=len(vdisks) == 2,
        msg='Expected to find 2 vDisks after failed clone attempt 2')

    # Attempt to clone on Storage Router on which vPool is not extended
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid,
                              name='clone2',
                              storagerouter_guid=storagerouters[2].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(
        expr=len(vdisks) == 2,
        msg='Expected to find 2 vDisks after failed clone attempt 3')

    # Attempt to clone on non-existing Storage Driver: temporarily corrupt the id
    storagedrivers[1].storagedriver_id = 'non-existing'
    storagedrivers[1].save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(
        expr=len(vdisks) == 2,
        msg='Expected to find 2 vDisks after failed clone attempt 4')
    # Restore the valid storagedriver_id
    storagedrivers[1].storagedriver_id = '1'
    storagedrivers[1].save()

    # Attempt to clone on Storage Driver without MDS service:
    # move the MDS service to a Storage Router without the vPool, then move it back
    mds_services[1].service.storagerouter = storagerouters[3]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(
        expr=len(vdisks) == 2,
        msg='Expected to find 2 vDisks after failed clone attempt 5')
    mds_services[1].service.storagerouter = storagerouters[1]
    mds_services[1].service.save()

    # Attempt to clone by providing snapshot_id not synced to backend
    self.assertTrue(expr=len(vdisk1.snapshots) == 1,
                    msg='Expected to find only 1 snapshot before cloning')
    self.assertTrue(
        expr=len(vdisk1.snapshot_ids) == 1,
        msg='Expected to find only 1 snapshot ID before cloning')
    # in_backend=False marks the snapshot as not yet synced to the backend
    metadata = {
        'label': 'label1',
        'timestamp': int(time.time()),
        'is_sticky': False,
        'in_backend': False,
        'is_automatic': True,
        'is_consistent': True
    }
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid,
                                                  metadata=metadata)
    self.assertTrue(expr=len(vdisk1.snapshots) == 2,
                    msg='Expected to find 2 snapshots')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 2,
                    msg='Expected to find 2 snapshot IDs')
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid,
                              name='clone2',
                              snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(
        expr=len(vdisks) == 2,
        msg='Expected to find 2 vDisks after failed clone attempt 6')

    # Update backend synced flag and retry
    vdisk1.storagedriver_client._set_snapshot_in_backend(
        vdisk1.volume_id, snapshot_id, True)
    vdisk1.invalidate_dynamics(['snapshots', 'snapshot_ids'])
    VDiskController.clone(vdisk_guid=vdisk1.guid,
                          name='clone2',
                          snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    vdisk1.invalidate_dynamics()
    self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 2,
                    msg='Expected to find 2 child vDisks')
    # Cloning from an existing snapshot should not create an additional snapshot
    self.assertTrue(
        expr=len(vdisk1.snapshots) == 2,
        msg='Expected to find 2 snapshots after cloning from a specified snapshot')
    self.assertTrue(
        expr=len(vdisk1.snapshot_ids) == 2,
        msg='Expected to find 2 snapshot IDs after cloning from a specified snapshot')

    # Attempt to delete the snapshot that has clones
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                        snapshot_id=snapshot_id)

    # Clone on specific Storage Router: manually model a 3rd StorageDriver with an MDS service
    storagedriver = StorageDriver()
    storagedriver.vpool = vpools[1]
    storagedriver.storagerouter = storagerouters[2]
    storagedriver.name = '3'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storagerouters[2].ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '3'
    storagedriver.ports = {
        'management': 1,
        'xmlrpc': 2,
        'dtl': 3,
        'edge': 4
    }
    storagedriver.save()

    s_id = '{0}-1'.format(storagedriver.storagerouter.name)
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [3]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()
    clone3 = VDisk(
        VDiskController.clone(
            vdisk_guid=vdisk1.guid,
            name='clone3',
            storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
    self.assertTrue(
        expr=clone3.storagerouter_guid == storagerouters[2].guid,
        msg='Incorrect Storage Router on which the clone is attached')

    # Clone vDisk with existing name on another vPool: name clash is allowed across vPools
    vdisk2 = VDisk(
        VDiskController.create_new(
            volume_name='vdisk_1',
            volume_size=1024 ** 3,
            storagedriver_guid=storagedrivers[2].guid))
    clone_vdisk2 = VDisk(
        VDiskController.clone(vdisk_guid=vdisk2.guid,
                              name='clone1')['vdisk_guid'])
    self.assertTrue(
        expr=clone_vdisk2.vpool == vpools[2],
        msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
    self.assertTrue(expr=len([
        vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1'
    ]) == 2, msg='Expected to find 2 vDisks with name "clone1"')

    # Attempt to clone without specifying snapshot and snapshot fails to sync to backend;
    # the implicitly created snapshot should be rolled back on failure
    StorageRouterClient.synced = False
    vdisk2 = VDisk(
        VDiskController.create_new(
            volume_name='vdisk_2',
            volume_size=1024 ** 3,
            storagedriver_guid=storagedrivers[1].guid))
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone4')
    vdisk2.invalidate_dynamics()
    self.assertTrue(expr=len(vdisk2.snapshots) == 0,
                    msg='Expected to find 0 snapshots after clone failure')
    self.assertTrue(
        expr=len(vdisk2.snapshot_ids) == 0,
        msg='Expected to find 0 snapshot IDs after clone failure')
    self.assertTrue(expr=len(vdisk2.child_vdisks) == 0,
                    msg='Expected to find 0 children after clone failure')
    # Restore the global synced flag for subsequent tests
    StorageRouterClient.synced = True
def delete_snapshots_storagedriver(storagedriver_guid, timestamp=None, group_id=None):
    """
    Delete snapshots per storagedriver & scrubbing policy

    Implemented delete snapshot policy:
    < 1d | 1d bucket | 1 | best of bucket   | 1d
    < 1w | 1d bucket | 6 | oldest of bucket | 7d = 1w
    < 1m | 1w bucket | 3 | oldest of bucket | 4w = 1m
    > 1m | delete

    :param storagedriver_guid: Guid of the StorageDriver to remove snapshots on
    :type storagedriver_guid: str
    :param timestamp: Timestamp to determine whether snapshots should be kept or not, if none provided, current time will be used
    :type timestamp: float
    :param group_id: ID of the group task. Used to identify which snapshot deletes were called during the scheduled task
    :type group_id: str
    :return: None
    :raises RuntimeError: When one or more exceptions occurred while deleting snapshots
    """
    log_id = 'Group job {} - '.format(group_id) if group_id else ''

    def format_log(message):
        # Prefix every log line with the group job identifier (if any) so grouped runs can be correlated
        return '{}{}'.format(log_id, message)

    GenericController._logger.info(format_log('Delete snapshots started for StorageDriver {0}'.format(storagedriver_guid)))
    storagedriver = StorageDriver(storagedriver_guid)
    exceptions = []

    day = timedelta(1)
    week = day * 7

    def make_timestamp(offset):
        """
        Create an integer based timestamp
        :param offset: Offset in days
        :return: Timestamp
        """
        return int(mktime((base - offset).timetuple()))

    # Calculate bucket structure
    if timestamp is None:
        timestamp = time.time()
    base = datetime.fromtimestamp(timestamp).date() - day
    buckets = []
    # Buckets first 7 days: [0-1[, [1-2[, [2-3[, [3-4[, [4-5[, [5-6[, [6-7[
    for i in xrange(0, 7):
        buckets.append({'start': make_timestamp(day * i),
                        'end': make_timestamp(day * (i + 1)),
                        'type': '1d',
                        'snapshots': []})
    # Week buckets next 3 weeks: [7-14[, [14-21[, [21-28[
    for i in xrange(1, 4):
        buckets.append({'start': make_timestamp(week * i),
                        'end': make_timestamp(week * (i + 1)),
                        'type': '1w',
                        'snapshots': []})
    # Everything older than 4 weeks lands in the 'rest' bucket and gets no keeper
    buckets.append({'start': make_timestamp(week * 4),
                    'end': 0,
                    'type': 'rest',
                    'snapshots': []})

    # Get a list of all snapshots that are used as parents for clones; those must never be deleted
    parent_snapshots = set([vd.parentsnapshot for vd in VDiskList.get_with_parent_snaphots()])

    # Place all snapshots in bucket_chains (one chain of buckets per vDisk)
    bucket_chains = []
    for vdisk_guid in storagedriver.vdisks_guids:
        try:
            vdisk = VDisk(vdisk_guid)
            vdisk.invalidate_dynamics('being_scrubbed')
            if vdisk.being_scrubbed:
                # Deleting snapshots of a vDisk that is being scrubbed is not allowed
                continue

            if vdisk.info['object_type'] in ['BASE']:
                bucket_chain = copy.deepcopy(buckets)
                for snapshot in vdisk.snapshots:
                    if snapshot.get('is_sticky') is True:
                        continue
                    if snapshot['guid'] in parent_snapshots:
                        GenericController._logger.info(format_log('Not deleting snapshot {0} because it has clones'.format(snapshot['guid'])))
                        continue
                    # Dedicated name so the 'timestamp' argument is not clobbered inside the loop
                    snapshot_timestamp = int(snapshot['timestamp'])
                    for bucket in bucket_chain:
                        # Buckets run backwards in time: start (newer) >= ts > end (older)
                        if bucket['start'] >= snapshot_timestamp > bucket['end']:
                            bucket['snapshots'].append({'timestamp': snapshot_timestamp,
                                                        'snapshot_id': snapshot['guid'],
                                                        'vdisk_guid': vdisk.guid,
                                                        'is_consistent': snapshot['is_consistent']})
                bucket_chains.append(bucket_chain)
        except Exception as ex:
            exceptions.append(ex)

    # Clean out the snapshot bucket_chains: remove the snapshot(s) we want to keep from each bucket,
    # everything that remains in the buckets afterwards will be deleted
    for bucket_chain in bucket_chains:
        first = True
        for bucket in bucket_chain:
            if first is True:
                # Most recent bucket: keep the 'best' snapshot
                best = None
                for snapshot in bucket['snapshots']:
                    if best is None:
                        best = snapshot
                    # Consistent is better than inconsistent
                    elif snapshot['is_consistent'] and not best['is_consistent']:
                        best = snapshot
                    # Newer (larger timestamp) is better than older snapshots
                    elif snapshot['is_consistent'] == best['is_consistent'] and \
                            snapshot['timestamp'] > best['timestamp']:
                        best = snapshot
                # NOTE: any snapshot sharing the kept timestamp is preserved as well
                bucket['snapshots'] = [s for s in bucket['snapshots'] if s['timestamp'] != best['timestamp']]
                first = False
            elif bucket['end'] > 0:
                # Intermediate buckets: keep the oldest snapshot ('rest' bucket has end == 0, keeps nothing)
                oldest = None
                for snapshot in bucket['snapshots']:
                    if oldest is None:
                        oldest = snapshot
                    # Older (smaller timestamp) is the one we want to keep
                    elif snapshot['timestamp'] < oldest['timestamp']:
                        oldest = snapshot
                bucket['snapshots'] = [s for s in bucket['snapshots'] if s['timestamp'] != oldest['timestamp']]

    # Delete obsolete snapshots
    for bucket_chain in bucket_chains:  # Each bucket chain represents one vdisk's snapshots
        try:
            for bucket in bucket_chain:
                for snapshot in bucket['snapshots']:
                    VDiskController.delete_snapshot(vdisk_guid=snapshot['vdisk_guid'], snapshot_id=snapshot['snapshot_id'])
        except RuntimeError as ex:
            vdisk_guid = next((snapshot['vdisk_guid'] for bucket in bucket_chain for snapshot in bucket['snapshots']), '')
            vdisk_id_log = ' for VDisk with guid {}'.format(vdisk_guid) if vdisk_guid else ''
            if SCRUB_VDISK_EXCEPTION_MESSAGE in ex.message:
                # Scrubbing started between the check above and the delete; best-effort, log and continue
                GenericController._logger.warning(format_log('Being scrubbed exception occurred while deleting snapshots{}'.format(vdisk_id_log)))
            else:
                GenericController._logger.exception(format_log('Exception occurred while deleting snapshots{}'.format(vdisk_id_log)))
            exceptions.append(ex)
    if exceptions:
        raise RuntimeError('Exceptions occurred while deleting snapshots: \n- {}'.format('\n- '.join((str(ex) for ex in exceptions))))
    # Bug fix: the original logged the literal placeholder '{0}' because .format() was never applied
    GenericController._logger.info(format_log('Delete snapshots finished for StorageDriver {0}'.format(storagedriver_guid)))