def create(self):
    """
    Prepares a new Storagedriver for a given vPool and Storagerouter
    Models a StorageDriver (with its 4 fixed ports) plus one Service/AlbaProxy
    pair per requested ALBA proxy, and stores the result on self.storagedriver.
    :raises RuntimeError: When no StorageRouterInstaller instance has been set on this installer
    :return: None
    :rtype: NoneType
    """
    if self.sr_installer is None:
        raise RuntimeError('No StorageRouterInstaller instance found')

    machine_id = System.get_my_machine_id(client=self.sr_installer.root_client)
    port_range = Configuration.get('/ovs/framework/hosts/{0}/ports|storagedriver'.format(machine_id))
    storagerouter = self.sr_installer.storagerouter
    # Serialize free-port selection per host: two concurrent vPool additions on the
    # same machine must not pick identical ports before they are persisted in the model.
    with volatile_mutex('add_vpool_get_free_ports_{0}'.format(machine_id), wait=30):
        # Collect every port already claimed in the model by StorageDrivers on this
        # StorageRouter, including the first port of each of their ALBA proxy services.
        model_ports_in_use = []
        for sd in StorageDriverList.get_storagedrivers():
            if sd.storagerouter_guid == storagerouter.guid:
                model_ports_in_use += sd.ports.values()
                for proxy in sd.alba_proxies:
                    model_ports_in_use.append(proxy.service.ports[0])
        # 4 fixed StorageDriver ports (management, xmlrpc, dtl, edge) + 1 per requested proxy
        ports = System.get_free_ports(selected_range=port_range, exclude=model_ports_in_use, amount=4 + self.sr_installer.requested_proxies, client=self.sr_installer.root_client)

        vpool = self.vp_installer.vpool
        vrouter_id = '{0}{1}'.format(vpool.name, machine_id)
        storagedriver = StorageDriver()
        storagedriver.name = vrouter_id.replace('_', ' ')
        storagedriver.ports = {'management': ports[0],
                               'xmlrpc': ports[1],
                               'dtl': ports[2],
                               'edge': ports[3]}
        storagedriver.vpool = vpool
        storagedriver.cluster_ip = Configuration.get('/ovs/framework/hosts/{0}/ip'.format(machine_id))
        storagedriver.storage_ip = self.storage_ip
        storagedriver.mountpoint = '/mnt/{0}'.format(vpool.name)
        storagedriver.description = storagedriver.name
        storagedriver.storagerouter = storagerouter
        storagedriver.storagedriver_id = vrouter_id
        # Saving inside the mutex makes the claimed ports visible to concurrent callers
        storagedriver.save()

        # ALBA Proxies
        # One Service + AlbaProxy per requested proxy; each claims one of the remaining free ports
        proxy_service_type = ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.ALBA_PROXY)
        for proxy_id in xrange(self.sr_installer.requested_proxies):
            service = Service()
            service.storagerouter = storagerouter
            service.ports = [ports[4 + proxy_id]]
            service.name = 'albaproxy_{0}_{1}'.format(vpool.name, proxy_id)
            service.type = proxy_service_type
            service.save()
            alba_proxy = AlbaProxy()
            alba_proxy.service = service
            alba_proxy.storagedriver = storagedriver
            alba_proxy.save()

    self.storagedriver = storagedriver
def test_from_single_node_to_multi_node(self):
    """
    Deploy a vDisk on a single node --> This should result in no DTL configured
    Add an additional node and verify DTL will be set
    """
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |    sr 1        |   1    |                |                  |             |
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
    # Single node: there is no other node that could host the DTL, so no config expected
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])

    # Add a Storage Router
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |    sr 1        |   1    |                |                  |             |
    # |    sr 2        |        |                |                  |      1      |
    extra_sr = StorageRouter()
    for attribute, value in [('name', '2'),
                             ('ip', '10.0.0.2'),
                             ('rdma_capable', False)]:
        setattr(extra_sr, attribute, value)
    extra_sr.save()
    storagerouters[2] = extra_sr
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    extra_sd = StorageDriver()
    for attribute, value in [('vpool', vpool),
                             ('storagerouter', extra_sr),
                             ('name', '2'),
                             ('mountpoint', '/'),
                             ('cluster_ip', extra_sr.ip),
                             ('storage_ip', '10.0.1.2'),
                             ('storagedriver_id', '2'),
                             ('ports', {'management': 1, 'xmlrpc': 2, 'dtl': 3, 'edge': 4})]:
        setattr(extra_sd, attribute, value)
    extra_sd.save()

    # With a 2nd node available, the DTL should now target its StorageDriver
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def test_from_single_node_to_multi_node(self):
    """
    Deploy a vDisk on a single node --> This should result in no DTL configured
    Add an additional node and verify DTL will be set
    """
    def _modelled(instance, attributes):
        # Assign the given (attribute, value) pairs and persist the model object
        for attribute, value in attributes:
            setattr(instance, attribute, value)
        instance.save()
        return instance

    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |    sr 1        |   1    |                |                  |             |
    structure = Helper.build_service_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
    # Single node: no other node can host the DTL, so no config is expected
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])

    # Add a Storage Router
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |    sr 1        |   1    |                |                  |             |
    # |    sr 2        |        |                |                  |      1      |
    storagerouters[2] = _modelled(StorageRouter(), [('name', '2'),
                                                    ('ip', '10.0.0.2'),
                                                    ('rdma_capable', False)])
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
    _modelled(StorageDriver(), [('vpool', vpool),
                                ('storagerouter', storagerouters[2]),
                                ('name', '2'),
                                ('mountpoint', '/'),
                                ('cluster_ip', storagerouters[2].ip),
                                ('storage_ip', '10.0.1.2'),
                                ('storagedriver_id', '2'),
                                ('ports', {'management': 1, 'xmlrpc': 2, 'dtl': 3, 'edge': 4})])

    # With a 2nd node available, the DTL should now target its StorageDriver
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def test_clone(self):
    """
    Test the clone functionality
        - Create a vDisk with name 'clone1'
        - Clone the vDisk and make some assertions
        - Attempt to clone again using same name and same devicename
        - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
        - Attempt to clone on Storage Driver without MDS service
        - Attempt to clone from snapshot which is not yet completely synced to backend
        - Attempt to delete the snapshot from which a clone was made
        - Clone the vDisk on another Storage Router
        - Clone another vDisk with name 'clone1' linked to another vPool
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1, 2],
         'storagerouters': [1, 2, 3],
         'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    vpools = structure['vpools']
    mds_services = structure['mds_services']
    service_type = structure['service_types']['MetadataServer']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
    self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

    # Basic clone scenario
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')
    # Cloning without an explicit snapshot creates one implicitly; the clone is linked to it
    clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshot_ids[0])
    self.assertTrue(expr=len(clones) == 1, msg='Expected to find 1 vDisk with parent snapshot')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 1, msg='Expected to find 1 child vDisk')
    for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
        self.assertTrue(expr=expected_key in clone1_info, msg='Expected to find key "{0}" in clone_info'.format(expected_key))
    self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'], msg='Guids do not match')
    self.assertTrue(expr=clones[0].name == clone1_info['name'], msg='Names do not match')
    self.assertTrue(expr=clones[0].devicename == clone1_info['backingdevice'], msg='Device names do not match')

    # Attempt to clone again with same name
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 1')

    # Attempt to clone again with a name which will have identical devicename
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1%')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 2')

    # Attempt to clone on Storage Router on which vPool is not extended
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', storagerouter_guid=storagerouters[2].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 3')

    # Attempt to clone on non-existing Storage Driver
    # (break the id, verify the failure, then restore it for the following scenarios)
    storagedrivers[1].storagedriver_id = 'non-existing'
    storagedrivers[1].save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 4')
    storagedrivers[1].storagedriver_id = '1'
    storagedrivers[1].save()

    # Attempt to clone on Storage Driver without MDS service
    # (temporarily move the MDS service to another Storage Router, then move it back)
    mds_services[1].service.storagerouter = storagerouters[3]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 5')
    mds_services[1].service.storagerouter = storagerouters[1]
    mds_services[1].service.save()

    # Attempt to clone by providing snapshot_id not synced to backend
    self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find only 1 snapshot before cloning')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 1, msg='Expected to find only 1 snapshot ID before cloning')
    metadata = {'label': 'label1',
                'timestamp': int(time.time()),
                'is_sticky': False,
                'in_backend': False,  # Snapshot deliberately marked as not synced to the backend
                'is_automatic': True,
                'is_consistent': True}
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata=metadata)
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 2, msg='Expected to find 2 snapshot IDs')
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 6')

    # Update backend synced flag and retry
    vdisk1.storagedriver_client._set_snapshot_in_backend(vdisk1.volume_id, snapshot_id, True)
    vdisk1.invalidate_dynamics(['snapshots', 'snapshot_ids'])
    VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    vdisk1.invalidate_dynamics()
    self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 2, msg='Expected to find 2 child vDisks')
    # Cloning from an existing snapshot must not create an additional snapshot
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots after cloning from a specified snapshot')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 2, msg='Expected to find 2 snapshot IDs after cloning from a specified snapshot')

    # Attempt to delete the snapshot that has clones
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid, snapshot_id=snapshot_id)

    # Clone on specific Storage Router
    # Model an additional StorageDriver + MDS service on Storage Router 2 so the clone can land there
    storagedriver = StorageDriver()
    storagedriver.vpool = vpools[1]
    storagedriver.storagerouter = storagerouters[2]
    storagedriver.name = '3'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storagerouters[2].ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '3'
    storagedriver.ports = {'management': 1,
                           'xmlrpc': 2,
                           'dtl': 3,
                           'edge': 4}
    storagedriver.save()
    s_id = '{0}-1'.format(storagedriver.storagerouter.name)
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [3]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()
    clone3 = VDisk(VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone3', storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
    self.assertTrue(expr=clone3.storagerouter_guid == storagerouters[2].guid, msg='Incorrect Storage Router on which the clone is attached')

    # Clone vDisk with existing name on another vPool
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))
    clone_vdisk2 = VDisk(VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone1')['vdisk_guid'])
    self.assertTrue(expr=clone_vdisk2.vpool == vpools[2], msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
    self.assertTrue(expr=len([vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1']) == 2, msg='Expected to find 2 vDisks with name "clone1"')

    # Attempt to clone without specifying snapshot and snapshot fails to sync to backend
    StorageRouterClient.synced = False  # Simulate backend sync failure for implicitly created snapshots
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_2', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone4')
    vdisk2.invalidate_dynamics()
    # The implicitly created snapshot must have been cleaned up after the failure
    self.assertTrue(expr=len(vdisk2.snapshots) == 0, msg='Expected to find 0 snapshots after clone failure')
    self.assertTrue(expr=len(vdisk2.snapshot_ids) == 0, msg='Expected to find 0 snapshot IDs after clone failure')
    self.assertTrue(expr=len(vdisk2.child_vdisks) == 0, msg='Expected to find 0 children after clone failure')
    StorageRouterClient.synced = True
def _build_service_structure(self, structure):
    """
    Builds an MDS service structure
    The structure dict lists the ids of the vpools, storagerouters, storagedrivers
    and mds_services to model; relations are expressed through the id tuples.
    """
    # Shared helper objects, modelled exactly once
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()

    backend_type = BackendType()
    backend_type.name = 'BackendType'
    backend_type.code = 'BT'
    backend_type.save()

    pmachine = PMachine()
    pmachine.name = 'PMachine'
    pmachine.username = '******'
    pmachine.ip = '127.0.0.1'
    pmachine.hvtype = 'VMWARE'
    pmachine.save()

    vpools = {}
    for pool_id in structure['vpools']:
        pool = VPool()
        pool.name = str(pool_id)
        pool.backend_type = backend_type
        pool.save()
        vpools[pool_id] = pool

    storagerouters = {}
    for router_id in structure['storagerouters']:
        router = StorageRouter()
        router.name = str(router_id)
        router.ip = '10.0.0.{0}'.format(router_id)
        router.pmachine = pmachine
        router.save()
        storagerouters[router_id] = router

    storagedrivers = {}
    for driver_id, pool_id, router_id in structure['storagedrivers']:
        driver = StorageDriver()
        driver.vpool = vpools[pool_id]
        driver.storagerouter = storagerouters[router_id]
        driver.name = str(driver_id)
        driver.mountpoint = '/'
        driver.cluster_ip = storagerouters[router_id].ip
        driver.storage_ip = '127.0.0.1'
        driver.storagedriver_id = str(driver_id)
        driver.ports = [1, 2, 3]
        driver.save()
        storagedrivers[driver_id] = driver

    services = {}
    mds_services = {}
    for mds_id, driver_id in structure['mds_services']:
        driver = storagedrivers[driver_id]
        # Service names are derived from the hosting Storage Router and the MDS id
        service_name = '{0}-{1}'.format(driver.storagerouter.name, mds_id)
        service = Service()
        service.name = service_name
        service.storagerouter = driver.storagerouter
        service.ports = [mds_id]
        service.type = service_type
        service.save()
        services[service_name] = service

        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = driver.vpool
        mds_service.save()
        mds_services[mds_id] = mds_service

    return vpools, storagerouters, storagedrivers, services, mds_services, service_type
def build_dal_structure(structure, previous_structure=None):
    """
    Builds a model structure
    Idempotent per id: entries already present in previous_structure are reused, not re-created.
    Example:
        structure = DalHelper.build_dal_structure(
            {'vpools': [1],
             'domains': [],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouter_domains': []}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
        )
    :param structure: Ids (and relation tuples) of the model objects to create
    :param previous_structure: Output of a previous call, to extend an existing structure
    :return: Dict mapping each category name to a dict of {id: model object}
    """
    Configuration.set(key=Configuration.EDITION_KEY, value=PackageFactory.EDITION_ENTERPRISE)
    if previous_structure is None:
        previous_structure = {}
    vdisks = previous_structure.get('vdisks', {})
    vpools = previous_structure.get('vpools', {})
    domains = previous_structure.get('domains', {})
    services = previous_structure.get('services', {})
    mds_services = previous_structure.get('mds_services', {})
    storagerouters = previous_structure.get('storagerouters', {})
    storagedrivers = previous_structure.get('storagedrivers', {})
    storagerouter_domains = previous_structure.get('storagerouter_domains', {})

    # Ensure every known service type exists in the model
    service_types = {}
    for service_type_name in ServiceType.SERVICE_TYPES.values():
        service_type = ServiceTypeList.get_by_name(service_type_name)
        if service_type is None:
            service_type = ServiceType()
            service_type.name = service_type_name
            service_type.save()
        service_types[service_type_name] = service_type
    srclients = {}  # vpool_id -> StorageRouterClient, used later to create volumes for vdisks
    for domain_id in structure.get('domains', []):
        if domain_id not in domains:
            domain = Domain()
            domain.name = 'domain_{0}'.format(domain_id)
            domain.save()
            domains[domain_id] = domain
    for vpool_id in structure.get('vpools', []):
        if vpool_id not in vpools:
            vpool = VPool()
            vpool.name = str(vpool_id)
            vpool.status = 'RUNNING'
            vpool.metadata = {'backend': {}, 'caching_info': {}}
            vpool.metadata_store_bits = 5
            vpool.save()
            vpools[vpool_id] = vpool
        else:
            vpool = vpools[vpool_id]
        srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
        # MDS config keys the framework expects for every vPool
        Configuration.set('/ovs/vpools/{0}/mds_config|mds_tlogs'.format(vpool.guid), 100)
        Configuration.set('/ovs/vpools/{0}/mds_config|mds_safety'.format(vpool.guid), 2)
        Configuration.set('/ovs/vpools/{0}/mds_config|mds_maxload'.format(vpool.guid), 75)
        Configuration.set('/ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(vpool.guid), json.dumps({}, indent=4), raw=True)
    for sr_id in structure.get('storagerouters', []):
        if sr_id not in storagerouters:
            storagerouter = StorageRouter()
            storagerouter.name = str(sr_id)
            storagerouter.ip = '10.0.0.{0}'.format(sr_id)
            storagerouter.rdma_capable = False
            storagerouter.node_type = 'MASTER'
            storagerouter.machine_id = str(sr_id)
            storagerouter.save()
            storagerouters[sr_id] = storagerouter
            # Each new Storage Router gets one disk with a single DB/SCRUB partition
            disk = Disk()
            disk.storagerouter = storagerouter
            disk.state = 'OK'
            disk.name = '/dev/uda'
            disk.size = 1 * 1024 ** 4
            disk.is_ssd = True
            disk.aliases = ['/dev/uda']
            disk.save()
            partition = DiskPartition()
            partition.offset = 0
            partition.size = disk.size
            partition.aliases = ['/dev/uda-1']
            partition.state = 'OK'
            partition.mountpoint = '/tmp/unittest/sr_{0}/disk_1/partition_1'.format(sr_id)
            partition.disk = disk
            partition.roles = [DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB]
            partition.save()
        else:
            storagerouter = storagerouters[sr_id]
        # noinspection PyProtectedMember
        System._machine_id[storagerouter.ip] = str(sr_id)
        # Deterministic, non-overlapping port ranges per Storage Router id
        mds_start = 10000 + 100 * (sr_id - 1)
        mds_end = 10000 + 100 * sr_id - 1
        arakoon_start = 20000 + 100 * (sr_id - 1)
        storagedriver_start = 30000 + 100 * (sr_id - 1)
        storagedriver_end = 30000 + 100 * sr_id - 1
        Configuration.initialize_host(host_id=sr_id,
                                      port_info={'mds': [mds_start, mds_end],
                                                 'arakoon': arakoon_start,
                                                 'storagedriver': [storagedriver_start, storagedriver_end]})
    for sd_id, vpool_id, sr_id in structure.get('storagedrivers', ()):
        if sd_id not in storagedrivers:
            storagedriver = StorageDriver()
            storagedriver.vpool = vpools[vpool_id]
            storagedriver.storagerouter = storagerouters[sr_id]
            storagedriver.name = str(sd_id)
            storagedriver.mountpoint = '/'
            storagedriver.cluster_ip = storagerouters[sr_id].ip
            storagedriver.storage_ip = '10.0.1.{0}'.format(sr_id)
            storagedriver.storagedriver_id = str(sd_id)
            storagedriver.ports = {'management': 1,
                                   'xmlrpc': 2,
                                   'dtl': 3,
                                   'edge': 4}
            storagedriver.save()
            storagedrivers[sd_id] = storagedriver
            DalHelper.set_vpool_storage_driver_configuration(vpool=vpools[vpool_id], storagedriver=storagedriver)
    for mds_id, sd_id in structure.get('mds_services', ()):
        if mds_id not in mds_services:
            sd = storagedrivers[sd_id]
            s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
            service = Service()
            service.name = s_id
            service.storagerouter = sd.storagerouter
            service.ports = [mds_id]
            service.type = service_types['MetadataServer']
            service.save()
            services[s_id] = service
            mds_service = MDSService()
            mds_service.service = service
            mds_service.number = 0
            mds_service.capacity = 10
            mds_service.vpool = sd.vpool
            mds_service.save()
            mds_services[mds_id] = mds_service
            # Back the MDS service with a partition on the Storage Router's first disk
            StorageDriverController.add_storagedriverpartition(sd, {'size': None,
                                                                    'role': DiskPartition.ROLES.DB,
                                                                    'sub_role': StorageDriverPartition.SUBROLE.MDS,
                                                                    'partition': sd.storagerouter.disks[0].partitions[0],
                                                                    'mds_service': mds_service})
    for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get('vdisks', ()):
        if vdisk_id not in vdisks:
            vpool = vpools[vpool_id]
            devicename = 'vdisk_{0}'.format(vdisk_id)
            mds_backend_config = DalHelper.generate_mds_metadata_backend_config([] if mds_id is None else [mds_services[mds_id]])
            # Create the actual volume through the (mocked) StorageRouterClient before modelling it
            volume_id = srclients[vpool_id].create_volume(devicename, mds_backend_config, 0, str(storage_driver_id))
            vdisk = VDisk()
            vdisk.name = str(vdisk_id)
            vdisk.devicename = devicename
            vdisk.volume_id = volume_id
            vdisk.vpool = vpool
            vdisk.size = 0
            vdisk.save()
            vdisk.reload_client('storagedriver')
            vdisks[vdisk_id] = vdisk
    for srd_id, sr_id, domain_id, backup in structure.get('storagerouter_domains', ()):
        if srd_id not in storagerouter_domains:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = backup
            sr_domain.domain = domains[domain_id]
            sr_domain.storagerouter = storagerouters[sr_id]
            sr_domain.save()
            storagerouter_domains[srd_id] = sr_domain
    return {'vdisks': vdisks,
            'vpools': vpools,
            'domains': domains,
            'services': services,
            'mds_services': mds_services,
            'service_types': service_types,
            'storagerouters': storagerouters,
            'storagedrivers': storagedrivers,
            'storagerouter_domains': storagerouter_domains}
def add_vpool(parameters): """ Add a vPool to the machine this task is running on """ parameters = {} if parameters is None else parameters ip = parameters['storagerouter_ip'] vpool_name = parameters['vpool_name'] if StorageRouterController._validate_ip(ip) is False: raise ValueError('The entered ip address is invalid') if not re.match('^[0-9a-z]+(\-+[0-9a-z]+)*$', vpool_name): raise ValueError('Invalid vpool_name given') client = SSHClient.load(ip) # Make sure to ALWAYS reload the client, as Fabric seems to be singleton-ish unique_id = System.get_my_machine_id(client) storagerouter = None for current_storagerouter in StorageRouterList.get_storagerouters(): if current_storagerouter.ip == ip and current_storagerouter.machine_id == unique_id: storagerouter = current_storagerouter break if storagerouter is None: raise RuntimeError('Could not find Storage Router with given ip address') vpool = VPoolList.get_vpool_by_name(vpool_name) storagedriver = None if vpool is not None: if vpool.backend_type.code == 'local': # Might be an issue, investigating whether it's on the same not or not if len(vpool.storagedrivers) == 1 and vpool.storagedrivers[0].storagerouter.machine_id != unique_id: raise RuntimeError('A local vPool with name {0} already exists'.format(vpool_name)) for vpool_storagedriver in vpool.storagedrivers: if vpool_storagedriver.storagerouter_guid == storagerouter.guid: storagedriver = vpool_storagedriver # The vPool is already added to this Storage Router and this might be a cleanup/recovery # Check whether there are running machines on this vPool machine_guids = [] for vdisk in vpool.vdisks: if vdisk.vmachine_guid not in machine_guids: machine_guids.append(vdisk.vmachine_guid) if vdisk.vmachine.hypervisor_status in ['RUNNING', 'PAUSED']: raise RuntimeError( 'At least one vMachine using this vPool is still running or paused. 
Make sure there are no active vMachines' ) nodes = {ip} # Set comprehension if vpool is not None: for vpool_storagedriver in vpool.storagedrivers: nodes.add(vpool_storagedriver.storagerouter.ip) nodes = list(nodes) services = ['volumedriver_{0}'.format(vpool_name), 'failovercache_{0}'.format(vpool_name)] # Stop services for node in nodes: node_client = SSHClient.load(node) for service in services: System.exec_remote_python(node_client, """ from ovs.plugin.provider.service import Service if Service.has_service('{0}'): Service.disable_service('{0}') """.format(service)) System.exec_remote_python(node_client, """ from ovs.plugin.provider.service import Service if Service.has_service('{0}'): Service.stop_service('{0}') """.format(service)) # Keep in mind that if the Storage Driver exists, the vPool does as well client = SSHClient.load(ip) mountpoint_bfs = '' directories_to_create = [] if vpool is None: vpool = VPool() supported_backends = System.read_remote_config(client, 'volumedriver.supported.backends').split(',') if 'rest' in supported_backends: supported_backends.remove('rest') # REST is not supported for now backend_type = BackendTypeList.get_backend_type_by_code(parameters['type']) vpool.backend_type = backend_type connection_host = connection_port = connection_username = connection_password = None if vpool.backend_type.code in ['local', 'distributed']: vpool.metadata = {'backend_type': 'LOCAL'} mountpoint_bfs = parameters['mountpoint_bfs'] directories_to_create.append(mountpoint_bfs) vpool.metadata['local_connection_path'] = mountpoint_bfs if vpool.backend_type.code == 'rest': connection_host = parameters['connection_host'] connection_port = parameters['connection_port'] rest_connection_timeout_secs = parameters['connection_timeout'] vpool.metadata = {'rest_connection_host': connection_host, 'rest_connection_port': connection_port, 'buchla_connection_log_level': "0", 'rest_connection_verbose_logging': rest_connection_timeout_secs, 
'rest_connection_metadata_format': "JSON", 'backend_type': 'REST'} elif vpool.backend_type.code in ('ceph_s3', 'amazon_s3', 'swift_s3'): connection_host = parameters['connection_host'] connection_port = parameters['connection_port'] connection_username = parameters['connection_username'] connection_password = parameters['connection_password'] if vpool.backend_type.code in ['swift_s3']: strict_consistency = 'false' s3_connection_flavour = 'SWIFT' else: strict_consistency = 'true' s3_connection_flavour = 'S3' vpool.metadata = {'s3_connection_host': connection_host, 's3_connection_port': connection_port, 's3_connection_username': connection_username, 's3_connection_password': connection_password, 's3_connection_flavour': s3_connection_flavour, 's3_connection_strict_consistency': strict_consistency, 's3_connection_verbose_logging': 1, 'backend_type': 'S3'} vpool.name = vpool_name vpool.description = "{} {}".format(vpool.backend_type.code, vpool_name) vpool.login = connection_username vpool.password = connection_password if not connection_host: vpool.connection = None else: vpool.connection = '{}:{}'.format(connection_host, connection_port) vpool.save() # Connection information is Storage Driver related information new_storagedriver = False if storagedriver is None: storagedriver = StorageDriver() new_storagedriver = True mountpoint_temp = parameters['mountpoint_temp'] mountpoint_md = parameters['mountpoint_md'] mountpoint_readcache1 = parameters['mountpoint_readcache1'] mountpoint_readcache2 = parameters.get('mountpoint_readcache2', '') mountpoint_writecache = parameters['mountpoint_writecache'] mountpoint_foc = parameters['mountpoint_foc'] directories_to_create.append(mountpoint_temp) directories_to_create.append(mountpoint_md) directories_to_create.append(mountpoint_readcache1) if mountpoint_readcache2: directories_to_create.append(mountpoint_readcache2) directories_to_create.append(mountpoint_writecache) directories_to_create.append(mountpoint_foc) client = 
SSHClient.load(ip) dir_create_script = """ import os for directory in {0}: if not os.path.exists(directory): os.makedirs(directory) """.format(directories_to_create) System.exec_remote_python(client, dir_create_script) read_cache1_fs = os.statvfs(mountpoint_readcache1) read_cache2_fs = None if mountpoint_readcache2: read_cache2_fs = os.statvfs(mountpoint_readcache2) write_cache_fs = os.statvfs(mountpoint_writecache) fdcache = '{}/fd_{}'.format(mountpoint_writecache, vpool_name) scocache = '{}/sco_{}'.format(mountpoint_writecache, vpool_name) readcache1 = '{}/read1_{}'.format(mountpoint_readcache1, vpool_name) files2create = [readcache1] if mountpoint_readcache2 and mountpoint_readcache1 != mountpoint_readcache2: readcache2 = '{}/read2_{}'.format(mountpoint_readcache2, vpool_name) files2create.append(readcache2) else: readcache2 = '' failovercache = '{}/foc_{}'.format(mountpoint_foc, vpool_name) metadatapath = '{}/metadata_{}'.format(mountpoint_md, vpool_name) tlogpath = '{}/tlogs_{}'.format(mountpoint_md, vpool_name) rsppath = '/var/rsp/{}'.format(vpool_name) dirs2create = [scocache, failovercache, metadatapath, tlogpath, rsppath, System.read_remote_config(client, 'volumedriver.readcache.serialization.path')] cmd = "cat /etc/mtab | grep ^/dev/ | cut -d ' ' -f 2" mountpoints = [device.strip() for device in client.run(cmd).strip().split('\n')] mountpoints.remove('/') def is_partition(directory): for mountpoint in mountpoints: if directory == mountpoint: return True return False # Cache sizes # 20% = scocache # 20% = failovercache (@TODO: check if this can possibly consume more than 20%) # 60% = readcache # safety values: readcache1_factor = 0.2 readcache2_factor = 0.2 writecache_factor = 0.1 if (mountpoint_readcache1 == mountpoint_readcache2) or not mountpoint_readcache2: delta = set() delta.add(mountpoint_readcache1 if is_partition(mountpoint_readcache1) else '/dummy') delta.add(mountpoint_writecache if is_partition(mountpoint_writecache) else '/dummy') 
delta.add(mountpoint_foc if is_partition(mountpoint_foc) else '/dummy') if len(delta) == 1: readcache1_factor = 0.49 writecache_factor = 0.2 elif len(delta) == 2: if mountpoint_writecache == mountpoint_foc: readcache1_factor = 0.98 writecache_factor = 0.49 else: readcache1_factor = 0.49 if mountpoint_readcache1 == mountpoint_writecache: writecache_factor = 0.49 else: writecache_factor = 0.98 elif len(delta) == 3: readcache1_factor = 0.98 writecache_factor = 0.98 else: delta = set() delta.add(mountpoint_readcache1 if is_partition(mountpoint_readcache1) else '/dummy') delta.add(mountpoint_readcache2 if is_partition(mountpoint_readcache2) else '/dummy') delta.add(mountpoint_writecache if is_partition(mountpoint_writecache) else '/dummy') delta.add(mountpoint_foc if is_partition(mountpoint_foc) else '/dummy') if len(delta) == 1: # consider them all to be directories readcache1_factor = 0.24 readcache2_factor = 0.24 writecache_factor = 0.24 elif len(delta) == 2: if mountpoint_writecache == mountpoint_foc: writecache_factor = 0.24 if mountpoint_readcache1 == mountpoint_writecache: readcache1_factor = 0.49 readcache2_factor = 0.98 else: readcache1_factor = 0.98 readcache2_factor = 0.49 else: readcache1_factor = readcache2_factor = 0.49 writecache_factor = 0.49 elif len(delta) == 3: if mountpoint_writecache == mountpoint_foc: readcache1_factor = 0.98 readcache2_factor = 0.98 writecache_factor = 0.49 elif mountpoint_readcache1 == mountpoint_writecache: readcache1_factor = 0.49 readcache2_factor = 0.98 writecache_factor = 0.49 elif mountpoint_readcache1 == mountpoint_foc: readcache1_factor = 0.49 readcache2_factor = 0.98 writecache_factor = 0.98 elif mountpoint_readcache2 == mountpoint_writecache: readcache1_factor = 0.98 readcache2_factor = 0.49 writecache_factor = 0.49 elif mountpoint_readcache2 == mountpoint_foc: readcache1_factor = 0.98 readcache2_factor = 0.49 writecache_factor = 0.98 elif len(delta) == 4: readcache1_factor = 0.98 readcache2_factor = 0.98 
writecache_factor = 0.98 # summarize caching on root partition (directory only) root_assigned = dict() if not is_partition(mountpoint_readcache1): root_assigned['readcache1_factor'] = readcache1_factor if not is_partition(mountpoint_readcache2): root_assigned['readcache2_factor'] = readcache2_factor if not is_partition(mountpoint_writecache): root_assigned['writecache_factor'] = writecache_factor if not is_partition(mountpoint_foc): root_assigned['foc_factor'] = min(readcache1_factor, readcache2_factor, writecache_factor) # always leave at least 20% of free space division_factor = 1.0 total_size = sum(root_assigned.values()) + .02 * len(root_assigned) if 0.8 < total_size < 1.6: division_factor = 2.0 elif 1.6 < total_size < 3.2: division_factor = 4.0 elif total_size >= 3.2: division_factor = 8.0 if 'readcache1_factor' in root_assigned.keys(): readcache1_factor /= division_factor if 'readcache2_factor' in root_assigned.keys(): readcache2_factor /= division_factor if 'writecache_factor' in root_assigned.keys(): writecache_factor /= division_factor scocache_size = '{0}KiB'.format((int(write_cache_fs.f_bavail * writecache_factor / 4096) * 4096) * 4) if (mountpoint_readcache1 and not mountpoint_readcache2) or (mountpoint_readcache1 == mountpoint_readcache2): mountpoint_readcache2 = '' readcache1_size = '{0}KiB'.format((int(read_cache1_fs.f_bavail * readcache1_factor / 4096) * 4096) * 4) readcache2 = '' readcache2_size = '0KiB' else: readcache1_size = '{0}KiB'.format((int(read_cache1_fs.f_bavail * readcache1_factor / 4096) * 4096) * 4) readcache2_size = '{0}KiB'.format((int(read_cache2_fs.f_bavail * readcache2_factor / 4096) * 4096) * 4) if new_storagedriver: ports_in_use = System.ports_in_use(client) ports_reserved = [] ports_in_use_model = {} for port_storagedriver in StorageDriverList.get_storagedrivers(): if port_storagedriver.vpool_guid not in ports_in_use_model: ports_in_use_model[port_storagedriver.vpool_guid] = port_storagedriver.ports ports_reserved += 
port_storagedriver.ports if vpool.guid in ports_in_use_model: # The vPool is extended to another StorageRouter. We need to use these ports. ports = ports_in_use_model[vpool.guid] if any(port in ports_in_use for port in ports): raise RuntimeError('The required ports are in use') else: # First StorageDriver for this vPool, so generating new ports ports = [] for port_range in System.read_remote_config(client, 'volumedriver.filesystem.ports').split(','): port_range = port_range.strip() if '-' in port_range: current_range = (int(port_range.split('-')[0]), int(port_range.split('-')[1])) else: current_range = (int(port_range), 65536) current_port = current_range[0] while len(ports) < 3: if current_port not in ports_in_use and current_port not in ports_reserved: ports.append(current_port) current_port += 1 if current_port > current_range[1]: break if len(ports) != 3: raise RuntimeError('Could not find enough free ports') else: ports = storagedriver.ports ip_path = Configuration.get('ovs.core.ip.path') if ip_path is None: ip_path = "`which ip`" cmd = "{0} a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1".format(ip_path) ipaddresses = client.run(cmd).strip().split('\n') ipaddresses = [ipaddr.strip() for ipaddr in ipaddresses] grid_ip = System.read_remote_config(client, 'ovs.grid.ip') if grid_ip in ipaddresses: ipaddresses.remove(grid_ip) if not ipaddresses: raise RuntimeError('No available ip addresses found suitable for Storage Router storage ip') if storagerouter.pmachine.hvtype == 'KVM': volumedriver_storageip = '127.0.0.1' else: volumedriver_storageip = parameters['storage_ip'] vrouter_id = '{0}{1}'.format(vpool_name, unique_id) vrouter_config = {'vrouter_id': vrouter_id, 'vrouter_redirect_timeout_ms': '5000', 'vrouter_routing_retries': 10, 'vrouter_volume_read_threshold': 1024, 'vrouter_volume_write_threshold': 1024, 'vrouter_file_read_threshold': 1024, 'vrouter_file_write_threshold': 1024, 'vrouter_min_workers': 4, 'vrouter_max_workers': 16} 
voldrv_arakoon_cluster_id = str(System.read_remote_config(client, 'volumedriver.arakoon.clusterid')) voldrv_arakoon_cluster = ArakoonManagementEx().getCluster(voldrv_arakoon_cluster_id) voldrv_arakoon_client_config = voldrv_arakoon_cluster.getClientConfig() arakoon_node_configs = [] for arakoon_node in voldrv_arakoon_client_config.keys(): arakoon_node_configs.append(ArakoonNodeConfig(arakoon_node, voldrv_arakoon_client_config[arakoon_node][0][0], voldrv_arakoon_client_config[arakoon_node][1])) vrouter_clusterregistry = ClusterRegistry(str(vpool.guid), voldrv_arakoon_cluster_id, arakoon_node_configs) node_configs = [] for existing_storagedriver in StorageDriverList.get_storagedrivers(): if existing_storagedriver.vpool_guid == vpool.guid: node_configs.append(ClusterNodeConfig(str(existing_storagedriver.storagedriver_id), str(existing_storagedriver.cluster_ip), existing_storagedriver.ports[0], existing_storagedriver.ports[1], existing_storagedriver.ports[2])) if new_storagedriver: node_configs.append(ClusterNodeConfig(vrouter_id, grid_ip, ports[0], ports[1], ports[2])) vrouter_clusterregistry.set_node_configs(node_configs) readcaches = [{'path': readcache1, 'size': readcache1_size}] if readcache2: readcaches.append({'path': readcache2, 'size': readcache2_size}) scocaches = [{'path': scocache, 'size': scocache_size}] filesystem_config = {'fs_backend_path': mountpoint_bfs} volumemanager_config = {'metadata_path': metadatapath, 'tlog_path': tlogpath} storagedriver_config_script = """ from ovs.plugin.provider.configuration import Configuration from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration fd_config = {{'fd_cache_path': '{11}', 'fd_extent_cache_capacity': '1024', 'fd_namespace' : 'fd-{0}-{12}'}} storagedriver_configuration = StorageDriverConfiguration('{0}') storagedriver_configuration.configure_backend({1}) storagedriver_configuration.configure_readcache({2}, Configuration.get('volumedriver.readcache.serialization.path') + '/{0}') 
storagedriver_configuration.configure_scocache({3}, '1GB', '2GB') storagedriver_configuration.configure_failovercache('{4}') storagedriver_configuration.configure_filesystem({5}) storagedriver_configuration.configure_volumemanager({6}) storagedriver_configuration.configure_volumerouter('{12}', {7}) storagedriver_configuration.configure_arakoon_cluster('{8}', {9}) storagedriver_configuration.configure_hypervisor('{10}') storagedriver_configuration.configure_filedriver(fd_config) """.format(vpool_name, vpool.metadata, readcaches, scocaches, failovercache, filesystem_config, volumemanager_config, vrouter_config, voldrv_arakoon_cluster_id, voldrv_arakoon_client_config, storagerouter.pmachine.hvtype, fdcache, vpool.guid) System.exec_remote_python(client, storagedriver_config_script) remote_script = """ import os from configobj import ConfigObj from ovs.plugin.provider.configuration import Configuration protocol = Configuration.get('ovs.core.broker.protocol') login = Configuration.get('ovs.core.broker.login') password = Configuration.get('ovs.core.broker.password') vpool_name = {0} uris = [] cfg = ConfigObj('/opt/OpenvStorage/config/rabbitmqclient.cfg') main_section = cfg.get('main') nodes = main_section['nodes'] if type(main_section['nodes']) == list else [main_section['nodes']] for node in nodes: uris.append({{'amqp_uri': '{{0}}://{{1}}:{{2}}@{{3}}'.format(protocol, login, password, cfg.get(node)['location'])}}) from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration queue_config = {{'events_amqp_routing_key': Configuration.get('ovs.core.broker.volumerouter.queue'), 'events_amqp_uris': uris}} for config_file in os.listdir('/opt/OpenvStorage/config/voldrv_vpools'): this_vpool_name = config_file.replace('.json', '') if config_file.endswith('.json') and (vpool_name is None or vpool_name == this_vpool_name): storagedriver_configuration = StorageDriverConfiguration(this_vpool_name) storagedriver_configuration.configure_event_publisher(queue_config) 
""".format(vpool_name if vpool_name is None else "'{0}'".format(vpool_name)) System.exec_remote_python(client, remote_script) # Updating the model storagedriver.storagedriver_id = vrouter_id storagedriver.name = vrouter_id.replace('_', ' ') storagedriver.description = storagedriver.name storagedriver.storage_ip = volumedriver_storageip storagedriver.cluster_ip = grid_ip storagedriver.ports = ports storagedriver.mountpoint = '/mnt/{0}'.format(vpool_name) storagedriver.mountpoint_temp = mountpoint_temp storagedriver.mountpoint_readcache1 = mountpoint_readcache1 storagedriver.mountpoint_readcache2 = mountpoint_readcache2 storagedriver.mountpoint_writecache = mountpoint_writecache storagedriver.mountpoint_foc = mountpoint_foc storagedriver.mountpoint_bfs = mountpoint_bfs storagedriver.mountpoint_md = mountpoint_md storagedriver.storagerouter = storagerouter storagedriver.vpool = vpool storagedriver.save() dirs2create.append(storagedriver.mountpoint) dirs2create.append(mountpoint_writecache + '/' + '/fd_' + vpool_name) dirs2create.append('{0}/fd_{1}'.format(mountpoint_writecache, vpool_name)) file_create_script = """ import os for directory in {0}: if not os.path.exists(directory): os.makedirs(directory) for filename in {1}: if not os.path.exists(filename): open(filename, 'a').close() """.format(dirs2create, files2create) System.exec_remote_python(client, file_create_script) voldrv_config_file = '{0}/voldrv_vpools/{1}.json'.format(System.read_remote_config(client, 'ovs.core.cfgdir'), vpool_name) log_file = '/var/log/ovs/volumedriver/{0}.log'.format(vpool_name) vd_cmd = '/usr/bin/volumedriver_fs -f --config-file={0} --mountpoint {1} --logrotation --logfile {2} -o big_writes -o sync_read -o allow_other'.format( voldrv_config_file, storagedriver.mountpoint, log_file) if storagerouter.pmachine.hvtype == 'KVM': vd_stopcmd = 'umount {0}'.format(storagedriver.mountpoint) else: vd_stopcmd = 'exportfs -u *:{0}; umount {0}'.format(storagedriver.mountpoint) vd_name = 
'volumedriver_{}'.format(vpool_name) log_file = '/var/log/ovs/volumedriver/foc_{0}.log'.format(vpool_name) fc_cmd = '/usr/bin/failovercachehelper --config-file={0} --logfile={1}'.format(voldrv_config_file, log_file) fc_name = 'failovercache_{0}'.format(vpool_name) params = {'<VPOOL_MOUNTPOINT>': storagedriver.mountpoint, '<HYPERVISOR_TYPE>': storagerouter.pmachine.hvtype, '<VPOOL_NAME>': vpool_name, '<UUID>': str(uuid.uuid4())} if Osdist.is_ubuntu(client): if client.file_exists('/opt/OpenvStorage/config/templates/upstart/ovs-volumedriver.conf'): client.run('cp -f /opt/OpenvStorage/config/templates/upstart/ovs-volumedriver.conf /opt/OpenvStorage/config/templates/upstart/ovs-volumedriver_{0}.conf'.format(vpool_name)) client.run('cp -f /opt/OpenvStorage/config/templates/upstart/ovs-failovercache.conf /opt/OpenvStorage/config/templates/upstart/ovs-failovercache_{0}.conf'.format(vpool_name)) else: if client.file_exists('/opt/OpenvStorage/config/templates/systemd/ovs-volumedriver.service'): client.run('cp -f /opt/OpenvStorage/config/templates/systemd/ovs-volumedriver.service /opt/OpenvStorage/config/templates/systemd/ovs-volumedriver_{0}.service'.format(vpool_name)) client.run('cp -f /opt/OpenvStorage/config/templates/systemd/ovs-failovercache.service /opt/OpenvStorage/config/templates/systemd/ovs-failovercache_{0}.service'.format(vpool_name)) service_script = """ from ovs.plugin.provider.service import Service Service.add_service(package=('openvstorage', 'volumedriver'), name='{0}', command='{1}', stop_command='{2}', params={5}) Service.add_service(package=('openvstorage', 'failovercache'), name='{3}', command='{4}', stop_command=None, params={5}) """.format( vd_name, vd_cmd, vd_stopcmd, fc_name, fc_cmd, params ) System.exec_remote_python(client, service_script) if storagerouter.pmachine.hvtype == 'VMWARE': client.run("grep -q '/tmp localhost(ro,no_subtree_check)' /etc/exports || echo '/tmp localhost(ro,no_subtree_check)' >> /etc/exports") if Osdist.is_ubuntu(client): 
client.run('service nfs-kernel-server start') else: client.run('service nfs start') if storagerouter.pmachine.hvtype == 'KVM': client.run('virsh pool-define-as {0} dir - - - - {1}'.format(vpool_name, storagedriver.mountpoint)) client.run('virsh pool-build {0}'.format(vpool_name)) client.run('virsh pool-start {0}'.format(vpool_name)) client.run('virsh pool-autostart {0}'.format(vpool_name)) # Start services for node in nodes: node_client = SSHClient.load(node) for service in services: System.exec_remote_python(node_client, """ from ovs.plugin.provider.service import Service Service.enable_service('{0}') """.format(service)) System.exec_remote_python(node_client, """ from ovs.plugin.provider.service import Service Service.start_service('{0}') """.format(service)) # Fill vPool size vfs_info = os.statvfs('/mnt/{0}'.format(vpool_name)) vpool.size = vfs_info.f_blocks * vfs_info.f_bsize vpool.save() # Configure Cinder ovsdb = PersistentFactory.get_client() vpool_config_key = str('ovs_openstack_cinder_%s' % storagedriver.vpool_guid) if ovsdb.exists(vpool_config_key): # Second node gets values saved by first node cinder_password, cinder_user, tenant_name, controller_ip, config_cinder = ovsdb.get(vpool_config_key) else: config_cinder = parameters.get('config_cinder', False) cinder_password = '' cinder_user = '' tenant_name = '' controller_ip = '' if config_cinder: cinder_password = parameters.get('cinder_pass', cinder_password) cinder_user = parameters.get('cinder_user', cinder_user) tenant_name = parameters.get('cinder_tenant', tenant_name) controller_ip = parameters.get('cinder_controller', controller_ip) # Keystone host if cinder_password: osc = OpenStackCinder(cinder_password = cinder_password, cinder_user = cinder_user, tenant_name = tenant_name, controller_ip = controller_ip) osc.configure_vpool(vpool_name, storagedriver.mountpoint) # Save values for first node to use ovsdb.set(vpool_config_key, [cinder_password, cinder_user, tenant_name, controller_ip, config_cinder])
def test_clone_snapshot(self):
    """
    Validates that a snapshot that has clones will not be deleted while other snapshots will be deleted.

    Builds a minimal model (vPool, StorageRouter, Disk/DiskPartition with a SCRUB role,
    StorageDriver, MDS service), creates a vDisk with three snapshots, clones the oldest
    snapshot into a second vDisk, runs the scheduled snapshot-deletion task one day later
    and asserts the cloned-from snapshot survived.
    """
    # Setup
    # There are 2 disks, second one cloned from a snapshot of the first
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.status = 'RUNNING'
    vpool.save()
    storage_router = StorageRouter()
    storage_router.name = 'storage_router'
    storage_router.ip = '127.0.0.1'
    storage_router.machine_id = System.get_my_machine_id()
    storage_router.rdma_capable = False
    storage_router.save()
    disk = Disk()
    disk.name = 'physical_disk_1'
    disk.aliases = ['/dev/non-existent']
    disk.size = 500 * 1024 ** 3  # 500 GiB
    disk.state = 'OK'
    disk.is_ssd = True
    disk.storagerouter = storage_router
    disk.save()
    disk_partition = DiskPartition()
    disk_partition.disk = disk
    disk_partition.aliases = ['/dev/disk/non-existent']
    disk_partition.size = 400 * 1024 ** 3  # 400 GiB
    disk_partition.state = 'OK'
    disk_partition.offset = 1024
    disk_partition.roles = [DiskPartition.ROLES.SCRUB]
    disk_partition.mountpoint = '/var/tmp'
    disk_partition.save()
    storage_driver = StorageDriver()
    storage_driver.vpool = vpool
    storage_driver.storagerouter = storage_router
    storage_driver.name = 'storage_driver_1'
    storage_driver.mountpoint = '/'
    storage_driver.cluster_ip = storage_router.ip
    storage_driver.storage_ip = '127.0.0.1'
    storage_driver.storagedriver_id = 'storage_driver_1'
    storage_driver.ports = {'management': 1, 'xmlrpc': 2, 'dtl': 3, 'edge': 4}
    storage_driver.save()
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()
    service = Service()
    service.name = 'service_1'
    service.storagerouter = storage_driver.storagerouter
    service.ports = [1]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storage_driver.vpool
    mds_service.save()
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client('storagedriver')
    # Set timeout=0 on the 'snapshots' dynamic — presumably disables its caching so
    # each access re-evaluates it; TODO confirm against the DAL dynamic-property docs
    [dynamic for dynamic in vdisk_1_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0
    travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
    if travis is True:
        print 'Running in Travis, reducing output.'
    base = datetime.datetime.now().date()
    day = datetime.timedelta(1)
    base_timestamp = self._make_timestamp(base, day)
    minute = 60
    hour = minute * 60
    # Three consistent snapshots at 06:30, 12:30 and 18:30 (timestamps in seconds)
    for h in [6, 12, 18]:
        timestamp = base_timestamp + (hour * h)
        VDiskController.create_snapshot(vdisk_guid=vdisk_1_1.guid,
                                        metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                  'is_consistent': True,
                                                  'timestamp': str(timestamp),
                                                  'machineguid': None})
    base_snapshot_guid = vdisk_1_1.snapshots[0]['guid']  # Oldest
    # Clone from the oldest snapshot; its parentsnapshot link must protect that snapshot
    clone_vdisk = VDisk()
    clone_vdisk.name = 'clone_vdisk'
    clone_vdisk.volume_id = 'clone_vdisk'
    clone_vdisk.vpool = vpool
    clone_vdisk.devicename = 'dummy'
    clone_vdisk.parentsnapshot = base_snapshot_guid
    clone_vdisk.size = 0
    clone_vdisk.save()
    clone_vdisk.reload_client('storagedriver')
    for h in [6, 12, 18]:
        timestamp = base_timestamp + (hour * h)
        VDiskController.create_snapshot(vdisk_guid=clone_vdisk.guid,
                                        metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                  'is_consistent': True,
                                                  'timestamp': str(timestamp),
                                                  'machineguid': None})
    # Run the delete-snapshots task one day later, at 00:30
    base_timestamp = self._make_timestamp(base, day * 2)
    ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))
    self.assertIn(base_snapshot_guid, [snap['guid'] for snap in vdisk_1_1.snapshots],
                  'Snapshot was deleted while there are still clones of it')
def _build_service_structure(self, structure):
    """
    Builds an MDS service structure.

    :param structure: Dict describing what to build. Expected keys:
                      'vpools': iterable of vpool ids,
                      'storagerouters': iterable of storagerouter ids,
                      'storagedrivers': iterable of (sd_id, vpool_id, sr_id) tuples,
                      'mds_services': iterable of (mds_id, sd_id) tuples
    :return: Tuple of (vpools, storagerouters, storagedrivers, services,
             mds_services, service_type) — the first five are dicts keyed by
             the ids from `structure`.
    """
    vpools = {}
    storagerouters = {}
    storagedrivers = {}
    services = {}
    mds_services = {}
    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()
    for vpool_id in structure['vpools']:
        vpool = VPool()
        vpool.name = str(vpool_id)
        vpool.backend_type = BackendType()
        vpool.save()
        vpools[vpool_id] = vpool
    for sr_id in structure['storagerouters']:
        storagerouter = StorageRouter()
        storagerouter.name = str(sr_id)
        storagerouter.ip = '10.0.0.{0}'.format(sr_id)
        storagerouter.pmachine = PMachine()
        storagerouter.save()
        storagerouters[sr_id] = storagerouter
    # Unpack the (sd_id, vpool_id, sr_id) tuples directly in the loop header
    for sd_id, vpool_id, sr_id in structure['storagedrivers']:
        storagedriver = StorageDriver()
        storagedriver.vpool = vpools[vpool_id]
        storagedriver.storagerouter = storagerouters[sr_id]
        storagedriver.name = str(sd_id)
        # All mountpoints point at '/' — good enough for unit-test model objects
        # (fixed: mountpoint_temp was assigned twice in the original)
        storagedriver.mountpoint_temp = '/'
        storagedriver.mountpoint_foc = '/'
        storagedriver.mountpoint_readcaches = ['/']
        storagedriver.mountpoint_writecaches = ['/']
        storagedriver.mountpoint_md = '/'
        storagedriver.mountpoint_bfs = '/'
        storagedriver.mountpoint_fragmentcache = '/'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouters[sr_id].ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = str(sd_id)
        storagedriver.ports = [1, 2, 3]
        storagedriver.save()
        storagedrivers[sd_id] = storagedriver
    for mds_id, sd_id in structure['mds_services']:
        sd = storagedrivers[sd_id]
        s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
        service = Service()
        service.name = s_id
        service.storagerouter = sd.storagerouter
        service.ports = [mds_id]
        service.type = service_type
        service.save()
        services[s_id] = service
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = sd.vpool
        mds_service.save()
        mds_services[mds_id] = mds_service
    return vpools, storagerouters, storagedrivers, services, mds_services, service_type
def build_service_structure(structure, previous_structure=None):
    """
    Builds an MDS service structure.

    Idempotent: ids already present in `previous_structure` are reused, not recreated.

    Example:
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'domains': [],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)],       # (<id>, <storagedriver_id>)
             'storagerouter_domains': []}    # (<id>, <storagerouter_id>, <domain_id>)
        )

    :param structure: Dict describing the model objects to create (see example)
    :param previous_structure: Result of an earlier call, to extend incrementally
    :return: Dict with the created/reused objects keyed per category
    """
    if previous_structure is None:
        previous_structure = {}
    vdisks = previous_structure.get('vdisks', {})
    vpools = previous_structure.get('vpools', {})
    domains = previous_structure.get('domains', {})
    services = previous_structure.get('services', {})
    mds_services = previous_structure.get('mds_services', {})
    storagerouters = previous_structure.get('storagerouters', {})
    storagedrivers = previous_structure.get('storagedrivers', {})
    storagerouter_domains = previous_structure.get('storagerouter_domains', {})
    # Reuse the MetadataServer service type if it already exists in the model
    service_type = ServiceTypeList.get_by_name('MetadataServer')
    if service_type is None:
        service_type = ServiceType()
        service_type.name = 'MetadataServer'
        service_type.save()
    srclients = {}
    for domain_id in structure.get('domains', []):
        if domain_id not in domains:
            domain = Domain()
            domain.name = 'domain_{0}'.format(domain_id)
            domain.save()
            domains[domain_id] = domain
    for vpool_id in structure.get('vpools', []):
        if vpool_id not in vpools:
            vpool = VPool()
            vpool.name = str(vpool_id)
            vpool.status = 'RUNNING'
            vpool.save()
            vpools[vpool_id] = vpool
        else:
            vpool = vpools[vpool_id]
        # One StorageRouterClient per vPool; used below to create volumes
        srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
    for sr_id in structure.get('storagerouters', []):
        if sr_id not in storagerouters:
            storagerouter = StorageRouter()
            storagerouter.name = str(sr_id)
            storagerouter.ip = '10.0.0.{0}'.format(sr_id)
            storagerouter.rdma_capable = False
            storagerouter.node_type = 'MASTER'
            storagerouter.machine_id = str(sr_id)
            storagerouter.save()
            storagerouters[sr_id] = storagerouter
            # Each new StorageRouter gets one SSD disk with one DB+SCRUB partition
            disk = Disk()
            disk.storagerouter = storagerouter
            disk.state = 'OK'
            disk.name = '/dev/uda'
            disk.size = 1 * 1024**4  # 1 TiB
            disk.is_ssd = True
            disk.aliases = ['/dev/uda']
            disk.save()
            partition = DiskPartition()
            partition.offset = 0
            partition.size = disk.size
            partition.aliases = ['/dev/uda-1']
            partition.state = 'OK'
            partition.mountpoint = '/tmp/unittest/sr_{0}/disk_1/partition_1'.format(sr_id)
            partition.disk = disk
            partition.roles = [DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB]
            partition.save()
    for sd_id, vpool_id, sr_id in structure.get('storagedrivers', ()):
        if sd_id not in storagedrivers:
            storagedriver = StorageDriver()
            storagedriver.vpool = vpools[vpool_id]
            storagedriver.storagerouter = storagerouters[sr_id]
            storagedriver.name = str(sd_id)
            storagedriver.mountpoint = '/'
            storagedriver.cluster_ip = storagerouters[sr_id].ip
            storagedriver.storage_ip = '10.0.1.{0}'.format(sr_id)
            storagedriver.storagedriver_id = str(sd_id)
            storagedriver.ports = {'management': 1, 'xmlrpc': 2, 'dtl': 3, 'edge': 4}
            storagedriver.save()
            storagedrivers[sd_id] = storagedriver
            Helper._set_vpool_storage_driver_configuration(vpool=vpools[vpool_id], storagedriver=storagedriver)
    for mds_id, sd_id in structure.get('mds_services', ()):
        if mds_id not in mds_services:
            sd = storagedrivers[sd_id]
            s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
            service = Service()
            service.name = s_id
            service.storagerouter = sd.storagerouter
            service.ports = [mds_id]
            service.type = service_type
            service.save()
            services[s_id] = service
            mds_service = MDSService()
            mds_service.service = service
            mds_service.number = 0
            mds_service.capacity = 10
            mds_service.vpool = sd.vpool
            mds_service.save()
            mds_services[mds_id] = mds_service
            # Link the MDS service to the DB partition of its StorageRouter's first disk
            StorageDriverController.add_storagedriverpartition(sd, {'size': None,
                                                                   'role': DiskPartition.ROLES.DB,
                                                                   'sub_role': StorageDriverPartition.SUBROLE.MDS,
                                                                   'partition': sd.storagerouter.disks[0].partitions[0],
                                                                   'mds_service': mds_service})
    for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get('vdisks', ()):
        if vdisk_id not in vdisks:
            vpool = vpools[vpool_id]
            devicename = 'vdisk_{0}'.format(vdisk_id)
            # mds_id may be None -> volume without an MDS backend entry
            mds_backend_config = Helper._generate_mdsmetadatabackendconfig([] if mds_id is None else [mds_services[mds_id]])
            volume_id = srclients[vpool_id].create_volume(devicename, mds_backend_config, 0, str(storage_driver_id))
            vdisk = VDisk()
            vdisk.name = str(vdisk_id)
            vdisk.devicename = devicename
            vdisk.volume_id = volume_id
            vdisk.vpool = vpool
            vdisk.size = 0
            vdisk.save()
            vdisk.reload_client('storagedriver')
            vdisks[vdisk_id] = vdisk
    for srd_id, sr_id, domain_id, backup in structure.get('storagerouter_domains', ()):
        if srd_id not in storagerouter_domains:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = backup
            sr_domain.domain = domains[domain_id]
            sr_domain.storagerouter = storagerouters[sr_id]
            sr_domain.save()
            storagerouter_domains[srd_id] = sr_domain
    return {'vdisks': vdisks,
            'vpools': vpools,
            'domains': domains,
            'services': services,
            'service_type': service_type,
            'mds_services': mds_services,
            'storagerouters': storagerouters,
            'storagedrivers': storagedrivers,
            'storagerouter_domains': storagerouter_domains}
def _prepare(self):
    """
    Builds the model fixture for the vMachine tests: one vPool on one
    StorageRouter/StorageDriver, a template vMachine with a single vDisk and
    a MetadataServer service. Also stubs out MDSServiceController.ensure_safety
    and VDiskController.dtl_checkup with no-ops so tests do not trigger them.

    :return: Tuple of (template vDisk, PMachine)
    """
    fd = FailureDomain()
    fd.name = 'Test'
    fd.save()
    btype = BackendType()
    btype.name = 'BackendType'
    btype.code = 'BT'
    btype.save()
    pool = VPool()
    pool.name = 'vpool'
    pool.backend_type = btype
    pool.save()
    pmach = PMachine()
    pmach.name = 'PMachine'
    pmach.username = '******'
    pmach.ip = '127.0.0.1'
    pmach.hvtype = 'KVM'
    pmach.save()
    template_vm = VMachine()
    template_vm.name = 'vmachine_1'
    template_vm.devicename = 'dummy'
    template_vm.pmachine = pmach
    template_vm.is_vtemplate = True
    template_vm.save()
    template_disk = VDisk()
    template_disk.name = 'vdisk_1_1'
    template_disk.volume_id = 'vdisk_1_1'
    template_disk.vmachine = template_vm
    template_disk.vpool = pool
    template_disk.devicename = 'dummy'
    template_disk.size = 0
    template_disk.save()
    template_disk.reload_client()
    router = StorageRouter()
    router.name = 'storage_router'
    router.ip = '127.0.0.1'
    router.pmachine = pmach
    router.machine_id = System.get_my_machine_id()
    router.rdma_capable = False
    router.primary_failure_domain = fd
    router.save()
    driver = StorageDriver()
    driver.vpool = pool
    driver.storagerouter = router
    driver.name = '1'
    driver.mountpoint = '/'
    driver.cluster_ip = router.ip
    driver.storage_ip = '127.0.0.1'
    driver.storagedriver_id = '1'
    driver.ports = [1, 2, 3]
    driver.save()
    svc_type = ServiceType()
    svc_type.name = 'MetadataServer'
    svc_type.save()
    svc = Service()
    svc.name = '{0}-{1}'.format(driver.storagerouter.name, '1')
    svc.storagerouter = driver.storagerouter
    svc.ports = [1]
    svc.type = svc_type
    svc.save()
    mds = MDSService()
    mds.service = svc
    mds.number = 0
    mds.capacity = 10
    mds.vpool = driver.vpool
    mds.save()

    # No-op replacement for MDSServiceController.ensure_safety
    def _noop_ensure_safety(vdisk):
        pass

    # No-op replacement for the dtl_checkup task; keeps the .delay() interface
    class _NoopDtlCheckup():
        @staticmethod
        def delay(vpool_guid=None, vdisk_guid=None, storagerouters_to_exclude=None):
            pass

    MDSServiceController.ensure_safety = staticmethod(_noop_ensure_safety)
    VDiskController.dtl_checkup = _NoopDtlCheckup
    return template_disk, pmach
def build_service_structure(structure, previous_structure=None):
    """
    Builds an MDS service structure.

    Idempotent: ids already present in `previous_structure` are reused, not recreated.

    Example:
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'domains': [],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)],       # (<id>, <storagedriver_id>)
             'storagerouter_domains': []}    # (<id>, <storagerouter_id>, <domain_id>)
        )

    :param structure: Dict describing the model objects to create (see example)
    :param previous_structure: Result of an earlier call, to extend incrementally
    :return: Dict with the created/reused objects keyed per category
    """
    if previous_structure is None:
        previous_structure = {}
    vdisks = previous_structure.get("vdisks", {})
    vpools = previous_structure.get("vpools", {})
    domains = previous_structure.get("domains", {})
    services = previous_structure.get("services", {})
    mds_services = previous_structure.get("mds_services", {})
    storagerouters = previous_structure.get("storagerouters", {})
    storagedrivers = previous_structure.get("storagedrivers", {})
    storagerouter_domains = previous_structure.get("storagerouter_domains", {})
    # Reuse the MetadataServer service type if it already exists in the model
    service_type = ServiceTypeList.get_by_name("MetadataServer")
    if service_type is None:
        service_type = ServiceType()
        service_type.name = "MetadataServer"
        service_type.save()
    srclients = {}
    for domain_id in structure.get("domains", []):
        if domain_id not in domains:
            domain = Domain()
            domain.name = "domain_{0}".format(domain_id)
            domain.save()
            domains[domain_id] = domain
    for vpool_id in structure.get("vpools", []):
        if vpool_id not in vpools:
            vpool = VPool()
            vpool.name = str(vpool_id)
            vpool.status = "RUNNING"
            vpool.save()
            vpools[vpool_id] = vpool
        else:
            vpool = vpools[vpool_id]
        # One StorageRouterClient per vPool; used below to create volumes
        srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
    for sr_id in structure.get("storagerouters", []):
        if sr_id not in storagerouters:
            storagerouter = StorageRouter()
            storagerouter.name = str(sr_id)
            storagerouter.ip = "10.0.0.{0}".format(sr_id)
            storagerouter.rdma_capable = False
            storagerouter.node_type = "MASTER"
            storagerouter.machine_id = str(sr_id)
            storagerouter.save()
            storagerouters[sr_id] = storagerouter
            # Each new StorageRouter gets one SSD disk with one DB+SCRUB partition
            disk = Disk()
            disk.storagerouter = storagerouter
            disk.state = "OK"
            disk.name = "/dev/uda"
            disk.size = 1 * 1024 ** 4  # 1 TiB
            disk.is_ssd = True
            disk.aliases = ["/dev/uda"]
            disk.save()
            partition = DiskPartition()
            partition.offset = 0
            partition.size = disk.size
            partition.aliases = ["/dev/uda-1"]
            partition.state = "OK"
            partition.mountpoint = "/tmp/unittest/sr_{0}/disk_1/partition_1".format(sr_id)
            partition.disk = disk
            partition.roles = [DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB]
            partition.save()
    for sd_id, vpool_id, sr_id in structure.get("storagedrivers", ()):
        if sd_id not in storagedrivers:
            storagedriver = StorageDriver()
            storagedriver.vpool = vpools[vpool_id]
            storagedriver.storagerouter = storagerouters[sr_id]
            storagedriver.name = str(sd_id)
            storagedriver.mountpoint = "/"
            storagedriver.cluster_ip = storagerouters[sr_id].ip
            storagedriver.storage_ip = "10.0.1.{0}".format(sr_id)
            storagedriver.storagedriver_id = str(sd_id)
            storagedriver.ports = {"management": 1, "xmlrpc": 2, "dtl": 3, "edge": 4}
            storagedriver.save()
            storagedrivers[sd_id] = storagedriver
            Helper._set_vpool_storage_driver_configuration(vpool=vpools[vpool_id], storagedriver=storagedriver)
    for mds_id, sd_id in structure.get("mds_services", ()):
        if mds_id not in mds_services:
            sd = storagedrivers[sd_id]
            s_id = "{0}-{1}".format(sd.storagerouter.name, mds_id)
            service = Service()
            service.name = s_id
            service.storagerouter = sd.storagerouter
            service.ports = [mds_id]
            service.type = service_type
            service.save()
            services[s_id] = service
            mds_service = MDSService()
            mds_service.service = service
            mds_service.number = 0
            mds_service.capacity = 10
            mds_service.vpool = sd.vpool
            mds_service.save()
            mds_services[mds_id] = mds_service
            # Link the MDS service to the DB partition of its StorageRouter's first disk
            StorageDriverController.add_storagedriverpartition(
                sd,
                {
                    "size": None,
                    "role": DiskPartition.ROLES.DB,
                    "sub_role": StorageDriverPartition.SUBROLE.MDS,
                    "partition": sd.storagerouter.disks[0].partitions[0],
                    "mds_service": mds_service,
                },
            )
    for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get("vdisks", ()):
        if vdisk_id not in vdisks:
            vpool = vpools[vpool_id]
            devicename = "vdisk_{0}".format(vdisk_id)
            # mds_id may be None -> volume without an MDS backend entry
            mds_backend_config = Helper._generate_mdsmetadatabackendconfig(
                [] if mds_id is None else [mds_services[mds_id]]
            )
            volume_id = srclients[vpool_id].create_volume(devicename, mds_backend_config, 0, str(storage_driver_id))
            vdisk = VDisk()
            vdisk.name = str(vdisk_id)
            vdisk.devicename = devicename
            vdisk.volume_id = volume_id
            vdisk.vpool = vpool
            vdisk.size = 0
            vdisk.save()
            vdisk.reload_client("storagedriver")
            vdisks[vdisk_id] = vdisk
    for srd_id, sr_id, domain_id, backup in structure.get("storagerouter_domains", ()):
        if srd_id not in storagerouter_domains:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = backup
            sr_domain.domain = domains[domain_id]
            sr_domain.storagerouter = storagerouters[sr_id]
            sr_domain.save()
            storagerouter_domains[srd_id] = sr_domain
    return {
        "vdisks": vdisks,
        "vpools": vpools,
        "domains": domains,
        "services": services,
        "service_type": service_type,
        "mds_services": mds_services,
        "storagerouters": storagerouters,
        "storagedrivers": storagedrivers,
        "storagerouter_domains": storagerouter_domains,
    }
def test_clone_snapshot(self):
    """
    Validates that a snapshot that has clones will not be deleted while other snapshots will be deleted.

    Setup: one vPool with a single Storage Router / Storage Driver / MDS service and a base
    vDisk; a second vDisk is cloned from the oldest snapshot of the first. After running the
    scheduled snapshot deletion one day later, the snapshot that still has a clone must survive.
    """
    # --- Model setup -------------------------------------------------------------------
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.status = 'RUNNING'
    vpool.save()

    storage_router = StorageRouter()
    storage_router.name = 'storage_router'
    storage_router.ip = '127.0.0.1'
    storage_router.machine_id = System.get_my_machine_id()
    storage_router.rdma_capable = False
    storage_router.save()

    disk = Disk()
    disk.name = 'physical_disk_1'
    disk.aliases = ['/dev/non-existent']
    disk.size = 500 * 1024 ** 3
    disk.state = 'OK'
    disk.is_ssd = True
    disk.storagerouter = storage_router
    disk.save()

    disk_partition = DiskPartition()
    disk_partition.disk = disk
    disk_partition.aliases = ['/dev/disk/non-existent']
    disk_partition.size = 400 * 1024 ** 3
    disk_partition.state = 'OK'
    disk_partition.offset = 1024
    disk_partition.roles = [DiskPartition.ROLES.SCRUB]
    disk_partition.mountpoint = '/var/tmp'
    disk_partition.save()

    storage_driver = StorageDriver()
    storage_driver.vpool = vpool
    storage_driver.storagerouter = storage_router
    storage_driver.name = 'storage_driver_1'
    storage_driver.mountpoint = '/'
    storage_driver.cluster_ip = storage_router.ip
    storage_driver.storage_ip = '127.0.0.1'
    storage_driver.storagedriver_id = 'storage_driver_1'
    storage_driver.ports = {'management': 1,
                            'xmlrpc': 2,
                            'dtl': 3,
                            'edge': 4}
    storage_driver.save()

    service_type = ServiceType()
    service_type.name = 'MetadataServer'
    service_type.save()

    service = Service()
    service.name = 'service_1'
    service.storagerouter = storage_driver.storagerouter
    service.ports = [1]
    service.type = service_type
    service.save()

    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storage_driver.vpool
    mds_service.save()

    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client('storagedriver')
    # Disable the cache timeout on the 'snapshots' dynamic so every read reflects the latest state
    [dynamic for dynamic in vdisk_1_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

    # Reduce output when running on Travis CI
    # (was: 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' followed by 'is True')
    travis = os.environ.get('TRAVIS') == 'true'
    if travis:
        print('Running in Travis, reducing output.')

    # --- Snapshot creation -------------------------------------------------------------
    base = datetime.datetime.now().date()
    day = datetime.timedelta(1)
    base_timestamp = self._make_timestamp(base, day)
    minute = 60
    hour = minute * 60

    def _create_consistent_snapshots(vdisk):
        # Take a consistent snapshot of the given vDisk at hour 6, 12 and 18 of the base day
        for h in [6, 12, 18]:
            timestamp = base_timestamp + (hour * h)
            VDiskController.create_snapshot(vdisk_guid=vdisk.guid,
                                            metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                      'is_consistent': True,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})

    _create_consistent_snapshots(vdisk_1_1)
    base_snapshot_guid = vdisk_1_1.snapshots[0]['guid']  # Oldest

    # Clone from the oldest snapshot; this snapshot must never be cleaned up
    clone_vdisk = VDisk()
    clone_vdisk.name = 'clone_vdisk'
    clone_vdisk.volume_id = 'clone_vdisk'
    clone_vdisk.vpool = vpool
    clone_vdisk.devicename = 'dummy'
    clone_vdisk.parentsnapshot = base_snapshot_guid
    clone_vdisk.size = 0
    clone_vdisk.save()
    clone_vdisk.reload_client('storagedriver')
    _create_consistent_snapshots(clone_vdisk)

    # --- Validation --------------------------------------------------------------------
    # Run the scheduled delete one day later; the snapshot with a clone must survive
    base_timestamp = self._make_timestamp(base, day * 2)
    ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))
    self.assertIn(base_snapshot_guid,
                  [snap['guid'] for snap in vdisk_1_1.snapshots],
                  'Snapshot was deleted while there are still clones of it')
def test_clone(self):
    """
    Test the clone functionality
        - Create a vDisk with name 'clone1'
        - Clone the vDisk and make some assertions
        - Attempt to clone again using same name and same devicename
        - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
        - Attempt to clone on Storage Driver without MDS service
        - Attempt to clone from snapshot which is not yet completely synced to backend
        - Attempt to delete the snapshot from which a clone was made
        - Clone the vDisk on another Storage Router
        - Clone another vDisk with name 'clone1' linked to another vPool
    """
    structure = Helper.build_service_structure(
        {'vpools': [1, 2],
         'storagerouters': [1, 2, 3],
         'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    vpools = structure['vpools']
    mds_services = structure['mds_services']
    service_type = structure['service_type']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
    self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

    def _assert_vdisk_count(amount, message):
        # Validates the total amount of modeled vDisks
        self.assertEqual(first=len(VDiskList.get_vdisks()), second=amount, msg=message)

    # Basic clone scenario
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    _assert_vdisk_count(2, 'Expected to find 2 vDisks')
    clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshots[0]['guid'])
    self.assertEqual(first=len(clones), second=1, msg='Expected to find 1 vDisk with parent snapshot')
    self.assertEqual(first=len(vdisk1.child_vdisks), second=1, msg='Expected to find 1 child vDisk')
    for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
        self.assertIn(member=expected_key, container=clone1_info, msg='Expected to find key "{0}" in clone_info'.format(expected_key))
    self.assertEqual(first=clones[0].guid, second=clone1_info['vdisk_guid'], msg='Guids do not match')
    self.assertEqual(first=clones[0].name, second=clone1_info['name'], msg='Names do not match')
    self.assertEqual(first=clones[0].devicename, second=clone1_info['backingdevice'], msg='Device names do not match')

    # Attempt to clone again with same name
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    _assert_vdisk_count(2, 'Expected to find 2 vDisks after failed clone attempt 1')

    # Attempt to clone again with a name which will have identical devicename
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1%')
    _assert_vdisk_count(2, 'Expected to find 2 vDisks after failed clone attempt 2')

    # Attempt to clone on Storage Router on which vPool is not extended
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', storagerouter_guid=storagerouters[2].guid)
    _assert_vdisk_count(2, 'Expected to find 2 vDisks after failed clone attempt 3')

    # Attempt to clone on non-existing Storage Driver (restore the id afterwards)
    storagedrivers[1].storagedriver_id = 'non-existing'
    storagedrivers[1].save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    _assert_vdisk_count(2, 'Expected to find 2 vDisks after failed clone attempt 4')
    storagedrivers[1].storagedriver_id = '1'
    storagedrivers[1].save()

    # Attempt to clone on Storage Driver without MDS service (move the service away, then back)
    mds_services[1].service.storagerouter = storagerouters[3]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    _assert_vdisk_count(2, 'Expected to find 2 vDisks after failed clone attempt 5')
    mds_services[1].service.storagerouter = storagerouters[1]
    mds_services[1].service.save()

    # Attempt to clone by providing snapshot_id not synced to backend
    self.assertEqual(first=len(vdisk1.snapshots), second=1, msg='Expected to find only 1 snapshot before cloning')
    metadata = {'label': 'label1',
                'timestamp': int(time.time()),
                'is_sticky': False,
                'in_backend': False,
                'is_automatic': True,
                'is_consistent': True}
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata=metadata)
    self.assertEqual(first=len(vdisk1.snapshots), second=2, msg='Expected to find 2 snapshots')
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    _assert_vdisk_count(2, 'Expected to find 2 vDisks after failed clone attempt 6')

    # Update backend synced flag and retry
    vdisk1.storagedriver_client._set_snapshot_in_backend(vdisk1.volume_id, snapshot_id, True)
    vdisk1.invalidate_dynamics('snapshots')
    VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisk1.invalidate_dynamics()
    _assert_vdisk_count(3, 'Expected to find 3 vDisks')
    self.assertEqual(first=len(vdisk1.child_vdisks), second=2, msg='Expected to find 2 child vDisks')
    self.assertEqual(first=len(vdisk1.snapshots), second=2, msg='Expected to find 2 snapshots after cloning from a specified snapshot')

    # Attempt to delete the snapshot that has clones
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid, snapshot_id=snapshot_id)

    # Clone on specific Storage Router: model an extra Storage Driver + MDS service on storagerouter 2
    storagedriver = StorageDriver()
    storagedriver.vpool = vpools[1]
    storagedriver.storagerouter = storagerouters[2]
    storagedriver.name = '3'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storagerouters[2].ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '3'
    storagedriver.ports = {'management': 1,
                           'xmlrpc': 2,
                           'dtl': 3,
                           'edge': 4}
    storagedriver.save()

    s_id = '{0}-1'.format(storagedriver.storagerouter.name)
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [3]
    service.type = service_type
    service.save()

    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()

    clone3 = VDisk(VDiskController.clone(vdisk_guid=vdisk1.guid,
                                         name='clone3',
                                         storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
    self.assertEqual(first=clone3.storagerouter_guid, second=storagerouters[2].guid, msg='Incorrect Storage Router on which the clone is attached')

    # Clone vDisk with existing name on another vPool
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))
    clone_vdisk2 = VDisk(VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone1')['vdisk_guid'])
    self.assertEqual(first=clone_vdisk2.vpool, second=vpools[2], msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
    self.assertEqual(first=len([vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1']), second=2, msg='Expected to find 2 vDisks with name "clone1"')

    # Attempt to clone without specifying snapshot and snapshot fails to sync to backend.
    # try/finally guarantees the global 'synced' flag is restored even when an assertion
    # fails, so the broken state does not leak into other tests
    StorageRouterClient.synced = False
    try:
        vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_2', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone4')
        vdisk2.invalidate_dynamics()
        self.assertEqual(first=len(vdisk2.snapshots), second=0, msg='Expected to find 0 snapshots after clone failure')
        self.assertEqual(first=len(vdisk2.child_vdisks), second=0, msg='Expected to find 0 children after clone failure')
    finally:
        StorageRouterClient.synced = True