def test_dtl_no_unnecessary_reconfiguration(self):
    """
    Verify that when more than 3 Storage Routers are available as possible DTL target,
    the same target is used over and over again
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'domains': [1],
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
         'storagerouter_domains': [(1, 1, 1, True), (2, 2, 1, False), (3, 3, 1, False), (4, 4, 1, False),
                                   (5, 5, 1, False), (6, 6, 1, False), (7, 7, 1, False), (8, 8, 1, False),
                                   (9, 9, 1, False), (10, 10, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
         'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5),
                            (6, 1, 6), (7, 1, 7), (8, 1, 8), (9, 1, 9), (10, 1, 10)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    # Initial checkup: any Storage Router except the one hosting the vDisk is acceptable
    config = self._run_and_validate_dtl_checkup(
        vdisk=vdisk,
        validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:]]},
                     {'key': 'port', 'value': 3},
                     {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Rerun DTL checkup 10 times and validate target does not change even though
    # 9 Storage Routers are potential candidate
    for _ in xrange(10):
        self._run_and_validate_dtl_checkup(
            vdisk=vdisk,
            validations=[{'key': 'host', 'value': config.host},
                         {'key': 'port', 'value': 3},
                         {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def test_incorrect_dtl_fixup(self):
    """
    Validate that the DTL checkup logic can repair a vDisk whose DTL has been
    configured towards an unexpected IP
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1, 2],
         'storagedrivers': [(1, 1, 1), (2, 1, 2)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    # Storage IPs of all Storage Routers; any of them is a valid DTL host
    expected_hosts = [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()]
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': expected_hosts},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Point the DTL manually at an unexpected IP ...
    vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id,
                                                     config=DTLConfig(str(storagerouters[1].ip), 3, DTLMode.SYNCHRONOUS))

    # ... and verify a subsequent DTL checkup restores it again
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': expected_hosts},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.SYNCHRONOUS}])
def test_multi_node_with_unused_domains(self):
    """
    Test DTL checkup on a multi node setup and create some Domains,
    but do not link them to any Storage Router
    """
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |                |                  |             |
    # |  sr 2          |        |                |                  |      1      |
    # |  sr 3          |        |                |                  |      1      |
    # |  sr 4          |        |                |                  |      1      |
    # |  sr 5          |        |                |                  |      1      |
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'domains': [1, 2, 3],
         'storagerouters': [1, 2, 3, 4, 5],
         'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    # Unlinked Domains are ignored: any Storage Router except the vDisk's own is a candidate
    self._run_and_validate_dtl_checkup(
        vdisk=vdisk,
        validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:]]},
                     {'key': 'port', 'value': 3},
                     {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def test_event_migrate_from_volumedriver(self):
    """
    Test migrate from volumedriver event
    """
    _ = self
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1, 2],
         'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    vpool = structure['vpools'][1]
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    # Create a vDisk on the first Storage Driver, then migrate it to the second
    vdisk = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                             volume_size=1024 ** 4,
                                             storagedriver_guid=storagedrivers[1].guid))
    vdisk.storagedriver_client.migrate(vdisk.volume_id, storagedrivers[2].storagedriver_id, False)

    # Process the volumedriver event and verify the model reflects the new owner
    VDiskController.migrate_from_voldrv(volume_id=vdisk.volume_id,
                                        new_owner_id=storagedrivers[2].storagedriver_id)
    self.assertEqual(vdisk.storagedriver_id, storagedrivers[2].storagedriver_id)
def test_delete_snapshot(self):
    """
    Test the delete snapshot functionality
        - Create a vDisk and take a snapshot
        - Attempt to delete a non-existing snapshot
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedrivers = structure['storagedrivers']
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                              volume_size=1024 ** 3,
                                              storagedriver_guid=storagedrivers[1].guid))
    VDiskController.create_snapshot(vdisk_guid=vdisk1.guid,
                                    metadata={'timestamp': int(time.time()),
                                              'label': 'label1',
                                              'is_consistent': True,
                                              'is_automatic': True,
                                              'is_sticky': False})
    self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find 1 snapshot')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 1, msg='Expected to find 1 snapshot ID')

    # Deleting an unknown snapshot ID must raise
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid, snapshot_id='non-existing')

    # Deleting the real snapshot must remove it from both snapshot lists
    VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid, snapshot_id=vdisk1.snapshot_ids[0])
    self.assertTrue(expr=len(vdisk1.snapshots) == 0, msg='Expected to find no more snapshots')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 0, msg='Expected to find no more snapshot IDs')
def test_single_node(self):
    """
    Execute some DTL checkups on a single node installation
    """
    # Create 1 vdisk in single node without domains
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    storagerouter = structure['storagerouters'][1]

    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  1             |   1    |                |                  |             |
    self._roll_out_dtl_services(vpool=vpool, storagerouters=structure['storagerouters'])
    # Only 1 node: no DTL target can be configured
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])

    # Create some domains, but do not attach them yet
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  1             |   1    |                |                  |             |
    domains = {}
    for domain_id in range(1, 3):
        domain = Domain()
        domain.name = 'domain_{0}'.format(domain_id)
        domain.save()
        domains[domain_id] = domain
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])

    # Attach a regular Domain to the single Storage Router
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |   1    |  domain 1      |                  |             |
    sr_domain = StorageRouterDomain()
    sr_domain.backup = False
    sr_domain.domain = domains[1]
    sr_domain.storagerouter = storagerouter
    sr_domain.save()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])

    # Attach a recovery Domain to the single Storage Router
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |   1    |                |  domain 1        |             |
    for junction in storagerouter.domains:
        junction.delete()
    sr_domain = StorageRouterDomain()
    sr_domain.backup = True
    sr_domain.domain = domains[1]
    sr_domain.storagerouter = storagerouter
    sr_domain.save()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])
def test_delete(self):
    """
    Test the delete of a vDisk
        - Create 2 vDisks with identical names on 2 different vPools
        - Delete 1st vDisk and verify other still remains on correct vPool
        - Delete 2nd vDisk and verify no more volumes left
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1, 2],
         'domains': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    domains = structure['domains']
    storagedrivers = structure['storagedrivers']
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                              volume_size=1024 ** 3,
                                              storagedriver_guid=storagedrivers[1].guid))
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                              volume_size=1024 ** 3,
                                              storagedriver_guid=storagedrivers[2].guid))

    # Give vDisk1 a Domain junction so its cleanup is exercised too
    vdisk_domain = VDiskDomain()
    vdisk_domain.domain = domains[1]
    vdisk_domain.vdisk = vdisk1
    vdisk_domain.save()

    # Delete vDisk1 and make some assertions
    VDiskController.delete(vdisk_guid=vdisk1.guid)
    with self.assertRaises(ObjectNotFoundException):
        VDisk(vdisk1.guid)
    self.assertEqual(first=len(VDiskController.list_volumes()),
                     second=1,
                     msg='Expected to find only 1 volume in Storage Driver list_volumes')
    self.assertIn(member=vdisk2,
                  container=VDiskList.get_vdisks(),
                  msg='vDisk2 should still be modeled')

    # Delete vDisk2 and make some assertions
    VDiskController.delete(vdisk_guid=vdisk2.guid)
    with self.assertRaises(ObjectNotFoundException):
        VDisk(vdisk2.guid)
    self.assertEqual(first=len(VDiskController.list_volumes()),
                     second=0,
                     msg='Expected to find no more volumes in Storage Driver list_volumes')
def test_sync_vdisk_with_voldrv(self):
    """
    Delete modeled vDisk clones behind the model's back and verify
    sync_with_reality restores them, including their parent relations
    """
    clone_depth = 3

    def _make_clones(vdisks_map, depth=clone_depth):
        # Repeatedly clone the most recently added vDisk, chaining clones `depth` deep
        for level in range(depth):
            previous_vd = list(vdisks_map.itervalues())[-1]
            new_name = previous_vd.name + '_clone'
            new_guid = VDiskController.clone(previous_vd.guid, new_name).get('vdisk_guid')
            vdisks_map[new_name] = VDisk(new_guid)

    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    vdisk_name = 'vdisk_1'
    storagedriver = structure['storagedrivers'][1]
    vdisk_1 = VDisk(VDiskController.create_new(volume_name=vdisk_name,
                                               volume_size=1024 ** 4,
                                               storagedriver_guid=storagedriver.guid))
    vdisks = OrderedDict()
    vdisks[vdisk_name] = vdisk_1
    _make_clones(vdisks)
    self.assertEquals(clone_depth + 1, len(list(VDiskList.get_vdisks())))

    # These vDisks are clones and ought to be deleted (reverse order, original excluded)
    delete_list = list(vdisks.itervalues())[::-1][:-1]
    for vdisk in delete_list:
        for mds_service in vdisk.mds_services:
            mds_service.delete()
        vdisk.delete()
    self.assertEquals(1, len(list(VDiskList.get_vdisks())))  # Make sure vDisk clones are properly removed
    self.assertEquals(VDiskList.get_vdisks()[0].name, vdisk_name)  # Make sure only item left is original vDisk

    VDiskController.sync_with_reality()
    self.assertEquals(clone_depth + 1, len(list(VDiskList.get_vdisks())))  # The clones should be in place now

    # As much parents should be detected as the depth of the clones
    parents = 0
    for vdisk in VDiskList.get_vdisks():
        try:
            if vdisk.parent_vdisk.name:
                parents += 1
        except AttributeError:
            pass
    self.assertEquals(clone_depth, parents)
def test_folder_renames(self):
    """
    Validates whether folder renames are correctly processed
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedriver = structure['storagedrivers'][1]
    vdisk1 = VDisk(VDiskController.create_new('foo/one.raw', 1024 ** 3, storagedriver.guid))
    vdisk2 = VDisk(VDiskController.create_new('bar/two.raw', 1024 ** 3, storagedriver.guid))
    vdisk3 = VDisk(VDiskController.create_new('three.raw', 1024 ** 3, storagedriver.guid))

    def _refresh():
        # Discard cached properties so devicenames are re-read from the model
        vdisk1.discard()
        vdisk2.discard()
        vdisk3.discard()

    # Rename of a folder that matches no vDisk: nothing changes
    VDiskController.rename_from_voldrv(old_path='/thr',
                                       new_path='/test',
                                       storagedriver_id=storagedriver.storagedriver_id)
    _refresh()
    self.assertEqual(vdisk1.devicename, '/foo/one.raw')
    self.assertEqual(vdisk2.devicename, '/bar/two.raw')
    self.assertEqual(vdisk3.devicename, '/three.raw')

    # Rename /foo -> /bar: only vdisk1 moves
    VDiskController.rename_from_voldrv(old_path='/foo',
                                       new_path='/bar',
                                       storagedriver_id=storagedriver.storagedriver_id)
    _refresh()
    self.assertEqual(vdisk1.devicename, '/bar/one.raw')
    self.assertEqual(vdisk2.devicename, '/bar/two.raw')
    self.assertEqual(vdisk3.devicename, '/three.raw')

    # Rename /bar -> /foo: both vDisks inside /bar move
    VDiskController.rename_from_voldrv(old_path='/bar',
                                       new_path='/foo',
                                       storagedriver_id=storagedriver.storagedriver_id)
    _refresh()
    self.assertEqual(vdisk1.devicename, '/foo/one.raw')
    self.assertEqual(vdisk2.devicename, '/foo/two.raw')
    self.assertEqual(vdisk3.devicename, '/three.raw')
def test_exception_handling(self):
    """
    Test if the scheduled job can handle exceptions
    """
    def raise_an_exception(*args, **kwargs):
        raise RuntimeError('Emulated snapshot delete error')

    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1), (2, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
    )
    vdisk_1, vdisk_2 = structure['vdisks'].values()
    storagedriver_1 = structure['storagedrivers'][1]

    for vdisk in [vdisk_1, vdisk_2]:
        # Expire the 'snapshots' dynamic immediately so snapshot lists are always re-fetched
        [dynamic for dynamic in vdisk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0
        # Create 2 inconsistent snapshots old enough to be eligible for deletion
        for i in xrange(0, 2):
            metadata = {'label': str(i),
                        'is_consistent': False,
                        'is_sticky': False,
                        'timestamp': str((int(time.time() - datetime.timedelta(2).total_seconds() - i)))}
            snapshot_id = VDiskController.create_snapshot(vdisk.guid, metadata)
            if vdisk == vdisk_1:
                # Make every snapshot delete for vdisk_1 blow up
                StorageRouterClient.delete_snapshot_callbacks[vdisk.volume_id] = {snapshot_id: raise_an_exception}

    with self.assertRaises(RuntimeError):
        GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid)

    self.assertEqual(1, len(vdisk_2.snapshot_ids), 'One snapshot should be removed for vdisk 2')
    self.assertEqual(2, len(vdisk_1.snapshot_ids), 'No snapshots should be removed for vdisk 1')
def test_from_single_node_to_multi_node(self):
    """
    Deploy a vDisk on a single node --> This should result in no DTL configured
    Add an additional node and verify DTL will be set
    """
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |   1    |                |                  |             |
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
    # Single node: no DTL possible
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])

    # Add a Storage Router
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |   1    |                |                  |             |
    # |  sr 2          |        |                |                  |      1      |
    storagerouter = StorageRouter()
    storagerouter.name = '2'
    storagerouter.ip = '10.0.0.2'
    storagerouter.rdma_capable = False
    storagerouter.save()
    storagerouters[2] = storagerouter
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    storagedriver = StorageDriver()
    storagedriver.vpool = vpool
    storagedriver.storagerouter = storagerouter
    storagedriver.name = '2'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storagerouter.ip
    storagedriver.storage_ip = '10.0.1.2'
    storagedriver.storagedriver_id = '2'
    storagedriver.ports = {'management': 1,
                           'xmlrpc': 2,
                           'dtl': 3,
                           'edge': 4}
    storagedriver.save()

    # With the 2nd node in place, DTL must now target it
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def test_multi_node_with_used_domains_on_local_sr(self):
    """
    Test DTL checkup on a multi node setup and create some Domains and link them
    to the Storage Router on which the vDisk lives
    """
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |   1    |  domain 1      |                  |             |
    # |  sr 2          |        |                |                  |             |
    # |  sr 3          |        |                |                  |             |
    # |  sr 4          |        |                |                  |             |
    # |  sr 5          |        |                |                  |             |
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'domains': [1, 2, 3],
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1, 2, 3, 4, 5],
         'storagerouter_domains': [(1, 1, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
         'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    domain = structure['domains'][1]
    storagerouters = structure['storagerouters']
    storagerouter = storagerouters[1]

    # When domains have been attached to the StorageRouter on which the vDisk resides,
    # but no other Storage Routers have same Domain, random SR is chosen
    self._roll_out_dtl_services(vpool=vpool, storagerouters=structure['storagerouters'])
    self._run_and_validate_dtl_checkup(
        vdisk=vdisk,
        validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()]},
                     {'key': 'port', 'value': 3},
                     {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Remove the linked Domain and add a recovery Domain instead --> DTL checkup should not change anything
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |   1    |                |  domain 1        |             |
    # |  sr 2          |        |                |                  |             |
    # |  sr 3          |        |                |                  |             |
    # |  sr 4          |        |                |                  |             |
    # |  sr 5          |        |                |                  |             |
    for junction in storagerouter.domains:
        junction.delete()
    sr_domain = StorageRouterDomain()
    sr_domain.backup = True
    sr_domain.domain = domain
    sr_domain.storagerouter = storagerouter
    sr_domain.save()
    self._run_and_validate_dtl_checkup(
        vdisk=vdisk,
        validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()]},
                     {'key': 'port', 'value': 3},
                     {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def test_delete_snapshot_scrubbing_lock(self):
    """
    Tests the skip-if-scrubbed logic: deleting a snapshot of a vDisk that is
    currently being scrubbed must fail with a RuntimeError.
    """
    snapshot_while_scrub_results = []

    def delete_snapshot_while_scrubbing(*args, **kwargs):
        # Hook fired while the scrubber holds the vDisk; the delete must raise
        _ = args, kwargs
        try:
            snapshot_while_scrub_results.append(VDiskController.delete_snapshot(vdisk_1.guid, vdisk_1.snapshot_ids[0]))
        except RuntimeError as ex:
            snapshot_while_scrub_results.append(ex)

    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
    )
    vdisks = structure['vdisks']
    vdisk_1 = vdisks[1]

    # Create automatic snapshot for both vDisks
    success, fail = GenericController.snapshot_all_vdisks()
    self.assertEqual(first=len(fail), second=0, msg='Expected 0 failed snapshots')
    self.assertEqual(first=len(success), second=1, msg='Expected 1 successful snapshots')
    self.assertEqual(first=len(vdisk_1.snapshot_ids), second=1, msg='Expected 1 snapshot ID for vDisk {0}'.format(vdisk_1.name))
    self.assertEqual(first=len(vdisk_1.snapshots), second=1, msg='Expected 1 snapshot for vDisk {0}'.format(vdisk_1.name))

    proxy_names, thread_names, vdisk_namespaces = self.generate_scrub_related_info(structure)
    LockedClient.scrub_controller = {'possible_threads': thread_names,
                                     'volumes': {},
                                     'waiter': Waiter(len(thread_names[0:1]))}  # only 1 disks -> 1 thread

    # Scrub all volumes
    for vdisk_id, vdisk in vdisks.iteritems():
        LockedClient.scrub_controller['volumes'][vdisk.volume_id] = {'success': True,
                                                                     'scrub_work': range(vdisk_id)}
    hooks = {'post_vdisk_scrub_registration': delete_snapshot_while_scrubbing}  # Make the scrubber wait
    ScrubShared._test_hooks.update(hooks)
    GenericController.execute_scrub()

    # Ensure delete snapshot fails for vdisk_1 because it is being scrubbed
    result_while_scrub = snapshot_while_scrub_results[0]
    # FIX: string literal was broken across lines by formatting damage; reassembled
    self.assertIsInstance(result_while_scrub, Exception, 'Expected an exception to have occurred')
    # FIX: corrected 'Excpetion' typo in the assertion message
    self.assertEqual(str(result_while_scrub),
                     'VDisk is being scrubbed. Unable to remove snapshots at this time',
                     'Exception should be about disk being scrubbed')
    self.assertEqual(first=len(vdisk_1.snapshot_ids), second=1, msg='Expected 1 snapshot ID for vDisk {0}'.format(vdisk_1.name))
    self.assertEqual(first=len(vdisk_1.snapshots), second=1, msg='Expected 1 snapshot for vDisk {0}'.format(vdisk_1.name))
def test_scrubbing_exception_handling(self):
    """
    Test if the scheduled job can handle scrub related exceptions
    """
    def raise_an_exception(*args, **kwargs):
        raise RuntimeError(SCRUB_VDISK_EXCEPTION_MESSAGE)

    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
    )
    vdisk_1 = structure['vdisks'][1]
    storagedriver_1 = structure['storagedrivers'][1]

    # Expire the 'snapshots' dynamic immediately so snapshot lists are always re-fetched
    [dynamic for dynamic in vdisk_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

    # Create 2 old inconsistent snapshots whose deletion raises a scrub-related error
    for i in xrange(0, 2):
        metadata = {'label': str(i),
                    'is_consistent': False,
                    'is_sticky': False,
                    'timestamp': str((int(time.time() - datetime.timedelta(2).total_seconds() - i)))}
        snapshot_id = VDiskController.create_snapshot(vdisk_1.guid, metadata)
        StorageRouterClient.delete_snapshot_callbacks[vdisk_1.volume_id] = {snapshot_id: raise_an_exception}

    # Scrub-related errors are tolerated: the job must not propagate them
    GenericController.delete_snapshots_storagedriver(storagedriver_guid=storagedriver_1.guid)
    self.assertEqual(2, len(vdisk_1.snapshot_ids), 'No snapshots should be removed for vdisk 1')
def test_create_snapshot(self):
    """
    Test the create snapshot functionality
        - Create a vDisk
        - Attempt to create a snapshot providing incorrect parameters
        - Create a snapshot and make some assertions
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedrivers = structure['storagedrivers']
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                              volume_size=1024 ** 3,
                                              storagedriver_guid=storagedrivers[1].guid))

    # Metadata must be a dict; a string must be rejected
    with self.assertRaises(ValueError):
        # noinspection PyTypeChecker
        VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata='')

    now = int(time.time())
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid,
                                                  metadata={'timestamp': now,
                                                            'label': 'label1',
                                                            'is_consistent': True,
                                                            'is_automatic': True,
                                                            'is_sticky': False})
    self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find 1 snapshot')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 1, msg='Expected to find 1 snapshot ID')

    # The modeled snapshot must expose exactly the expected keys with the expected values
    snapshot = vdisk1.snapshots[0]
    expected_keys = {'guid', 'timestamp', 'label', 'is_consistent', 'is_automatic', 'is_sticky', 'in_backend', 'stored'}
    self.assertEqual(first=expected_keys,
                     second=set(snapshot.keys()),
                     msg='Set of expected keys differs from reality. Expected: {0} - Reality: {1}'.format(expected_keys, set(snapshot.keys())))
    for key, value in {'guid': snapshot_id,
                       'label': 'label1',
                       'stored': 0,
                       'is_sticky': False,
                       'timestamp': now,
                       'in_backend': True,
                       'is_automatic': True,
                       'is_consistent': True}.iteritems():
        self.assertEqual(first=value,
                         second=snapshot[key],
                         msg='Value for key "{0}" does not match reality. Expected: {1} - Reality: {2}'.format(key, value, snapshot[key]))
def test_event_resize_from_volumedriver(self):
    """
    Test resize from volumedriver event
        - Create a vDisk using the resize event
        - Resize the created vDisk using the same resize event
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    vpools = structure['vpools']
    storagedrivers = structure['storagedrivers']
    mds_service = structure['mds_services'][1]

    # Create volume using resize from voldrv
    device_name = '/vdisk.raw'
    srclient = StorageRouterClient(vpools[1].guid, None)
    mds_backend_config = DalHelper.generate_mds_metadata_backend_config([mds_service])
    volume_id = srclient.create_volume(device_name, mds_backend_config, 1024 ** 4, str(storagedrivers[1].storagedriver_id))
    VDiskController.resize_from_voldrv(volume_id=volume_id,
                                       volume_size=1024 ** 4,
                                       volume_path=device_name,
                                       storagedriver_id=storagedrivers[1].storagedriver_id)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 1, msg='Expected to find 1 vDisk in model')
    self.assertEqual(first=vdisks[0].name, second='vdisk', msg='Volume name should be vdisk')
    self.assertEqual(first=vdisks[0].volume_id, second=volume_id, msg='Volume ID should be {0}'.format(volume_id))
    self.assertEqual(first=vdisks[0].devicename, second=device_name, msg='Device name should be {0}'.format(device_name))
    self.assertEqual(first=vdisks[0].size, second=1024 ** 4, msg='Size should be 1 TiB')

    # Resize volume using resize from voldrv
    VDiskController.resize_from_voldrv(volume_id=volume_id,
                                       volume_size=2 * 1024 ** 4,
                                       volume_path=device_name,
                                       storagedriver_id=storagedrivers[1].storagedriver_id)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 1, msg='Expected to find 1 vDisk in model')
    self.assertEqual(first=vdisks[0].name, second='vdisk', msg='Volume name should be vdisk')
    self.assertEqual(first=vdisks[0].size, second=2 * 1024 ** 4, msg='Size should be 2 TiB')
def test_set_as_template(self):
    """
    Test the set as template functionality
        - Create a vDisk
        - Set it as template and make some assertions
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedrivers = structure['storagedrivers']
    vdisk_1 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                               volume_size=1024 ** 4,
                                               storagedriver_guid=storagedrivers[1].guid))

    # Take 5 snapshots; converting to template should collapse them to 1
    metadata = {'is_consistent': True,
                'is_automatic': True,
                'is_sticky': False}
    for x in range(5):
        metadata['label'] = 'label{0}'.format(x)
        metadata['timestamp'] = int(time.time())
        VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, metadata=metadata)
    self.assertTrue(expr=len(vdisk_1.snapshots) == 5, msg='Expected to find 5 snapshots')
    self.assertTrue(expr=len(vdisk_1.snapshot_ids) == 5, msg='Expected to find 5 snapshot IDs')

    # Set as template and validate the model
    self.assertFalse(expr=vdisk_1.is_vtemplate, msg='Dynamic property "is_vtemplate" should be False')
    VDiskController.set_as_template(vdisk_guid=vdisk_1.guid)
    self.assertTrue(expr=vdisk_1.is_vtemplate, msg='Dynamic property "is_vtemplate" should be True')
    self.assertTrue(expr=len(vdisk_1.snapshots) == 1,
                    msg='Expected to find only 1 snapshot after converting to template')
    self.assertTrue(expr=len(vdisk_1.snapshot_ids) == 1,
                    msg='Expected to find only 1 snapshot ID after converting to template')

    # Try again and verify job succeeds, previously we raised error when setting as template an additional time
    VDiskController.set_as_template(vdisk_1.guid)
    self.assertTrue(expr=vdisk_1.is_vtemplate,
                    msg='Dynamic property "is_vtemplate" should still be True')

    # Clone vDisk and verify both child and parent can no longer be set as template
    vdisk_2 = VDisk(VDiskController.create_new(volume_name='vdisk_2',
                                               volume_size=1024 ** 4,
                                               storagedriver_guid=storagedrivers[1].guid))
    vdisk_2_clone_1 = VDisk(VDiskController.clone(vdisk_guid=vdisk_2.guid,
                                                  name='vdisk_2_clone_1')['vdisk_guid'])
    with self.assertRaises(RuntimeError):
        VDiskController.set_as_template(vdisk_guid=vdisk_2.guid)
    with self.assertRaises(RuntimeError):
        VDiskController.set_as_template(vdisk_guid=vdisk_2_clone_1.guid)
def test_create_new(self):
    """
    Test the create new volume functionality
        - Attempt to create a vDisk larger than 64 TiB
        - Create a vDisk of exactly 64 TiB
        - Attempt to create a vDisk with identical name
        - Attempt to create a vDisk with identical devicename
        - Create a vDisk with identical name on another vPool
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1, 2],
         'storagerouters': [1, 2],
         'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    vpools = structure['vpools']
    mds_services = structure['mds_services']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    size_64_tib = 64 * 1024 ** 4

    # Verify maximum size of 64TiB
    vdisk_name_1 = 'vdisk_1'
    vdisk_name_2 = 'vdisk_2'
    with self.assertRaises(ValueError):
        VDiskController.create_new(volume_name=vdisk_name_1,
                                   volume_size=size_64_tib + 1,
                                   storagedriver_guid=storagedrivers[1].guid)
    self.assertTrue(expr=len(VDiskList.get_vdisks()) == 0, msg='Expected to find 0 vDisks after failure 1')

    # Create volume of maximum size
    VDiskController.create_new(volume_name=vdisk_name_1,
                               volume_size=size_64_tib,
                               storagedriver_guid=storagedrivers[1].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 1, msg='Expected to find 1 vDisk')
    self.assertTrue(expr=vdisks[0].storagerouter_guid == storagerouters[1].guid, msg='Storage Router does not match expected value')
    self.assertTrue(expr=vdisks[0].size == size_64_tib, msg='Size does not match expected value')
    self.assertTrue(expr=vdisks[0].name == vdisk_name_1, msg='Name does not match expected value')
    self.assertTrue(expr=vdisks[0].vpool == vpools[1], msg='vPool does not match expected value')
    self.assertTrue(expr=vdisks[0].devicename == VDiskController.clean_devicename(vdisk_name_1), msg='Devicename does not match expected value')

    # Attempt to create same volume on same vPool
    with self.assertRaises(RuntimeError):
        VDiskController.create_new(volume_name=vdisk_name_1,
                                   volume_size=size_64_tib,
                                   storagedriver_guid=storagedrivers[1].guid)
    self.assertTrue(expr=len(VDiskList.get_vdisks()) == 1, msg='Expected to find 1 vDisk after failure 2')

    # Attempt to create volume with identical devicename on same vPool
    with self.assertRaises(RuntimeError):
        VDiskController.create_new(volume_name='{0}%^$'.format(vdisk_name_1),
                                   volume_size=size_64_tib,
                                   storagedriver_guid=storagedrivers[1].guid)
    self.assertTrue(expr=len(VDiskList.get_vdisks()) == 1, msg='Expected to find 1 vDisk after failure 3')

    # Create same volume on another vPool
    vdisk2 = VDisk(VDiskController.create_new(volume_name=vdisk_name_2,
                                              volume_size=size_64_tib,
                                              storagedriver_guid=storagedrivers[2].guid))
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')
    self.assertTrue(expr=vdisk2.storagerouter_guid == storagerouters[1].guid, msg='Storage Router does not match expected value')
    self.assertTrue(expr=vdisk2.size == size_64_tib, msg='Size does not match expected value')
    self.assertTrue(expr=vdisk2.name == vdisk_name_2, msg='Name does not match expected value')
    self.assertTrue(expr=vdisk2.vpool == vpools[2], msg='vPool does not match expected value')
    self.assertTrue(expr=vdisk2.devicename == VDiskController.clean_devicename(vdisk_name_2), msg='Devicename does not match expected value')

    # Attempt to create vDisk on Storage Driver without MDS service
    mds_services[1].service.storagerouter = storagerouters[2]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.create_new(volume_name='vdisk_3',
                                   volume_size=size_64_tib,
                                   storagedriver_guid=storagedrivers[1].guid)
    self.assertTrue(expr=len(VDiskList.get_vdisks()) == 2, msg='Expected to find 2 vDisks after failure 4')
def test_list_volumes(self):
    """
    Test the list volumes functionality
        - Create 1 vDisk on vPool1 and create 3 vDisks on vPool2
        - List all volumes
        - List the volumes on vPool1
        - List the volumes on vPool2
    """
    # One Storage Router carrying both vPools, each with its own Storage Driver and MDS service
    structure = DalHelper.build_dal_structure({'vpools': [1, 2],
                                               'storagerouters': [1],
                                               'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
                                               'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
                                              )
    vpools = structure['vpools']
    storagedrivers = structure['storagedrivers']
    vpool1 = vpools[1]
    vpool2 = vpools[2]

    # 1 volume on vPool1, 3 volumes on vPool2 ('vdisk_1' exists on both vPools on purpose)
    for volume_name, driver_id in (('vdisk_1', 1), ('vdisk_1', 2), ('vdisk_2', 2), ('vdisk_3', 2)):
        VDiskController.create_new(volume_name=volume_name, volume_size=1024 ** 4, storagedriver_guid=storagedrivers[driver_id].guid)
    all_vdisks = VDiskList.get_vdisks()

    # Listing without a vPool filter must return every volume known to the model
    sd_volume_ids = set(VDiskController.list_volumes())
    model_volume_ids = set(vdisk.volume_id for vdisk in all_vdisks)
    self.assertEqual(first=len(sd_volume_ids), second=4, msg='Expected to retrieve all 4 volumes')
    self.assertEqual(first=sd_volume_ids, second=model_volume_ids,
                     msg='Volume IDs from Storage Driver not identical to volume IDs in model. SD: {0} - Model: {1}'.format(sd_volume_ids, model_volume_ids))

    # Filtering on vPool1 must return only its single volume
    sd_vpool1_volume_ids = set(VDiskController.list_volumes(vpool_guid=vpool1.guid))
    model_vpool1_volume_ids = set(vdisk.volume_id for vdisk in all_vdisks if vdisk.vpool == vpool1)
    self.assertEqual(first=len(sd_vpool1_volume_ids), second=1, msg='Expected to retrieve 1 volume')
    self.assertEqual(first=sd_vpool1_volume_ids, second=model_vpool1_volume_ids,
                     msg='Volume IDs for vPool1 from Storage Driver not identical to volume IDs in model. SD: {0} - Model: {1}'.format(sd_vpool1_volume_ids, model_vpool1_volume_ids))

    # Filtering on vPool2 must return its three volumes
    sd_vpool2_volume_ids = set(VDiskController.list_volumes(vpool_guid=vpool2.guid))
    model_vpool2_volume_ids = set(vdisk.volume_id for vdisk in all_vdisks if vdisk.vpool == vpool2)
    self.assertEqual(first=len(sd_vpool2_volume_ids), second=3, msg='Expected to retrieve 3 volumes')
    self.assertEqual(first=sd_vpool2_volume_ids, second=model_vpool2_volume_ids,
                     msg='Volume IDs for vPool2 from Storage Driver not identical to volume IDs in model. SD: {0} - Model: {1}'.format(sd_vpool2_volume_ids, model_vpool2_volume_ids))
def test_multi_node_with_regular_domains(self):
    """
    Test DTL checkup on a multi node setup and create some Domains and link them to the several Storage Routers
    """
    # Add a regular domain to the Storage Router serving the vDisk and another Storage Router --> DTL target should be the specific Storage Router
    # || StorageRouter || vDisk || Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |  domain 1       |                  |             |
    # |  sr 2          |        |  domain 1       |                  |  1          |
    # |  sr 3          |        |                 |                  |             |
    # |  sr 4          |        |                 |                  |             |
    # |  sr 5          |        |                 |                  |             |
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'domains': [1, 2, 3],
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1, 2, 3, 4, 5],
         'storagerouter_domains': [(1, 1, 1, False), (2, 2, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
         'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    domains = structure['domains']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
    # Only sr 2 shares the vDisk's regular Domain --> it must be chosen as DTL target
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Add the regular Domain as regular Domain to additional Storage Routers --> DTL target should remain on same Storage Router
    # || StorageRouter || vDisk || Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |  domain 1       |                  |             |
    # |  sr 2          |        |  domain 1       |                  |  1          |
    # |  sr 3          |        |  domain 1       |                  |             |
    # |  sr 4          |        |  domain 1       |                  |             |
    # |  sr 5          |        |  domain 1       |                  |             |
    for storagerouter in storagerouters.values()[2:]:
        sr_domain = StorageRouterDomain()
        sr_domain.backup = False
        sr_domain.domain = domains[1]
        sr_domain.storagerouter = storagerouter
        sr_domain.save()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Add recovery Domain to the Storage Router on which the vDisks lives --> nothing should change for now
    # || StorageRouter || vDisk || Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |  domain 1       |  domain 2        |             |
    # |  sr 2          |        |  domain 1       |                  |  1          |
    # |  sr 3          |        |  domain 1       |                  |             |
    # |  sr 4          |        |  domain 1       |                  |             |
    # |  sr 5          |        |  domain 1       |                  |             |
    sr_domain = StorageRouterDomain()
    sr_domain.backup = True
    sr_domain.domain = domains[2]
    sr_domain.storagerouter = storagerouters[1]
    sr_domain.save()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Add the recovery Domain as regular Domain to additional StorageRouters --> Recovery Domain should have priority over regular Domain
    # || StorageRouter || vDisk || Regular Domain      || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |  domain 1            |  domain 2        |             |
    # |  sr 2          |        |  domain 1            |                  |             |
    # |  sr 3          |        |  domain 1, domain 2  |                  |  1          |
    # |  sr 4          |        |  domain 1, domain 2  |                  |  1          |
    # |  sr 5          |        |  domain 1, domain 2  |                  |  1          |
    for storagerouter in storagerouters.values()[2:]:
        sr_domain = StorageRouterDomain()
        sr_domain.backup = False
        sr_domain.domain = domains[2]
        sr_domain.storagerouter = storagerouter
        sr_domain.save()
    # Any of sr 3/4/5 is an acceptable target, hence a list of valid 'host' values
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[2:]]},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def test_clone(self):
    """
    Test the clone functionality
        - Create a vDisk with name 'clone1'
        - Clone the vDisk and make some assertions
        - Attempt to clone again using same name and same devicename
        - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
        - Attempt to clone on Storage Driver without MDS service
        - Attempt to clone from snapshot which is not yet completely synced to backend
        - Attempt to delete the snapshot from which a clone was made
        - Clone the vDisk on another Storage Router
        - Clone another vDisk with name 'clone1' linked to another vPool
    """
    structure = DalHelper.build_dal_structure({'vpools': [1, 2],
                                               'storagerouters': [1, 2, 3],
                                               'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
                                               'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
                                              )
    vpools = structure['vpools']
    mds_services = structure['mds_services']
    service_type = structure['service_types']['MetadataServer']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
    self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

    # Basic clone scenario: cloning implicitly creates a snapshot on the source vDisk
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')

    clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshot_ids[0])
    self.assertTrue(expr=len(clones) == 1, msg='Expected to find 1 vDisk with parent snapshot')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 1, msg='Expected to find 1 child vDisk')

    # The clone info returned must describe the modeled clone
    for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
        self.assertTrue(expr=expected_key in clone1_info, msg='Expected to find key "{0}" in clone_info'.format(expected_key))
    self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'], msg='Guids do not match')
    self.assertTrue(expr=clones[0].name == clone1_info['name'], msg='Names do not match')
    self.assertTrue(expr=clones[0].devicename == clone1_info['backingdevice'], msg='Device names do not match')

    # Attempt to clone again with same name
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 1')

    # Attempt to clone again with a name which will have identical devicename
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1%')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 2')

    # Attempt to clone on Storage Router on which vPool is not extended
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', storagerouter_guid=storagerouters[2].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 3')

    # Attempt to clone on non-existing Storage Driver (break the ID, attempt, then restore it)
    storagedrivers[1].storagedriver_id = 'non-existing'
    storagedrivers[1].save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 4')
    storagedrivers[1].storagedriver_id = '1'
    storagedrivers[1].save()

    # Attempt to clone on Storage Driver without MDS service (move the MDS away, attempt, then move it back)
    mds_services[1].service.storagerouter = storagerouters[3]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 5')
    mds_services[1].service.storagerouter = storagerouters[1]
    mds_services[1].service.save()

    # Attempt to clone by providing snapshot_id not synced to backend ('in_backend': False)
    self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find only 1 snapshot before cloning')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 1, msg='Expected to find only 1 snapshot ID before cloning')
    metadata = {'label': 'label1',
                'timestamp': int(time.time()),
                'is_sticky': False,
                'in_backend': False,
                'is_automatic': True,
                'is_consistent': True}
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata=metadata)
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 2, msg='Expected to find 2 snapshot IDs')
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 6')

    # Update backend synced flag and retry: cloning from an existing snapshot must not create a new one
    vdisk1.storagedriver_client._set_snapshot_in_backend(vdisk1.volume_id, snapshot_id, True)
    vdisk1.invalidate_dynamics(['snapshots', 'snapshot_ids'])
    VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    vdisk1.invalidate_dynamics()
    self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 2, msg='Expected to find 2 child vDisks')
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots after cloning from a specified snapshot')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 2, msg='Expected to find 2 snapshot IDs after cloning from a specified snapshot')

    # Attempt to delete the snapshot that has clones
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid, snapshot_id=snapshot_id)

    # Clone on specific Storage Router: model a 3rd Storage Driver + MDS service for vPool1 on sr 2 first
    storagedriver = StorageDriver()
    storagedriver.vpool = vpools[1]
    storagedriver.storagerouter = storagerouters[2]
    storagedriver.name = '3'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storagerouters[2].ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '3'
    storagedriver.ports = {'management': 1,
                           'xmlrpc': 2,
                           'dtl': 3,
                           'edge': 4}
    storagedriver.save()

    s_id = '{0}-1'.format(storagedriver.storagerouter.name)
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [3]
    service.type = service_type
    service.save()

    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()

    clone3 = VDisk(VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone3', storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
    self.assertTrue(expr=clone3.storagerouter_guid == storagerouters[2].guid, msg='Incorrect Storage Router on which the clone is attached')

    # Clone vDisk with existing name on another vPool
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))
    clone_vdisk2 = VDisk(VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone1')['vdisk_guid'])
    self.assertTrue(expr=clone_vdisk2.vpool == vpools[2], msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
    self.assertTrue(expr=len([vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1']) == 2, msg='Expected to find 2 vDisks with name "clone1"')

    # Attempt to clone without specifying snapshot and snapshot fails to sync to backend
    # (StorageRouterClient.synced is a mock flag; restored at the end)
    StorageRouterClient.synced = False
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_2', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone4')
    vdisk2.invalidate_dynamics()
    # The implicitly created snapshot must have been cleaned up after the failure
    self.assertTrue(expr=len(vdisk2.snapshots) == 0, msg='Expected to find 0 snapshots after clone failure')
    self.assertTrue(expr=len(vdisk2.snapshot_ids) == 0, msg='Expected to find 0 snapshot IDs after clone failure')
    self.assertTrue(expr=len(vdisk2.child_vdisks) == 0, msg='Expected to find 0 children after clone failure')
    StorageRouterClient.synced = True
def test_remove_snapshots(self):
    """
    Validates whether the remove_snapshots call works as expected. Due to openvstorage/framework#1534
    it needs to handle some backwards compatibility:
        - Old format: {vdisk_guid: snapshot_id} --> {vdisk_guid: [success, snapshot_id_or_error]}
        - New format: {vdisk_guid: [snapshot_id, ...]} --> {vdisk_guid: {'success': ..., 'error': ..., 'results': ...}}
    """
    structure = DalHelper.build_dal_structure({'vpools': [1],
                                               'storagerouters': [1],
                                               'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
                                               'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
                                              )
    storagedriver = structure['storagedrivers'][1]

    # One vDisk carrying 10 backend-synced snapshots
    vdisk = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 4, storagedriver_guid=storagedriver.guid))
    snapshots = []
    for i in xrange(10):
        metadata = {'label': 'label{0}'.format(i),
                    'timestamp': int(time.time()),
                    'is_sticky': False,
                    'in_backend': True,
                    'is_automatic': True,
                    'is_consistent': True}
        snapshots.append(VDiskController.create_snapshot(vdisk_guid=vdisk.guid, metadata=metadata))
    vdisk.invalidate_dynamics(['snapshots', 'snapshot_ids'])
    self.assertEqual(len(vdisk.snapshots), 10)
    self.assertEqual(len(vdisk.snapshot_ids), 10)

    snapshot_id = snapshots[0]

    # Old format: successful delete reports [True, <snapshot_id>]
    results = VDiskController.delete_snapshots({vdisk.guid: snapshot_id})
    expected = {vdisk.guid: [True, snapshot_id]}
    self.assertDictEqual(results, expected)
    self.assertEqual(len(vdisk.snapshots), 9)
    self.assertEqual(len(vdisk.snapshot_ids), 9)
    # Deleting the same snapshot again fails with an error message instead of an ID
    results = VDiskController.delete_snapshots({vdisk.guid: snapshot_id})
    expected = {vdisk.guid: [False, results[vdisk.guid][1]]}
    self.assertDictEqual(results, expected)
    self.assertRegexpMatches(results[vdisk.guid][1], '^Snapshot (.*?) does not belong to vDisk')
    self.assertEqual(len(vdisk.snapshots), 9)
    self.assertEqual(len(vdisk.snapshot_ids), 9)
    # Unknown vDisk guid is reported per entry, not raised
    results = VDiskController.delete_snapshots({'foo': snapshot_id})
    expected = {'foo': [False, results['foo'][1]]}
    self.assertDictEqual(results, expected)
    self.assertRegexpMatches(results['foo'][1], 'VDisk with guid (.*?) could not be found')

    # New format: list of snapshot IDs per vDisk, detailed per-snapshot results
    snapshot_id1 = snapshots[1]
    snapshot_id2 = snapshots[2]
    results = VDiskController.delete_snapshots({vdisk.guid: [snapshot_id1, snapshot_id2]})
    expected = {vdisk.guid: {'success': True,
                             'error': None,
                             'results': {snapshot_id1: [True, snapshot_id1],
                                         snapshot_id2: [True, snapshot_id2]}}}
    self.assertDictEqual(results, expected)
    self.assertEqual(len(vdisk.snapshots), 7)
    self.assertEqual(len(vdisk.snapshot_ids), 7)
    # Partial failure: the already-deleted snapshot fails, the fresh one still succeeds
    snapshot_id2 = snapshots[3]
    results = VDiskController.delete_snapshots({vdisk.guid: [snapshot_id1, snapshot_id2]})
    expected = {vdisk.guid: {'success': False,
                             'error': results[vdisk.guid]['error'],
                             'results': {snapshot_id1: [False, results[vdisk.guid]['results'][snapshot_id1][1]],
                                         snapshot_id2: [True, snapshot_id2]}}}
    self.assertDictEqual(results, expected)
    self.assertEqual(results[vdisk.guid]['error'], 'One or more snapshots could not be removed')
    self.assertRegexpMatches(results[vdisk.guid]['results'][snapshot_id1][1], '^Snapshot (.*?) does not belong to vDisk')
    self.assertEqual(len(vdisk.snapshots), 6)
    self.assertEqual(len(vdisk.snapshot_ids), 6)
    # Unknown vDisk guid in new format: empty per-snapshot results
    results = VDiskController.delete_snapshots({'foo': [snapshot_id1]})
    expected = {'foo': {'success': False,
                        'error': results['foo']['error'],
                        'results': {}}}
    self.assertDictEqual(results, expected)
    self.assertRegexpMatches(results['foo']['error'], 'VDisk with guid (.*?) could not be found')

    # A snapshot with clones hanging off it cannot be removed
    snapshot_id = snapshots[4]
    VDiskController.clone(vdisk.guid, 'clone', snapshot_id)
    results = VDiskController.delete_snapshots({vdisk.guid: [snapshot_id]})
    expected = {vdisk.guid: {'success': False,
                             'error': results[vdisk.guid]['error'],
                             'results': {snapshot_id: [False, results[vdisk.guid]['results'][snapshot_id][1]]}}}
    self.assertDictEqual(results, expected)
    self.assertEqual(results[vdisk.guid]['error'], 'One or more snapshots could not be removed')
    self.assertRegexpMatches(results[vdisk.guid]['results'][snapshot_id][1], '^Snapshot (.*?) has [0-9]+ volume(.?) cloned from it, cannot remove$')
def test_create_from_template(self):
    """
    Test the create from template functionality
        - Create a vDisk and convert to vTemplate
        - Attempt to create from template from a vDisk which is not a vTemplate
        - Create from template basic scenario
        - Attempt to create from template using same name
        - Attempt to create from template using same devicename
        - Attempt to create from template using Storage Router on which vPool is not extended
        - Attempt to create from template using non-existing Storage Driver
        - Attempt to create from template using Storage Driver which does not have an MDS service
        - Create from template on another Storage Router
        - Create from template without specifying a Storage Router
    """
    structure = DalHelper.build_dal_structure({'vpools': [1],
                                               'storagerouters': [1, 2, 3],
                                               'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
                                               'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
                                              )
    vpool = structure['vpools'][1]
    mds_services = structure['mds_services']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    template = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    vdisk_name = 'from_template_1'
    VDiskController.set_as_template(vdisk_guid=template.guid)
    self.assertTrue(expr=template.is_vtemplate, msg='Dynamic property "is_vtemplate" should be True')

    # Create from vDisk which is not a vTemplate
    # (force the mocked volume type back to 'BASE' so create_from_template must refuse)
    template.storagedriver_client._set_object_type(template.volume_id, 'BASE')
    template.invalidate_dynamics(['info', 'is_vtemplate'])
    with self.assertRaises(RuntimeError):
        VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)

    # Create from template (restore the 'TEMPLATE' volume type first)
    template.storagedriver_client._set_object_type(template.volume_id, 'TEMPLATE')
    template.invalidate_dynamics(['info', 'is_vtemplate'])
    info = VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)
    expected_keys = ['vdisk_guid', 'name', 'backingdevice']
    self.assertEqual(first=set(info.keys()), second=set(expected_keys), msg='Create from template returned not the expected keys')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks')
    vdisk = [vdisk for vdisk in vdisks if vdisk.is_vtemplate is False][0]
    self.assertTrue(expr=vdisk.name == vdisk_name, msg='vDisk name is incorrect. Expected: {0} - Actual: {1}'.format(vdisk_name, vdisk.name))
    self.assertTrue(expr=vdisk.parent_vdisk == template, msg='The parent of the vDisk is incorrect')

    # Attempt to create from template using same name
    with self.assertRaises(RuntimeError):
        VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 1')

    # Attempt to create from template using same devicename
    # ('^...$*' is stripped by devicename cleaning, so it collides with from_template_1)
    with self.assertRaises(RuntimeError):
        VDiskController.create_from_template(vdisk_guid=template.guid, name='^{0}$*'.format(vdisk_name), storagerouter_guid=storagerouters[1].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 2')

    # Attempt to create from template on Storage Router on which vPool is not extended
    with self.assertRaises(RuntimeError):
        VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[3].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 3')

    # Attempt to create on non-existing Storage Driver (break the ID, attempt, then restore it)
    storagedrivers[1].storagedriver_id = 'non-existing'
    storagedrivers[1].save()
    with self.assertRaises(RuntimeError):
        VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 4')
    storagedrivers[1].storagedriver_id = '1'
    storagedrivers[1].save()

    # Attempt to create on Storage Driver without MDS service (move the MDS away, attempt, then move it back)
    mds_services[1].service.storagerouter = storagerouters[3]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[1].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 5')
    mds_services[1].service.storagerouter = storagerouters[1]
    mds_services[1].service.save()

    # Create from template on another Storage Router
    vdisk2 = VDisk(VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
    self.assertTrue(expr=vdisk2.storagerouter_guid == storagerouters[2].guid, msg='Expected vdisk2 to be hosted by Storage Router 2')

    # Create from template without specifying Storage Router --> defaults to the template's own Storage Router
    vdisk3 = VDisk(VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_3')['vdisk_guid'])
    self.assertTrue(expr=vdisk3.storagerouter_guid == template.storagerouter_guid, msg='Expected vdisk3 to be hosted by Storage Router 1')
def test_snapshot_all_vdisks(self):
    """
    Tests GenericController.snapshot_all_vdisks functionality
    """
    structure = DalHelper.build_dal_structure({'vpools': [1],
                                               'vdisks': [(1, 1, 1, 1), (2, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
                                               'mds_services': [(1, 1)],
                                               'storagerouters': [1],
                                               'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
                                              )
    vdisk_1 = structure['vdisks'][1]
    vdisk_2 = structure['vdisks'][2]

    # First run: an automatic snapshot must be created for both vDisks
    succeeded, failed = GenericController.snapshot_all_vdisks()
    self.assertEqual(first=len(failed), second=0, msg='Expected 0 failed snapshots')
    self.assertEqual(first=len(succeeded), second=2, msg='Expected 2 successful snapshots')
    for disk in (vdisk_1, vdisk_2):
        self.assertEqual(first=len(disk.snapshot_ids), second=1, msg='Expected 1 snapshot ID for vDisk {0}'.format(disk.name))
    for disk in (vdisk_1, vdisk_2):
        self.assertEqual(first=len(disk.snapshots), second=1, msg='Expected 1 snapshot for vDisk {0}'.format(disk.name))

    # Second run: mark vdisk_1's snapshot as not in the backend so its snapshot fails, while vdisk_2's succeeds
    vdisk_1.storagedriver_client._set_snapshot_in_backend(volume_id=vdisk_1.volume_id,
                                                          snapshot_id=vdisk_1.snapshots[0]['guid'],
                                                          in_backend=False)
    succeeded, failed = GenericController.snapshot_all_vdisks()
    self.assertEqual(first=len(failed), second=1, msg='Expected 1 failed snapshot')
    self.assertEqual(first=failed[0], second=vdisk_1.guid, msg='Expected vDisk {0} to have failed'.format(vdisk_1.name))
    self.assertEqual(first=len(succeeded), second=1, msg='Expected 1 successful snapshot')
    self.assertEqual(first=succeeded[0], second=vdisk_2.guid, msg='Expected vDisk {0} to have succeeded'.format(vdisk_2.name))
    # Only vdisk_2 gained a snapshot; vdisk_1 still has its original one
    self.assertEqual(first=len(vdisk_1.snapshot_ids), second=1, msg='Expected 1 snapshot ID for vDisk {0}'.format(vdisk_1.name))
    self.assertEqual(first=len(vdisk_2.snapshot_ids), second=2, msg='Expected 2 snapshot IDs for vDisk {0}'.format(vdisk_2.name))
    self.assertEqual(first=len(vdisk_1.snapshots), second=1, msg='Expected 1 snapshot for vDisk {0}'.format(vdisk_1.name))
    self.assertEqual(first=len(vdisk_2.snapshots), second=2, msg='Expected 2 snapshots for vDisk {0}'.format(vdisk_2.name))
def test_multi_node_with_recovery_domains(self):
    """
    Test DTL checkup on a multi node setup and create some Domains and link them to the several Storage Routers
    """
    # Add a recovery Domain to the Storage Router serving the vDisk --> DTL should be random
    # || StorageRouter || vDisk || Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |                 |  domain 1        |             |
    # |  sr 2          |        |                 |                  |             |
    # |  sr 3          |        |                 |                  |             |
    # |  sr 4          |        |                 |                  |             |
    # |  sr 5          |        |                 |                  |             |
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'domains': [1, 2, 3],
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1, 2, 3, 4, 5],
         'storagerouter_domains': [(1, 1, 1, True)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
         'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    domains = structure['domains']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
    # No other Storage Router is in the recovery Domain yet --> any of sr 2-5 is an acceptable target
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:]]},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Add the recovery domain as regular Domain of another Storage Router --> that Storage Router becomes the target
    # || StorageRouter || vDisk || Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |                 |  domain 1        |             |
    # |  sr 2          |        |  domain 1       |                  |  1          |
    # |  sr 3          |        |                 |                  |             |
    # |  sr 4          |        |                 |                  |             |
    # |  sr 5          |        |                 |                  |             |
    sr_domain = StorageRouterDomain()
    sr_domain.backup = False
    sr_domain.domain = domains[1]
    sr_domain.storagerouter = storagerouters[2]
    sr_domain.save()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Add the recovery domain as regular Domain to the other Storage Routers --> nothing should change
    # || StorageRouter || vDisk || Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |                 |  domain 1        |             |
    # |  sr 2          |        |  domain 1       |                  |  1          |
    # |  sr 3          |        |  domain 1       |                  |             |
    # |  sr 4          |        |  domain 1       |                  |             |
    # |  sr 5          |        |  domain 1       |                  |             |
    for storagerouter in storagerouters.values()[2:]:
        sr_domain = StorageRouterDomain()
        sr_domain.backup = False
        sr_domain.domain = domains[1]
        sr_domain.storagerouter = storagerouter
        sr_domain.save()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Remove the domain from the Storage Router which is used as DTL target --> target moves to one of the remaining members
    # || StorageRouter || vDisk || Regular Domain || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |                 |  domain 1        |             |
    # |  sr 2          |        |                 |                  |             |
    # |  sr 3          |        |  domain 1       |                  |  1          |
    # |  sr 4          |        |  domain 1       |                  |  1          |
    # |  sr 5          |        |  domain 1       |                  |  1          |
    for junction in storagerouters[2].domains:
        junction.delete()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[2:]]},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Add regular domain to the Storage Router serving the vDisk and some other Storage Routers --> recovery Domain should still get priority
    # || StorageRouter || vDisk || Regular Domain      || Recovery Domain || DTL Target ||
    # |  sr 1          |  1     |  domain 2            |  domain 1        |             |
    # |  sr 2          |        |  domain 2            |                  |             |
    # |  sr 3          |        |  domain 2            |                  |             |
    # |  sr 4          |        |  domain 1, domain 2  |                  |  1          |
    # |  sr 5          |        |  domain 2            |                  |             |
    for junction in storagerouters[3].domains:
        junction.delete()
    for junction in storagerouters[5].domains:
        junction.delete()
    for storagerouter in storagerouters.values():
        sr_domain = StorageRouterDomain()
        sr_domain.backup = False
        sr_domain.domain = domains[2]
        sr_domain.storagerouter = storagerouter
        sr_domain.save()
    # sr 4 is the only remaining member of the recovery Domain --> it must be the target
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[4].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
def test_happypath(self):
    """
    Validates the happy path; Hourly snapshots are taken with a few manual consistent
    every now and then. The delete policy is executed every day
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'mds_services': [(1, 1)],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
    )
    vdisk_1 = structure['vdisks'][1]
    # Disable the caching timeout on the 'snapshots' dynamic so every read reflects the latest state
    snapshots_dynamic = [dyn for dyn in vdisk_1._dynamics if dyn.name == 'snapshots'][0]
    snapshots_dynamic.timeout = 0

    # Run the testing scenario
    running_in_travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
    if running_in_travis:
        self._print_message('Running in Travis, reducing output.')
    base_date = datetime.datetime.now().date()
    seconds_per_minute = 60
    seconds_per_hour = seconds_per_minute * 60
    consistent_hours = [6, 12, 18]
    inconsistent_hours = xrange(2, 23)
    for day in xrange(0, 35):
        base_timestamp = self._make_timestamp(base_date, datetime.timedelta(1) * day)
        self._print_message('')
        self._print_message('Day cycle: {0}: {1}'.format(day, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d')))

        # At the start of the day, delete snapshot policy runs at 00:30
        self._print_message('- Deleting snapshots')
        GenericController.delete_snapshots(timestamp=base_timestamp + (seconds_per_minute * 30))

        # Validate snapshots
        self._print_message('- Validating snapshots')
        self._validate(vdisk=vdisk_1,
                       current_day=day,
                       base_date=base_date,
                       sticky_hours=[],
                       consistent_hours=consistent_hours,
                       inconsistent_hours=inconsistent_hours)

        # During the day, snapshots are taken
        # - Create non consistent snapshot every hour, between 2:00 and 22:00
        # - Create consistent snapshot at 6:30, 12:30, 18:30
        self._print_message('- Creating snapshots')
        for hour_of_day in inconsistent_hours:
            inconsistent_ts = base_timestamp + (seconds_per_hour * hour_of_day)
            VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                            metadata={'label': 'ss_i_{0}:00'.format(str(hour_of_day)),
                                                      'is_consistent': False,
                                                      'timestamp': str(inconsistent_ts)})
            if hour_of_day in consistent_hours:
                consistent_ts = inconsistent_ts + (seconds_per_minute * 30)
                VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                                metadata={'label': 'ss_c_{0}:30'.format(str(hour_of_day)),
                                                          'is_consistent': True,
                                                          'timestamp': str(consistent_ts)})
def test_different_snapshot_flags(self):
    """
    Tests the GenericController.delete_snapshots() call, but with different snapshot flags
        Scenario 1: is_automatic: True, is_consistent: True --> Automatically created consistent snapshots should be deleted
        Scenario 2: is_automatic: True, is_consistent: False --> Automatically created non-consistent snapshots should be deleted
        Scenario 3: is_automatic: False, is_consistent: True --> Manually created consistent snapshots should be deleted
        Scenario 4: is_automatic: False, is_consistent: False --> Manually created non-consistent snapshots should be deleted
        Scenario 5: is_sticky: True --> Sticky snapshots of any kind should never be deleted (Only possible to delete manually)
    """
    sec_per_minute = 60
    sec_per_hour = sec_per_minute * 60
    for scenario_index in range(5):
        # Build a fresh DAL structure per scenario; the stores are wiped again at the end of the loop
        structure = DalHelper.build_dal_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
        )
        base_date = datetime.datetime.now().date()
        vdisk_1 = structure['vdisks'][1]
        is_sticky = False
        sticky_hours = []
        # Even scenarios exercise consistent snapshots (taken at HH:30), odd ones non-consistent (taken at HH:00)
        consistent_scenario = scenario_index % 2 == 0
        label = 'c' if consistent_scenario else 'i'
        additional_time = sec_per_minute * 30 if consistent_scenario else 0
        consistent_hours = [2] if consistent_scenario else []
        inconsistent_hours = [] if consistent_scenario else [2]
        if scenario_index == 4:
            # Last scenario: mark the snapshots sticky, they may never be cleaned up
            is_sticky = True
            sticky_hours = consistent_hours
        for day in xrange(35):
            base_timestamp = self._make_timestamp(base_date, datetime.timedelta(1) * day)
            self._print_message('')
            self._print_message('Day cycle: {0}: {1}'.format(day, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d')))

            # Delete policy runs at 00:30, then validate what survived
            self._print_message('- Deleting snapshots')
            GenericController.delete_snapshots(timestamp=base_timestamp + (sec_per_minute * 30))
            self._validate(vdisk=vdisk_1,
                           current_day=day,
                           base_date=base_date,
                           sticky_hours=sticky_hours,
                           consistent_hours=consistent_hours,
                           inconsistent_hours=inconsistent_hours)

            self._print_message('- Creating snapshots')
            for snapshot_hour in consistent_hours + inconsistent_hours:
                snapshot_ts = base_timestamp + (sec_per_hour * snapshot_hour) + additional_time
                VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                                metadata={'label': 'ss_{0}_{1}:00'.format(label, snapshot_hour),
                                                          'is_sticky': is_sticky,
                                                          'timestamp': str(snapshot_ts),
                                                          'is_automatic': scenario_index in [0, 1],
                                                          'is_consistent': len(consistent_hours) > 0})
        self.persistent._clean()
        self.volatile._clean()
def test_clone_snapshot(self): """ Validates that a snapshot that has clones will not be deleted while other snapshots will be deleted """ # Setup # There are 2 disks, second one cloned from a snapshot of the first structure = DalHelper.build_dal_structure({ 'vpools': [1], 'vdisks': [ (1, 1, 1, 1), (2, 1, 1, 1) ], # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>) 'mds_services': [(1, 1)], # (<id>, <storagedriver_id>) 'storagerouters': [1], 'storagedrivers': [(1, 1, 1)] } # (<id>, <vpool_id>, <storagerouter_id>) ) vdisk_1 = structure['vdisks'][1] [ dynamic for dynamic in vdisk_1._dynamics if dynamic.name == 'snapshots' ][0].timeout = 0 base = datetime.datetime.now().date() base_timestamp = self._make_timestamp(base, datetime.timedelta(1)) minute = 60 hour = minute * 60 for h in [6, 12, 18]: timestamp = base_timestamp + (hour * h) VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, metadata={ 'label': 'snapshot_{0}:30'.format( str(h)), 'is_consistent': True, 'timestamp': str(timestamp) }) structure = DalHelper.build_dal_structure( structure={'vdisks': [(2, 1, 1, 1)]}, previous_structure=structure) clone_vdisk = structure['vdisks'][2] base_snapshot_guid = vdisk_1.snapshot_ids[0] # Oldest clone_vdisk.parentsnapshot = base_snapshot_guid clone_vdisk.save() for day in range(10): base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) for h in [6, 12, 18]: timestamp = base_timestamp + (hour * h) VDiskController.create_snapshot(vdisk_guid=clone_vdisk.guid, metadata={ 'label': 'snapshot_{0}:30'.format( str(h)), 'is_consistent': True, 'timestamp': str(timestamp) }) base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * 2) GenericController.delete_snapshots(timestamp=base_timestamp + (minute * 30)) self.assertIn( base_snapshot_guid, vdisk_1.snapshot_ids, 'Snapshot was deleted while there are still clones of it')
def test_manually_overruled_dtl(self):
    """
    The DTL target of a vDisk can be manually overruled by the customer
    """
    # Initial layout: sr 1 serves the vDisk and has domain 1 as recovery domain;
    # sr 2 and sr 3 have domain 1 as regular domain and are thus the expected DTL candidates.
    # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
    # |     sr 1       |   1   |                |     domain 1     |             |
    # |     sr 2       |       |    domain 1    |                  |             |
    # |     sr 3       |       |    domain 1    |                  |             |
    # |     sr 4       |       |    domain 2    |                  |             |
    # |     sr 5       |       |                |                  |             |
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
         'domains': [1, 2],
         'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
         'storagerouters': [1, 2, 3, 4, 5],
         'storagerouter_domains': [(1, 1, 1, True), (2, 2, 1, False), (3, 3, 1, False),
                                   (4, 4, 2, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
         'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
    )
    vpool = structure['vpools'][1]
    vdisk = structure['vdisks'][1]
    domains = structure['domains']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
    # Automatic checkup: DTL should land on sr 2 or sr 3 (regular domain 1), async mode
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:3]]},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

    # Set DTL manually to node 2 and add 2 vdisk domains to the vdisk
    vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id,
                                                     config=DTLConfig(str(storagerouters[2].storagedrivers[0].storage_ip), 3, DTLMode.SYNCHRONOUS))
    vdomain1 = VDiskDomain()
    vdomain2 = VDiskDomain()
    vdomain1.vdisk = vdisk
    vdomain2.vdisk = vdisk
    vdomain1.domain = domains[1]
    vdomain2.domain = domains[2]
    vdomain1.save()
    vdomain2.save()
    # Flag the vDisk so the checkup knows the DTL was configured by hand
    vdisk.has_manual_dtl = True
    vdisk.save()
    # Checkup must leave the manual (synchronous) configuration untouched
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.SYNCHRONOUS}])

    # Delete the vDiskDomain on which the DTL resides, 1 other vDiskDomain remains,
    # no changes should be made, but OVS_WARNING should be logged
    vdomain1.delete()
    # Reset the captured logs so the assertion below only sees entries from this checkup run
    LogHandler._logs = {}
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.SYNCHRONOUS}])
    # Exactly one OVS_WARNING about this vDisk's stale manual DTL config must have been emitted
    warning_logs = []
    for log in LogHandler._logs['lib_vdisk']:
        if 'OVS_WARNING' in log and 'manual DTL configuration is no longer' in log and vdisk.guid in log:
            warning_logs.append(log)
    self.assertEqual(first=1, second=len(warning_logs))

    # Delete the last vDiskDomain --> DTL should not be changed
    vdomain2.delete()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                    {'key': 'port', 'value': 3},
                                                    {'key': 'mode', 'value': DTLMode.SYNCHRONOUS}])
    # Discard cached DAL state and verify the manual flag survived the checkups
    vdisk.discard()
    self.assertTrue(expr=vdisk.has_manual_dtl)

    # Overrules the DTL manually to None and validate DTL checkup leaves it as it is
    vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id, config=None)
    vdisk.has_manual_dtl = True
    vdisk.save()
    self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                       validations=[{'key': 'config', 'value': None}])
def test_snapshot_sticky(self): """ is_sticky: True --> Sticky snapshots of any kind should never be deleted (Only possible to delete manually) """ minute = 60 hour = minute * 60 structure = DalHelper.build_dal_structure({ 'vpools': [1], 'vdisks': [ (1, 1, 1, 1) ], # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>) 'mds_services': [(1, 1)], 'storagerouters': [1], 'storagedrivers': [(1, 1, 1)] } # (<id>, <vpool_id>, <storagerouter_id>) ) base = datetime.datetime.now().date() vdisk_1 = structure['vdisks'][1] storagedriver_1 = structure['storagedrivers'][1] label = 'c' # Extra time to add to the hourly timestamps additional_time = minute * 30 # Hours to create a snapshot on sticky_hours = [2] consistent_hours = [2] inconsistent_hours = [] # Snapshot details is_sticky = len(sticky_hours) > 0 is_consistent = len(consistent_hours) > 0 is_automatic = False for day in xrange(35): base_timestamp = self._make_timestamp(base, datetime.timedelta(1) * day) self._print_message('') self._print_message('Day cycle: {0}: {1}'.format( day, datetime.datetime.fromtimestamp(base_timestamp).strftime( '%Y-%m-%d'))) self._print_message('- Deleting snapshots') GenericController.delete_snapshots_storagedriver( storagedriver_guid=storagedriver_1.guid, timestamp=base_timestamp + (minute * 30)) self._validate(vdisk=vdisk_1, current_day=day, base_date=base, sticky_hours=sticky_hours, consistent_hours=consistent_hours, inconsistent_hours=inconsistent_hours) self._print_message('- Creating snapshots') for x in consistent_hours + inconsistent_hours: timestamp = base_timestamp + (hour * x) + additional_time VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid, metadata={ 'label': 'ss_{0}_{1}:00'.format( label, x), 'is_sticky': is_sticky, 'timestamp': str(timestamp), 'is_automatic': is_automatic, 'is_consistent': is_consistent })