def check_dtl(result_handler):
    """
    Checks the dtl for all vdisks on the local node
    :param result_handler: logging object
    :type result_handler: ovs.extensions.healthcheck.result.HCResults
    :return: None
    :rtype: NoneType
    """
    # Maps every known DTL status onto (report method, message template, error code).
    status_report_map = {'ok_standalone': (result_handler.success, 'VDisk {0}s DTL is disabled', ErrorCodes.volume_dtl_standalone),
                         'disabled': (result_handler.success, 'VDisk {0}s DTL is disabled', ErrorCodes.volume_dtl_standalone),
                         'ok_sync': (result_handler.success, 'VDisk {0}s DTL is enabled and running.', ErrorCodes.volume_dtl_ok),
                         'degraded': (result_handler.warning, 'VDisk {0}s DTL is degraded.', ErrorCodes.volume_dtl_degraded),
                         'checkup_required': (result_handler.warning, 'VDisk {0}s DTL should be configured.', ErrorCodes.volume_dtl_checkup_required),
                         'catch_up': (result_handler.warning, 'VDisk {0}s DTL is enabled but still syncing.', ErrorCodes.volume_dtl_catch_up)}
    # Fetch vdisks hosted on this machine
    local_sr = System.get_my_storagerouter()
    if len(local_sr.vdisks_guids) == 0:
        return result_handler.skip('No VDisks present in cluster.')
    for vdisk_guid in local_sr.vdisks_guids:
        vdisk = VDisk(vdisk_guid)
        # Refresh cached properties so the reported status is current.
        vdisk.invalidate_dynamics(['dtl_status', 'info'])
        if vdisk.dtl_status in status_report_map:
            report, message_template, error_code = status_report_map[vdisk.dtl_status]
            report(message_template.format(vdisk.name), code=error_code)
        else:
            result_handler.warning('VDisk {0}s DTL has an unknown status: {1}.'.format(vdisk.name, vdisk.dtl_status),
                                   code=ErrorCodes.volume_dtl_unknown)
def _create_vdisks_for_mds_service(self, amount, start_id, mds_service=None, vpool=None):
    """
    Generates vdisks and appends them to a given mds_service

    :param amount: number of vDisks to model
    :param start_id: first numeric id; vDisks are named str(id) with devicename 'disk_<id>'
    :param mds_service: MDSService to attach the vDisks to (mutually used with vpool)
    :param vpool: vPool to model the vDisks on when no mds_service is given
    :return: dict mapping numeric id -> modeled VDisk
    """
    vdisks = {}
    for i in xrange(start_id, start_id + amount):
        disk = VDisk()
        disk.name = str(i)
        disk.devicename = 'disk_{0}'.format(i)
        disk.volume_id = 'disk_{0}'.format(i)
        # vPool comes from the MDS service when one is given, otherwise from the argument
        disk.vpool = mds_service.vpool if mds_service is not None else vpool
        disk.size = 0
        disk.save()
        disk.reload_client()
        if mds_service is not None:
            # Find the StorageDriver co-located with the MDS service's StorageRouter
            storagedriver_id = None
            for sd in mds_service.vpool.storagedrivers:
                if sd.storagerouter_guid == mds_service.service.storagerouter_guid:
                    storagedriver_id = sd.storagedriver_id
            # Model the vDisk <-> MDS service junction, marking this service as master
            junction = MDSServiceVDisk()
            junction.vdisk = disk
            junction.mds_service = mds_service
            junction.is_master = True
            junction.save()
            # Build throwaway config objects mimicking the StorageDriver API shapes
            config = type('MDSNodeConfig', (), {'address': self._generate_nc_function(True, mds_service),
                                                'port': self._generate_nc_function(False, mds_service)})()
            mds_backend_config = type('MDSMetaDataBackendConfig', (), {'node_configs': self._generate_bc_function([config])})()
            # Register the mocked per-volume state on the StorageDriverClient stub
            StorageDriverClient.metadata_backend_config['disk_{0}'.format(i)] = mds_backend_config
            StorageDriverClient.catch_up['disk_{0}'.format(i)] = 50
            StorageDriverClient.vrouter_id['disk_{0}'.format(i)] = storagedriver_id
        vdisks[i] = disk
    return vdisks
def test_set_as_template(self):
    """
    Test the set as template functionality
    - Create a vDisk
    - Set it as template and make some assertions
    """
    structure = Helper.build_service_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedrivers = structure['storagedrivers']
    vdisk = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 4, storagedriver_guid=storagedrivers[1].guid))
    metadata = {'is_consistent': True,
                'is_automatic': True,
                'is_sticky': False}
    # Take 5 snapshots; converting to template is expected to collapse them to 1
    for x in range(5):
        metadata['label'] = 'label{0}'.format(x)
        metadata['timestamp'] = int(time.time())
        VDiskController.create_snapshot(vdisk_guid=vdisk.guid, metadata=metadata)
    self.assertTrue(expr=len(vdisk.snapshots) == 5, msg='Expected to find 5 snapshots')

    # Set as template and validate the model
    self.assertFalse(expr=vdisk.is_vtemplate, msg='Dynamic property "is_vtemplate" should be False')
    VDiskController.set_as_template(vdisk.guid)
    vdisk.invalidate_dynamics('snapshots')
    self.assertTrue(expr=vdisk.is_vtemplate, msg='Dynamic property "is_vtemplate" should be True')
    self.assertTrue(expr=len(vdisk.snapshots) == 1, msg='Expected to find only 1 snapshot after converting to template')

    # Try again and verify job succeeds, previously we raised error when setting as template an additional time
    VDiskController.set_as_template(vdisk.guid)
    self.assertTrue(expr=vdisk.is_vtemplate, msg='Dynamic property "is_vtemplate" should still be True')
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot. Called on "cinder create --snapshot-id ..."

    :param snapshot: snapshot reference (sqlalchemy Model)
    :param volume: volume reference (sqlalchemy Model)
    :return: model update dict carrying 'provider_location' and 'display_name'

    Volume here is just a ModelObject, it doesn't exist physically,
    it will be created by OVS.
    Diskguid to be passed to the clone method is the ovs diskguid of the
    parent of the snapshot with snapshot.id

    OVS: Clone from arbitrary volume, requires volumedriver 3.6 release > 15.08.2014
    """
    _debug_vol_info('CLONE_VOL', volume)
    _debug_vol_info('CLONE_SNAP', snapshot)

    mountpoint = self._get_hostname_mountpoint(str(volume.host))
    ovs_snap_disk = self._find_ovs_model_disk_by_snapshot_id(snapshot.id)
    OVSVolumeDriver._wait_for_snapshot(ovs_snap_disk, snapshot.id)
    devicename = volume.display_name
    if not devicename:
        devicename = volume.name
    pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
        str(volume.host))

    LOG.info('[CLONE FROM SNAP] %s %s %s %s'
             % (ovs_snap_disk.guid, snapshot.id, devicename, pmachineguid))
    try:
        kwargs = dict(diskguid=ovs_snap_disk.guid,
                      snapshotid=snapshot.id,
                      devicename=devicename,
                      pmachineguid=pmachineguid,
                      machinename="",
                      machineguid=None)
        LOG.debug('[CLONE FROM SNAP] Executing clone - async')
        # Execute "clone" task async, using celery workers
        # wait for the result for 30 minutes then raise TimeoutError
        disk_meta = VDiskController.clone.apply_async(kwargs=kwargs)\
            .get(timeout=1800)
        LOG.debug('[CLONE FROM SNAP] Executing clone - async - DONE')
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])

        LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
        LOG.debug('[CLONE FROM SNAP] New volume %s'
                  % volume['provider_location'])

        # Register the cinder id on the freshly cloned OVS vDisk
        vdisk = VDisk(disk_meta['diskguid'])
        vdisk.cinder_id = volume.id
        vdisk.name = devicename
        vdisk.save()
    except Exception as ex:
        LOG.error('CLONE FROM SNAP: Internal error %s ' % str(ex))
        # Best-effort rollback of the half-created volume before re-raising
        self.delete_volume(volume)
        raise

    # BUG FIX: the dict literal was orphaned after a bare 'return', so the
    # driver returned None and Cinder never received the model update.
    return {'provider_location': volume['provider_location'],
            'display_name': volume['display_name']}
def test_delete(self):
    """
    Test the delete of a vDisk
    - Create 2 vDisks with identical names on 2 different vPools
    - Delete 1st vDisk and verify other still remains on correct vPool
    - Delete 2nd vDisk and verify no more volumes left
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1, 2],
         'domains': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    domains = structure['domains']
    storagedrivers = structure['storagedrivers']
    # Two vDisks with the same name, but on different vPools
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))
    # Attach a domain to vDisk1 so delete also has to clean the junction up
    vdisk_domain = VDiskDomain()
    vdisk_domain.domain = domains[1]
    vdisk_domain.vdisk = vdisk1
    vdisk_domain.save()

    # Delete vDisk1 and make some assertions
    VDiskController.delete(vdisk_guid=vdisk1.guid)
    with self.assertRaises(ObjectNotFoundException):
        VDisk(vdisk1.guid)
    self.assertEqual(first=len(VDiskController.list_volumes()),
                     second=1,
                     msg='Expected to find only 1 volume in Storage Driver list_volumes')
    self.assertIn(member=vdisk2,
                  container=VDiskList.get_vdisks(),
                  msg='vDisk2 should still be modeled')

    # Delete vDisk2 and make some assertions
    VDiskController.delete(vdisk_guid=vdisk2.guid)
    with self.assertRaises(ObjectNotFoundException):
        VDisk(vdisk2.guid)
    self.assertEqual(first=len(VDiskController.list_volumes()),
                     second=0,
                     msg='Expected to find no more volumes in Storage Driver list_volumes')
def _statistics(self, dynamic):
    """
    Aggregates the Statistics (IOPS, Bandwidth, ...) of the vDisks connected to the Storage Driver.
    """
    from ovs.dal.hybrids.vdisk import VDisk
    # Copy the fetched counters into a fresh dict before post-processing.
    statistics = dict((stat_key, stat_value)
                      for stat_key, stat_value in self.fetch_statistics().iteritems())
    statistics['timestamp'] = time.time()
    # calculate_delta mutates 'statistics' using the previous sample kept under self._key.
    VDisk.calculate_delta(self._key, dynamic, statistics)
    return statistics
def _execute_scrub_work(scrub_location, vdisk_guids):
    """
    Executes scrub work for the given vDisks at the given scrub location.
    vDisks whose MDS master cannot be made local are skipped; per-vDisk
    failures are collected and raised together at the end.

    :param scrub_location: directory where scrub data can be staged
    :param vdisk_guids: guids of the vDisks to scrub
    :return: the vdisk_guids that were processed
    :raises Exception: aggregated message when any vDisk failed to scrub
    """
    def _verify_mds_config(current_vdisk):
        # Refresh cached 'info' so the MDS backend config reflects reality
        current_vdisk.invalidate_dynamics(['info'])
        vdisk_configs = current_vdisk.info['metadata_backend_config']
        if len(vdisk_configs) == 0:
            raise RuntimeError('Could not load MDS configuration')
        return vdisk_configs

    ScheduledTaskController._logger.info('Execute Scrub - Started')
    ScheduledTaskController._logger.info('Execute Scrub - Scrub location - {0}'.format(scrub_location))
    total = len(vdisk_guids)
    skipped = 0
    storagedrivers = {}  # cache: storagedriver_id -> StorageDriver, avoids repeated lookups
    failures = []
    for vdisk_guid in vdisk_guids:
        vdisk = VDisk(vdisk_guid)
        try:
            # Load the vDisk's StorageDriver
            ScheduledTaskController._logger.info('Execute Scrub - Virtual disk {0} - {1} - Started'.format(vdisk.guid, vdisk.name))
            vdisk.invalidate_dynamics(['storagedriver_id'])
            if vdisk.storagedriver_id not in storagedrivers:
                storagedrivers[vdisk.storagedriver_id] = StorageDriverList.get_by_storagedriver_id(vdisk.storagedriver_id)
            storagedriver = storagedrivers[vdisk.storagedriver_id]
            # Load the vDisk's MDS configuration
            configs = _verify_mds_config(current_vdisk=vdisk)
            # Check MDS master is local. Trigger MDS handover if necessary
            if configs[0].get('ip') != storagedriver.storagerouter.ip:
                ScheduledTaskController._logger.debug('Execute Scrub - Virtual disk {0} - {1} - MDS master is not local, trigger handover'.format(vdisk.guid, vdisk.name))
                MDSServiceController.ensure_safety(vdisk)
                configs = _verify_mds_config(current_vdisk=vdisk)
                # Skip (don't fail) when the handover did not make the master local
                if configs[0].get('ip') != storagedriver.storagerouter.ip:
                    skipped += 1
                    ScheduledTaskController._logger.info('Execute Scrub - Virtual disk {0} - {1} - Skipping because master MDS still not local'.format(vdisk.guid, vdisk.name))
                    continue
            # The locked client serializes scrub access to this volume
            with vdisk.storagedriver_client.make_locked_client(str(vdisk.volume_id)) as locked_client:
                ScheduledTaskController._logger.info('Execute Scrub - Virtual disk {0} - {1} - Retrieve and apply scrub work'.format(vdisk.guid, vdisk.name))
                work_units = locked_client.get_scrubbing_workunits()
                for work_unit in work_units:
                    scrubbing_result = locked_client.scrub(work_unit, scrub_location, log_sinks=[SCRUBBER_LOGFILE_LOCATION])
                    locked_client.apply_scrubbing_result(scrubbing_result)
                if work_units:
                    ScheduledTaskController._logger.info('Execute Scrub - Virtual disk {0} - {1} - Scrub successfully applied'.format(vdisk.guid, vdisk.name))
                else:
                    ScheduledTaskController._logger.info('Execute Scrub - Virtual disk {0} - {1} - No scrubbing required'.format(vdisk.guid, vdisk.name))
        except Exception as ex:
            failures.append('Failed scrubbing work unit for volume {0} with guid {1}: {2}'.format(vdisk.name, vdisk.guid, ex))
    failed = len(failures)
    ScheduledTaskController._logger.info('Execute Scrub - Finished - Success: {0} - Failed: {1} - Skipped: {2}'.format((total - failed - skipped), failed, skipped))
    if failed > 0:
        raise Exception('\n - '.join(failures))
    return vdisk_guids
def rollback(diskguid, timestamp):
    """
    Rolls back a disk based on a given disk snapshot timestamp
    """
    disk = VDisk(diskguid)
    # Pick the first snapshot matching the requested timestamp, if any.
    matching = (snap for snap in disk.snapshots if snap['timestamp'] == timestamp)
    snapshot = next(matching, None)
    if snapshot is None:
        raise ValueError('No snapshot found for timestamp {}'.format(timestamp))
    disk.storagedriver_client.rollback_volume(str(disk.volume_id), snapshot['guid'])
    # Cached snapshot list is stale after the rollback; force a reload.
    disk.invalidate_dynamics(['snapshots'])
    return True
def _execute_scrub_work(scrub_location, vdisk_guids):
    """
    Executes scrub work for the given vDisks at the given scrub location.
    vDisks whose MDS master cannot be made local are skipped; per-vDisk
    failures are collected and raised together at the end.

    :param scrub_location: directory where scrub data can be staged
    :param vdisk_guids: guids of the vDisks to scrub
    :return: the vdisk_guids that were processed
    :raises Exception: aggregated message when any vDisk failed to scrub
    """
    def verify_mds_config(current_vdisk):
        # Refresh cached 'info' so the MDS backend config reflects reality
        current_vdisk.invalidate_dynamics(["info"])
        vdisk_configs = current_vdisk.info["metadata_backend_config"]
        if len(vdisk_configs) == 0:
            raise RuntimeError("Could not load MDS configuration")
        return vdisk_configs

    logger.info("Scrub location: {0}".format(scrub_location))
    total = len(vdisk_guids)
    skipped = 0
    storagedrivers = {}  # cache: storagedriver_id -> StorageDriver
    failures = []
    for vdisk_guid in vdisk_guids:
        vdisk = VDisk(vdisk_guid)
        try:
            # Load the vDisk's StorageDriver
            logger.info("Scrubbing virtual disk {0} with guid {1}".format(vdisk.name, vdisk.guid))
            vdisk.invalidate_dynamics(["storagedriver_id"])
            if vdisk.storagedriver_id not in storagedrivers:
                storagedrivers[vdisk.storagedriver_id] = StorageDriverList.get_by_storagedriver_id(
                    vdisk.storagedriver_id
                )
            storagedriver = storagedrivers[vdisk.storagedriver_id]

            # Load the vDisk's MDS configuration
            configs = verify_mds_config(current_vdisk=vdisk)

            # Check MDS master is local. Trigger MDS handover if necessary
            if configs[0].get("ip") != storagedriver.storagerouter.ip:
                logger.debug("MDS for volume {0} is not local. Trigger handover".format(vdisk.volume_id))
                MDSServiceController.ensure_safety(vdisk)
                configs = verify_mds_config(current_vdisk=vdisk)
                if configs[0].get("ip") != storagedriver.storagerouter.ip:
                    skipped += 1
                    logger.info(
                        "Skipping scrubbing work unit for volume {0}: MDS master is not local".format(
                            vdisk.volume_id
                        )
                    )
                    continue
            with vdisk.storagedriver_client.make_locked_client(str(vdisk.volume_id)) as locked_client:
                work_units = locked_client.get_scrubbing_workunits()
                for work_unit in work_units:
                    scrubbing_result = locked_client.scrub(work_unit, scrub_location)
                    locked_client.apply_scrubbing_result(scrubbing_result)
                if work_units:
                    logger.info("Scrubbing successfully applied")
        # FIX: 'except Exception, ex' is legacy syntax (invalid in Python 3)
        except Exception as ex:
            failures.append(
                "Failed scrubbing work unit for volume {0} with guid {1}: {2}".format(vdisk.name, vdisk.guid, ex)
            )
    # FIX: failures were collected but never reported, so the task silently
    # succeeded even when every vDisk failed. Surface them like the summary does.
    failed = len(failures)
    logger.info("Scrubbing finished. Success: {0} - Failed: {1} - Skipped: {2}".format(total - failed - skipped, failed, skipped))
    if failed > 0:
        raise Exception("\n - ".join(failures))
    return vdisk_guids
def set_as_template(diskguid):
    """
    Set a disk as template
    @param diskguid: guid of the disk
    """
    disk = VDisk(diskguid)
    # FIX: setting an already-templated disk used to raise RuntimeError, which
    # made retried/duplicate jobs fail. Treat it as an idempotent no-op instead.
    if disk.is_vtemplate is True:
        logger.info('Disk {0} is already set as template'.format(disk.name))
        return
    logger.info('Setting disk {0} as template'.format(disk.name))
    disk.storagedriver_client.set_volume_as_template(str(disk.volume_id))
    disk.is_vtemplate = True
    disk.save()
def _statistics(self, dynamic):
    """
    Aggregates the Statistics (IOPS, Bandwidth, ...) of the vDisks connected to the Storage Driver.
    """
    from ovs.dal.hybrids.vdisk import VDisk
    statistics = {}
    # Seed every known counter and its per-second ('_ps') variant with zero.
    for stat_key in StorageDriverClient.STAT_KEYS:
        statistics[stat_key] = 0
        statistics['{0}_ps'.format(stat_key)] = 0
    # Accumulate the values reported by the StorageDriver.
    fetched = self.fetch_statistics()
    for stat_key, stat_value in fetched.iteritems():
        statistics[stat_key] += stat_value
    statistics['timestamp'] = time.time()
    VDisk.calculate_delta(self._key, dynamic, statistics)
    return statistics
def resize_from_voldrv(volumename, volumesize, volumepath, storagedriver_id):
    """
    Resize a disk
    Triggered by volumedriver messages on the queue
    @param volumepath: path on hypervisor to the volume
    @param volumename: volume id of the disk
    @param volumesize: size of the volume
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    hypervisor = Factory.get(pmachine)
    volumepath = hypervisor.clean_backing_disk_filename(volumepath)
    # Serialize concurrent events for the same volume/path combination.
    mutex = VolatileMutex('{}_{}'.format(volumename, volumepath))
    try:
        mutex.acquire(wait=30)
        # Look the disk up by volume id first, then by device name on the
        # vPool; model a brand new one when neither lookup succeeds.
        disk = VDiskList.get_vdisk_by_volume_id(volumename)
        if disk is None:
            disk = VDiskList.get_by_devicename_and_vpool(volumepath, storagedriver.vpool)
        if disk is None:
            disk = VDisk()
    finally:
        mutex.release()
    disk.devicename = volumepath
    disk.volume_id = volumename
    disk.size = volumesize
    disk.vpool = storagedriver.vpool
    disk.save()
def _statistics(self, dynamic):
    """
    Aggregates the Statistics (IOPS, Bandwidth, ...) of each vDisk served by the vPool.
    """
    from ovs.dal.hybrids.vdisk import VDisk
    # Start from zeroed counters, including the per-second ('_ps') variants.
    statistics = {}
    for stat_key in StorageDriverClient.stat_keys:
        statistics[stat_key] = 0
        statistics['{0}_ps'.format(stat_key)] = 0
    # Sum the per-vDisk statistics into the vPool-level totals.
    for vdisk in self.vdisks:
        vdisk_stats = vdisk.fetch_statistics()
        for stat_key, stat_value in vdisk_stats.iteritems():
            statistics[stat_key] += stat_value
    statistics['timestamp'] = time.time()
    VDisk.calculate_delta(self._key, dynamic, statistics)
    return statistics
def delete_snapshot(diskguid, snapshotid):
    """
    Delete a disk snapshot
    @param diskguid: guid of the disk
    @param snapshotid: id of the snapshot
    @todo: Check if new volumedriver storagedriver upon deletion
    of a snapshot has built-in protection to block it from being deleted
    if a clone was created from it.
    """
    disk = VDisk(diskguid)
    logger.info('Deleting snapshot {} from disk {}'.format(snapshotid, disk.name))
    volume_id = str(disk.volume_id)
    disk.storagedriver_client.delete_snapshot(volume_id, str(snapshotid))
    # The cached snapshot list is stale now; force a reload on next access.
    disk.invalidate_dynamics(['snapshots'])
def _statistics(self, dynamic):
    """
    Aggregates the Statistics (IOPS, Bandwidth, ...) of each vDisk of the vMachine.
    """
    from ovs.dal.hybrids.vdisk import VDisk
    statistics = {}
    # Each counter gets a plain and a per-second ('_ps') slot, both zeroed.
    for stat_key in StorageDriverClient.STAT_KEYS:
        statistics.update({stat_key: 0, '{0}_ps'.format(stat_key): 0})
    # Fold every vDisk's statistics into the machine-level totals.
    for vdisk in self.vdisks:
        for stat_key, stat_value in vdisk.fetch_statistics().iteritems():
            statistics[stat_key] = statistics[stat_key] + stat_value
    statistics['timestamp'] = time.time()
    VDisk.calculate_delta(self._key, dynamic, statistics)
    return statistics
def create_snapshot(diskguid, metadata, snapshotid=None):
    """
    Create a disk snapshot
    @param diskguid: guid of the disk
    @param metadata: dict of metadata
    @param snapshotid: optional id for the snapshot; generated when omitted
    """
    disk = VDisk(diskguid)
    logger.info("Create snapshot for disk {}".format(disk.name))
    snapshotid = snapshotid if snapshotid is not None else str(uuid.uuid4())
    # The StorageDriver expects the metadata serialized as a pickle blob.
    serialized_metadata = pickle.dumps(metadata)
    disk.storagedriver_client.create_snapshot(str(disk.volume_id),
                                              snapshot_id=snapshotid,
                                              metadata=serialized_metadata)
    disk.invalidate_dynamics(["snapshots"])
    return snapshotid
def _make_clones(vdisks_map, depth=clone_depth):
    # Repeatedly clone the most recently added vDisk, building a chain of
    # 'depth' clones; each clone is registered in vdisks_map under its new name.
    for _ in range(depth):
        source = list(vdisks_map.itervalues())[-1]
        clone_name = source.name + '_clone'
        clone_guid = VDiskController.clone(source.guid, clone_name).get('vdisk_guid')
        vdisks_map[clone_name] = VDisk(clone_guid)
def test_delete_snapshot(self):
    """
    Test the delete snapshot functionality
    - Create a vDisk and take a snapshot
    - Attempt to delete a non-existing snapshot
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedrivers = structure['storagedrivers']
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    VDiskController.create_snapshot(vdisk_guid=vdisk1.guid,
                                    metadata={'timestamp': int(time.time()),
                                              'label': 'label1',
                                              'is_consistent': True,
                                              'is_automatic': True,
                                              'is_sticky': False})
    self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find 1 snapshot')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 1, msg='Expected to find 1 snapshot ID')
    # Deleting an unknown snapshot must fail loudly
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                        snapshot_id='non-existing')

    # Deleting the real snapshot clears both dynamic lists
    VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                    snapshot_id=vdisk1.snapshot_ids[0])
    self.assertTrue(expr=len(vdisk1.snapshots) == 0, msg='Expected to find no more snapshots')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 0, msg='Expected to find no more snapshot IDs')
def ensure_safety(vdisk_guid, excluded_storagerouter_guids=None, **kwargs):
    """
    Ensures (or tries to ensure) the safety of a given vDisk.
    Assumptions:
    * A local overloaded master is better than a non-local non-overloaded master
    * Prefer master/slaves to be on different hosts, a subsequent slave on the same node doesn't add safety
    * Don't actively overload services (e.g. configure an MDS as slave causing it to get overloaded)
    * Too much safety is not wanted (it adds loads to nodes while not required)
    * Order of slaves is:
        * All slaves on StorageRouters in primary Domain of vDisk host
        * All slaves on StorageRouters in secondary Domain of vDisk host
        * Eg: Safety of 2 (1 master + 1 slave)
            mds config = [local master in primary, slave in secondary]
        * Eg: Safety of 3 (1 master + 2 slaves)
            mds config = [local master in primary, slave in primary, slave in secondary]
        * Eg: Safety of 4 (1 master + 3 slaves)
            mds config = [local master in primary, slave in primary, slave in secondary, slave in secondary]
    :param vdisk_guid: vDisk GUID to calculate a new safety for
    :type vdisk_guid: str
    :param excluded_storagerouter_guids: GUIDs of StorageRouters to leave out of calculation (Eg: When 1 is down or unavailable)
    :type excluded_storagerouter_guids: list[str]
    :raises RuntimeError: If host of vDisk is part of the excluded StorageRouters
                          If host of vDisk is not part of the StorageRouters in the primary domain
                          If catchup command fails for a slave
                          If MDS client cannot be created for any of the current or new MDS services
                          If updateMetadataBackendConfig would fail for whatever reason
    :raises SRCObjectNotFoundException: If vDisk does not have a StorageRouter GUID
    :return: None
    :rtype: NoneType
    """
    # Thin wrapper: resolve the vDisk and delegate to the per-vPool implementation.
    vdisk = VDisk(vdisk_guid)
    safety_args = (vdisk.vpool_guid, vdisk.guid, excluded_storagerouter_guids)
    return MDSServiceController._ensure_safety_vpool(*safety_args, **kwargs)
def _find_ovs_model_disk_by_location(self, location, hostname, retry=3, timeout=3):
    """Find OVS disk object based on location and hostname

    :param location: expected '<mountpoint>/<devicename>' of the disk
    :param hostname: hostname used to match the serving storagerouter
    :param retry: number of additional attempts after the first one
    :param timeout: seconds to wait between attempts (falsy disables waiting)
    :return VDisk: OVS DAL model object
    :raises RuntimeError: when no disk matches the location after all attempts
    """
    hostname = self._get_real_hostname(hostname)
    LOG.debug('[_FIND OVS DISK] Location %s, hostname %s'
              % (location, hostname))
    attempt = 0
    while attempt <= retry:
        # Scan all modeled vDisks for one served at the requested location
        for vd in VDiskList.get_vdisks():
            if vd.vpool:
                for vsr in vd.vpool.storagedrivers:
                    if vsr.storagerouter.name == hostname:
                        _location = "{0}/{1}".format(vsr.mountpoint,
                                                     vd.devicename)
                        if _location == location:
                            LOG.info('Location %s Disk found %s'
                                     % (location, vd.guid))
                            # Return a fresh DAL object for the match
                            return VDisk(vd.guid)
        msg = ' NO RESULT Attempt %s timeout %s max attempts %s'
        LOG.debug(msg % (attempt, timeout, retry))
        # FIX: previously we also slept after the FINAL attempt, delaying the
        # RuntimeError by 'timeout' seconds for nothing. Only wait when another
        # attempt will actually follow.
        if timeout and attempt < retry:
            time.sleep(timeout)
        attempt += 1
    raise RuntimeError('No disk found for location %s' % location)
def test_event_migrate_from_volumedriver(self):
    """
    Test migrate from volumedriver event
    """
    _ = self
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1, 2],
         'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    vpool = structure['vpools'][1]
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

    vdisk = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 4, storagedriver_guid=storagedrivers[1].guid))
    # Simulate the StorageDriver moving the volume to the second driver,
    # then deliver the corresponding event to the controller
    vdisk.storagedriver_client.migrate(vdisk.volume_id, storagedrivers[2].storagedriver_id, False)
    VDiskController.migrate_from_voldrv(volume_id=vdisk.volume_id, new_owner_id=storagedrivers[2].storagedriver_id)
    # The model should now reflect the new owning StorageDriver
    self.assertEqual(vdisk.storagedriver_id, storagedrivers[2].storagedriver_id)
def rollback(diskguid, timestamp):
    """
    Rolls back a disk based on a given disk snapshot timestamp
    """
    disk = VDisk(diskguid)
    # Collect every snapshot taken at exactly this timestamp.
    matches = [snapshot for snapshot in disk.snapshots
               if snapshot['timestamp'] == timestamp]
    if len(matches) == 0:
        raise ValueError(
            'No snapshot found for timestamp {}'.format(timestamp))
    guid_to_restore = matches[0]['guid']
    disk.storagedriver_client.rollback_volume(str(disk.volume_id),
                                              guid_to_restore)
    # Snapshot list changed on the StorageDriver; drop the cached copy.
    disk.invalidate_dynamics(['snapshots'])
    return True
def setUpClass(cls): """ Sets up the unittest, mocking a certain set of 3rd party libraries and extensions. This makes sure the unittests can be executed without those libraries installed """ # Load dummy stores PersistentFactory.store = DummyPersistentStore() VolatileFactory.store = DummyVolatileStore() # Replace mocked classes sys.modules[ 'ovs.extensions.storageserver.storagedriver'] = StorageDriver # Import required modules/classes after mocking is done from ovs.dal.hybrids.vmachine import VMachine from ovs.dal.hybrids.vdisk import VDisk from ovs.extensions.generic.volatilemutex import VolatileMutex from ovs.lib.vmachine import VMachineController from ovs.lib.vdisk import VDiskController from ovs.lib.scheduledtask import ScheduledTaskController # Globalize mocked classes global VDisk global VMachine global VolatileMutex global VMachineController global VDiskController global ScheduledTaskController _ = VDisk(), VolatileMutex('dummy'), VMachine( ), VMachineController, VDiskController, ScheduledTaskController # Cleaning storage VolatileFactory.store.clean() PersistentFactory.store.clean()
def __init__(self, vdisk_guid, excluded_storagerouter_guids=None):
    """
    :param vdisk_guid: vDisk GUID to calculate a new safety for
    :type vdisk_guid: str
    :param excluded_storagerouter_guids: GUIDs of StorageRouters to leave out of calculation (Eg: When 1 is down or unavailable)
    :type excluded_storagerouter_guids: list[str]
    """
    if excluded_storagerouter_guids is None:
        excluded_storagerouter_guids = []

    self.vdisk = VDisk(vdisk_guid)
    self.excluded_storagerouters = [StorageRouter(sr_guid) for sr_guid in excluded_storagerouter_guids]
    # Connection timeouts are configurable per vPool; fall back to defaults
    self.sr_client_timeout = Configuration.get('ovs/vpools/{0}/mds_config|sr_client_connection_timeout'.format(self.vdisk.vpool_guid), default=300)
    self.mds_client_timeout = Configuration.get('ovs/vpools/{0}/mds_config|mds_client_connection_timeout'.format(self.vdisk.vpool_guid), default=120)
    self.tlogs, self.safety, self.max_load = self.get_mds_config()

    # Filled in by functions
    self.metadata_backend_config_start = {}  # MDS backend config as it was before recalculation
    # Layout related
    # Per-domain bookkeeping of MDS placement: services already used,
    # their load figures and the ones still available
    self.mds_layout = {'primary': {'used': [],
                                   'loads': {},
                                   'available': []},
                       'secondary': {'used': [],
                                     'loads': {},
                                     'available': []}}
    self.services_load = {}  # load per MDS service
    self.recommended_primary = None
    self.recommended_secondary = None
    self.master_service = None  # current master MDS service, resolved later
    self.slave_services = []  # current slave MDS services, resolved later
    self.mds_client_cache = {}  # avoids re-creating MDS clients per service
def set_as_template(diskguid):
    """
    Set a disk as template
    @param diskguid: guid of the disk
    """
    disk = VDisk(diskguid)
    # The StorageDriver client expects the volume id as a plain string.
    volume_id = str(disk.volume_id)
    disk.storagedriver_client.set_volume_as_template(volume_id)
def delete_snapshot(diskguid, snapshotid):
    """
    Delete a disk snapshot
    @param diskguid: guid of the disk
    @param snapshotid: id of the snapshot to delete
    @todo: Check if new volumedriver storagedriver upon deletion
    of a snapshot has built-in protection to block it from being deleted
    if a clone was created from it.
    """
    disk = VDisk(diskguid)
    logger.info('Deleting snapshot {} from disk {}'.format(snapshotid, disk.name))
    volume_id = str(disk.volume_id)
    snapshot_id = str(snapshotid)
    disk.storagedriver_client.delete_snapshot(volume_id, snapshot_id)
    # Snapshot list changed on the StorageDriver; drop the cached copy.
    disk.invalidate_dynamics(['snapshots'])
def create_snapshot(diskguid, metadata, snapshotid=None):
    """
    Create a disk snapshot
    @param diskguid: guid of the disk
    @param metadata: dict of metadata
    @param snapshotid: optional id for the snapshot; a random UUID is generated when omitted
    """
    disk = VDisk(diskguid)
    logger.info('Create snapshot for disk {}'.format(disk.name))
    if snapshotid is None:
        snapshotid = str(uuid.uuid4())
    # Metadata travels to the StorageDriver as a pickled blob.
    disk.storagedriver_client.create_snapshot(str(disk.volume_id),
                                              snapshot_id=snapshotid,
                                              metadata=pickle.dumps(metadata))
    disk.invalidate_dynamics(['snapshots'])
    return snapshotid
def _statistics(self, dynamic):
    """
    Aggregates the Statistics (IOPS, Bandwidth, ...) of each vDisk of the vMachine.
    """
    from ovs.dal.hybrids.vdisk import VDisk
    statistics = {}
    for stat_key in StorageDriverClient.STAT_KEYS:
        statistics[stat_key] = 0
        statistics['{0}_ps'.format(stat_key)] = 0
    # Only count vDisks actually served by one of this machine's StorageDrivers.
    for storagedriver in self.storagedrivers:
        for vdisk in storagedriver.vpool.vdisks:
            if vdisk.storagedriver_id != storagedriver.storagedriver_id:
                continue
            for stat_key, stat_value in vdisk.fetch_statistics().iteritems():
                statistics[stat_key] += stat_value
    statistics['timestamp'] = time.time()
    VDisk.calculate_delta(self._key, dynamic, statistics)
    return statistics
def resize_from_voldrv(volumename, volumesize, volumepath, storagedriver_id):
    """
    Resize a disk
    Triggered by volumedriver messages on the queue
    @param volumepath: path on hypervisor to the volume
    @param volumename: volume id of the disk
    @param volumesize: size of the volume
    """
    pmachine = PMachineList.get_by_storagedriver_id(storagedriver_id)
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    hypervisor = Factory.get(pmachine)
    volumepath = hypervisor.clean_backing_disk_filename(volumepath)
    # Serialize concurrent events for the same volume/path combination
    mutex = VolatileMutex('{}_{}'.format(volumename, volumepath))
    try:
        mutex.acquire(wait=30)
        # Look up by volume id first, then by device name; model a new
        # vDisk when neither lookup succeeds
        disk = VDiskList.get_vdisk_by_volume_id(volumename)
        if disk is None:
            disk = VDiskList.get_by_devicename_and_vpool(volumepath, storagedriver.vpool)
            if disk is None:
                disk = VDisk()
    finally:
        mutex.release()
    disk.devicename = volumepath
    disk.volume_id = volumename
    disk.size = volumesize
    disk.vpool = storagedriver.vpool
    disk.save()
    # Propagate the change to the management center and re-check MDS safety
    VDiskController.sync_with_mgmtcenter(disk, pmachine, storagedriver)
    MDSServiceController.ensure_safety(disk)
def get_vdisk_by_guid(vdisk_guid):
    """
    Fetch vdisk object by vdisk guid
    :param vdisk_guid: guid of an existing vdisk
    :type vdisk_guid: str
    :return: a vdisk object
    :rtype: ovs.dal.hybrids.vdisk.VDisk
    """
    vdisk = VDisk(vdisk_guid)
    return vdisk
def create_vdisks_for_mds_service(amount, start_id, mds_service=None, storagedriver=None):
    """
    Generates vdisks and appends them to a given mds_service

    :param amount: number of vDisks to create
    :param start_id: first numeric id; vDisks are named str(id) with devicename 'vdisk_<id>'
    :param mds_service: MDSService to attach the vDisks to (exclusive with storagedriver)
    :param storagedriver: StorageDriver to create the vDisks on (exclusive with mds_service)
    :return: dict mapping numeric id -> created VDisk
    :raises RuntimeError: when not exactly one of mds_service/storagedriver is passed,
                          or the MDSService's node has no StorageDriver
    """
    # Exactly one of the two anchors must be provided
    if (mds_service is None and storagedriver is None) or (mds_service is not None and storagedriver is not None):
        raise RuntimeError("Either `mds_service` or `storagedriver` should be passed")
    vdisks = {}
    storagedriver_id = None
    vpool = None
    mds_services = []
    if mds_service is not None:
        mds_services.append(mds_service)
        # Resolve the StorageDriver co-located with the MDS service's StorageRouter
        for sd in mds_service.vpool.storagedrivers:
            if sd.storagerouter_guid == mds_service.service.storagerouter_guid:
                storagedriver_id = sd.storagedriver_id
                vpool = sd.vpool
        if storagedriver_id is None:
            raise RuntimeError("The given MDSService is located on a node without StorageDriver")
    else:
        storagedriver_id = storagedriver.storagedriver_id
        vpool = storagedriver.vpool
    srclient = StorageRouterClient(vpool.guid, None)
    for i in xrange(start_id, start_id + amount):
        devicename = "vdisk_{0}".format(i)
        mds_backend_config = Helper._generate_mdsmetadatabackendconfig(mds_services)
        # Create the actual volume on the (mocked) StorageRouter first
        volume_id = srclient.create_volume(devicename, mds_backend_config, 0, str(storagedriver_id))
        if len(mds_services) == 1:
            MDSClient.set_catchup(mds_services[0], volume_id, 50)
        # Then model the matching vDisk in the DAL
        vdisk = VDisk()
        vdisk.name = str(i)
        vdisk.devicename = devicename
        vdisk.volume_id = volume_id
        vdisk.vpool = vpool
        vdisk.size = 0
        vdisk.save()
        vdisk.reload_client("storagedriver")
        if mds_service is not None:
            # Register the vDisk on the MDS service as master
            junction = MDSServiceVDisk()
            junction.vdisk = vdisk
            junction.mds_service = mds_service
            junction.is_master = True
            junction.save()
        vdisks[i] = vdisk
    return vdisks
def test_sync_vdisk_with_voldrv(self):
    """
    Create a chain of clones, remove the clones from the model only, then
    verify sync_with_reality re-models them (including parent relations).
    """
    clone_depth = 3

    def _make_clones(vdisks_map, depth=clone_depth):
        # Clone the most recently added vDisk repeatedly, 'depth' times
        for level in range(depth):
            previous_vd = list(vdisks_map.itervalues())[-1]
            new_name = previous_vd.name + '_clone'
            new_guid = VDiskController.clone(previous_vd.guid, new_name).get('vdisk_guid')
            vdisks_map[new_name] = VDisk(new_guid)

    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    vdisk_name = 'vdisk_1'
    storagedriver = structure['storagedrivers'][1]
    vdisk_1 = VDisk(VDiskController.create_new(volume_name=vdisk_name, volume_size=1024 ** 4, storagedriver_guid=storagedriver.guid))
    vdisks = OrderedDict()
    vdisks[vdisk_name] = vdisk_1
    _make_clones(vdisks)
    self.assertEquals(clone_depth + 1, len(list(VDiskList.get_vdisks())))
    delete_list = list(vdisks.itervalues())[::-1][:-1]  # These vDisks are clones and ought to be deleted
    for vdisk in delete_list:
        for mds_service in vdisk.mds_services:
            mds_service.delete()
        vdisk.delete()
    self.assertEquals(1, len(list(VDiskList.get_vdisks())))  # Make sure vDisk clones are properly removed
    self.assertEquals(VDiskList.get_vdisks()[0].name, vdisk_name)  # Make sure only item left is original vDisk

    VDiskController.sync_with_reality()
    self.assertEquals(clone_depth + 1, len(list(VDiskList.get_vdisks())))  # The clones should be in place now
    # Count how many modeled vDisks have a parent after the sync
    parents = 0
    for vdisk in VDiskList.get_vdisks():
        try:
            if vdisk.parent_vdisk.name:
                parents += 1
        except AttributeError:
            pass
    self.assertEquals(clone_depth, parents)  # As much parents should be detected as the depth of the clones
def delete_snapshot(diskguid, snapshotid):
    """
    Delete a disk snapshot
    @param diskguid: guid of the disk
    @param snapshotid: ID of the snapshot
    @todo: Check if new volumedriver storagedriver upon deletion
    of a snapshot has built-in protection to block it from being deleted
    if a clone was created from it.
    """
    disk = VDisk(diskguid)
    # The snapshot must be known on this disk ...
    if all(snap['guid'] != snapshotid for snap in disk.snapshots):
        raise RuntimeError('Snapshot {0} does not belong to disk {1}'.format(snapshotid, disk.name))
    # ... and must not have clones hanging off it.
    clones_of_snapshot = VDiskList.get_by_parentsnapshot(snapshotid)
    if len(clones_of_snapshot) > 0:
        raise RuntimeError('Snapshot {0} has {1} volumes cloned from it, cannot remove'.format(snapshotid, len(clones_of_snapshot)))
    logger.info('Deleting snapshot {0} from disk {1}'.format(snapshotid, disk.name))
    disk.storagedriver_client.delete_snapshot(str(disk.volume_id),
                                              str(snapshotid))
    disk.invalidate_dynamics(['snapshots'])
def create_snapshot(diskguid, metadata, snapshotid=None):
    """
    Take a snapshot of a vDisk.

    :param diskguid: guid of the disk
    :param metadata: dict of metadata
    :param snapshotid: ID of the snapshot
    :return: the ID of the created snapshot (generated when not supplied)
    """
    if not isinstance(metadata, dict):
        raise ValueError('Expected metadata as dict, got {0} instead'.format(type(metadata)))
    disk = VDisk(diskguid)
    logger.info('Create snapshot for disk {0}'.format(disk.name))
    # Generate an ID when the caller did not provide one
    snapshotid = snapshotid if snapshotid is not None else str(uuid.uuid4())
    # Metadata is pickled before handing it to the volumedriver
    disk.storagedriver_client.create_snapshot(str(disk.volume_id),
                                              snapshot_id=snapshotid,
                                              metadata=pickle.dumps(metadata))
    disk.invalidate_dynamics(['snapshots'])
    return snapshotid
def setUpClass(cls): """ Sets up the unittest, mocking a certain set of 3rd party libraries and extensions. This makes sure the unittests can be executed without those libraries installed """ # Load dummy stores PersistentFactory.store = DummyPersistentStore() VolatileFactory.store = DummyVolatileStore() # Replace mocked classes sys.modules[ 'ovs.extensions.storageserver.storagedriver'] = StorageDriverModule # Import required modules/classes after mocking is done from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.hybrids.service import Service from ovs.dal.hybrids.vpool import VPool from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.hybrids.pmachine import PMachine from ovs.dal.hybrids.servicetype import ServiceType from ovs.dal.hybrids.storagedriver import StorageDriver from ovs.dal.hybrids.backendtype import BackendType from ovs.dal.hybrids.j_mdsservice import MDSService from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk from ovs.extensions.generic.volatilemutex import VolatileMutex from ovs.lib.mdsservice import MDSServiceController # Globalize mocked classes global VDisk global VPool global Service global StorageRouter global StorageDriver global BackendType global PMachine global MDSService global ServiceType global MDSServiceVDisk global VolatileMutex global MDSServiceController _ = VDisk(), VPool(), Service(), MDSService(), MDSServiceVDisk(), ServiceType(), \ StorageRouter(), StorageDriver(), BackendType(), PMachine(), \ VolatileMutex('dummy'), MDSServiceController # Configuration def _get(key): c = PersistentFactory.get_client() if c.exists(key): return c.get(key) return None Configuration.get = staticmethod(_get) # Cleaning storage VolatileFactory.store.clean() PersistentFactory.store.clean()
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.

    Called on "cinder create --snapshot-id ..."
    :param snapshot: snapshot reference (sqlalchemy Model)
    :param volume: volume reference (sqlalchemy Model)

    Volume here is just a ModelObject, it doesn't exist physically,
    it will be created by OVS.
    Diskguid to be passed to the clone method is the ovs diskguid of the
    parent of the snapshot with snapshot.id

    OVS: Clone from arbitrary volume,
    requires volumedriver 3.6 release > 15.08.2014
    """
    _debug_vol_info('CLONE_VOL', volume)
    _debug_vol_info('CLONE_SNAP', snapshot)

    mountpoint = self._get_hostname_mountpoint(str(volume.host))
    ovs_snap_disk = self._find_ovs_model_disk_by_snapshot_id(snapshot.id)
    # Snapshot must be present in the backend before we can clone from it
    OVSVolumeDriver._wait_for_snapshot(ovs_snap_disk, snapshot.id)
    devicename = volume.display_name
    if not devicename:
        devicename = volume.name
    pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
        str(volume.host))

    LOG.info('[CLONE FROM SNAP] %s %s %s %s'
             % (ovs_snap_disk.guid, snapshot.id, devicename, pmachineguid))
    try:
        kwargs = dict(diskguid=ovs_snap_disk.guid,
                      snapshotid=snapshot.id,
                      devicename=devicename,
                      pmachineguid=pmachineguid,
                      machinename="",
                      machineguid=None)
        LOG.debug('[CLONE FROM SNAP] Executing clone - async')
        # Execute "clone" task async, using celery workers
        # wait for the result for 30 minutes then raise TimeoutError
        disk_meta = VDiskController.clone.apply_async(kwargs = kwargs)\
            .get(timeout = 1800)
        LOG.debug('[CLONE FROM SNAP] Executing clone - async - DONE')
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])

        LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
        LOG.debug('[CLONE FROM SNAP] New volume %s'
                  % volume['provider_location'])
        # Tie the new OVS vDisk back to the Cinder volume
        vdisk = VDisk(disk_meta['diskguid'])
        vdisk.cinder_id = volume.id
        vdisk.name = devicename
        vdisk.save()
    except Exception as ex:
        # Best-effort rollback: remove the half-created volume, then re-raise
        LOG.error('CLONE FROM SNAP: Internal error %s ' % str(ex))
        self.delete_volume(volume)
        raise

    return {'provider_location': volume['provider_location'],
            'display_name': volume['display_name']}
def setUpClass(cls): """ Sets up the unittest, mocking a certain set of 3rd party libraries and extensions. This makes sure the unittests can be executed without those libraries installed """ # Load dummy stores PersistentFactory.store = DummyPersistentStore() VolatileFactory.store = DummyVolatileStore() # Replace mocked classes sys.modules[ 'ovs.extensions.storageserver.storagedriver'] = StorageDriverModule sys.modules['ovs.extensions.hypervisor.hypervisors.kvm'] = KVMModule # Import required modules/classes after mocking is done from ovs.dal.hybrids.backendtype import BackendType from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.hybrids.j_mdsservice import MDSService from ovs.dal.hybrids.j_mdsservicevdisk import MDSServiceVDisk from ovs.lib.vdisk import VDiskController from ovs.dal.hybrids.pmachine import PMachine from ovs.dal.hybrids.vmachine import VMachine from ovs.dal.hybrids.vpool import VPool from ovs.dal.hybrids.storagedriver import StorageDriver from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.hybrids.failuredomain import FailureDomain from ovs.dal.hybrids.service import Service from ovs.dal.hybrids.servicetype import ServiceType from ovs.dal.lists.vdisklist import VDiskList from ovs.lib.mdsservice import MDSServiceController # Globalize mocked classes global VDisk global VDiskController global PMachine global VMachine global BackendType global VPool global StorageDriver global StorageRouter global FailureDomain global MDSService global MDSServiceVDisk global Service global ServiceType global VDiskList global MDSServiceController _ = VDisk(), PMachine(), VMachine(), VDiskController, VPool(), BackendType(), StorageDriver(), StorageRouter(), \ FailureDomain(), MDSService(), MDSServiceVDisk(), Service(), ServiceType(), VDiskList, MDSServiceController # Cleaning storage VolatileFactory.store.clean() PersistentFactory.store.clean()
def _statistics(self, dynamic):
    """
    Aggregates the Statistics (IOPS, Bandwidth, ...) of each vDisk.
    """
    from ovs.dal.hybrids.vdisk import VDisk
    totals = {}
    for storagedriver in self.storagedrivers:
        raw_stats = storagedriver.fetch_statistics()
        for stat_name, stat_value in raw_stats.iteritems():
            if isinstance(stat_value, dict):
                # Nested statistic: sum each sub-metric into its own bucket
                bucket = totals.setdefault(stat_name, {})
                for sub_name, sub_value in stat_value.iteritems():
                    bucket[sub_name] = bucket.get(sub_name, 0) + sub_value
            else:
                totals[stat_name] = totals.get(stat_name, 0) + stat_value
    totals['timestamp'] = time.time()
    VDisk.calculate_delta(self._key, dynamic, totals)
    return totals
def clean_bad_disk(vdiskguid):
    """
    Cleanup bad vdisk:
    - in case create_from_template failed
    - remove mds_services so the vdisk can be properly cleaned up
    :param vdiskguid: guid of vdisk
    :return: None
    """
    vdisk = VDisk(vdiskguid)
    logger.info('Cleanup vdisk {0}'.format(vdisk.name))
    # Junction records would block removal of the vDisk from the model
    for junction in vdisk.mds_services:
        junction.delete()
    storagedriver = StorageDriverList.get_by_storagedriver_id(vdisk.storagedriver_id)
    if storagedriver is not None and vdisk.devicename is not None:
        # Only attempt filesystem cleanup when we know where the volume lives
        logger.debug('Removing volume from filesystem')
        devicepath = '{0}/{1}'.format(storagedriver.mountpoint, vdisk.devicename)
        VDiskController.delete_volume(devicepath)
    logger.debug('Deleting vdisk {0} from model'.format(vdisk.name))
    vdisk.delete()
def test_create_snapshot(self):
    """
    Test the create snapshot functionality
    - Create a vDisk
    - Attempt to create a snapshot providing incorrect parameters
    - Create a snapshot and make some assertions
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedrivers = structure['storagedrivers']

    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    # metadata must be a dict; a string should be rejected
    with self.assertRaises(ValueError):
        # noinspection PyTypeChecker
        VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata='')

    now = int(time.time())
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata={'timestamp': now,
                                                                                    'label': 'label1',
                                                                                    'is_consistent': True,
                                                                                    'is_automatic': True,
                                                                                    'is_sticky': False})
    self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find 1 snapshot')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 1, msg='Expected to find 1 snapshot ID')
    snapshot = vdisk1.snapshots[0]
    # The snapshot dict must expose exactly this key set
    expected_keys = {'guid', 'timestamp', 'label', 'is_consistent', 'is_automatic', 'is_sticky', 'in_backend', 'stored'}
    self.assertEqual(first=expected_keys,
                     second=set(snapshot.keys()),
                     msg='Set of expected keys differs from reality. Expected: {0} - Reality: {1}'.format(expected_keys, set(snapshot.keys())))
    # Each stored value must round-trip unchanged
    for key, value in {'guid': snapshot_id,
                       'label': 'label1',
                       'stored': 0,
                       'is_sticky': False,
                       'timestamp': now,
                       'in_backend': True,
                       'is_automatic': True,
                       'is_consistent': True}.iteritems():
        self.assertEqual(first=value,
                         second=snapshot[key],
                         msg='Value for key "{0}" does not match reality. Expected: {1} - Reality: {2}'.format(key, value, snapshot[key]))
def setUpClass(cls): """ Sets up the unittest, mocking a certain set of 3rd party libraries and extensions. This makes sure the unittests can be executed without those libraries installed """ # Load dummy stores PersistentFactory.store = DummyPersistentStore() VolatileFactory.store = DummyVolatileStore() # Replace mocked classes sys.modules[ 'ovs.extensions.storageserver.storagedriver'] = StorageDriverModule # Import required modules/classes after mocking is done from ovs.dal.hybrids.backendtype import BackendType from ovs.dal.hybrids.disk import Disk from ovs.dal.hybrids.diskpartition import DiskPartition from ovs.dal.hybrids.failuredomain import FailureDomain from ovs.dal.hybrids.pmachine import PMachine from ovs.dal.hybrids.storagerouter import StorageRouter from ovs.dal.hybrids.vdisk import VDisk from ovs.dal.hybrids.vmachine import VMachine from ovs.dal.hybrids.vpool import VPool from ovs.extensions.generic.volatilemutex import VolatileMutex from ovs.lib.vmachine import VMachineController from ovs.lib.vdisk import VDiskController from ovs.lib.scheduledtask import ScheduledTaskController # Globalize mocked classes global Disk global VDisk global VMachine global PMachine global VPool global BackendType global DiskPartition global FailureDomain global StorageRouter global VolatileMutex global VMachineController global VDiskController global ScheduledTaskController _ = VDisk(), VolatileMutex('dummy'), VMachine(), PMachine(), VPool(), BackendType(), FailureDomain(), \ VMachineController, VDiskController, ScheduledTaskController, StorageRouter(), Disk(), DiskPartition() # Cleaning storage VolatileFactory.store.clean() PersistentFactory.store.clean()
def fetch_statistics(self):
    """
    Loads statistics from this vDisk - returns unprocessed data

    :return: raw statistics extracted via VDisk.extract_statistics
    :rtype: dict
    """
    # Load data from volumedriver
    sdstats = StorageDriverClient.EMPTY_STATISTICS()
    if self.storagedriver_id and self.vpool:
        try:
            sdstats = self.vpool.storagedriver_client.statistics_node(
                str(self.storagedriver_id), req_timeout_secs=2)
        except Exception as ex:
            # Best effort: fall back to the empty statistics object on failure
            StorageDriver._logger.error(
                'Error loading statistics_node from {0}: {1}'.format(
                    self.storagedriver_id, ex))
    # Load volumedriver data in dictionary
    # Bugfix: the previous code dereferenced self.vpool unconditionally here,
    # raising AttributeError when this StorageDriver has no vPool (the guard
    # above already treats that as a valid state).
    reference_vdisk = None
    if self.vpool is not None and len(self.vpool.vdisks) > 0:
        reference_vdisk = self.vpool.vdisks[0]
    return VDisk.extract_statistics(sdstats, reference_vdisk)
def list(self, vdisk_guid=None):
    """
    Lists all available Domains
    :param vdisk_guid: Optional vDisk GUID. If passed in, only domains suitable for this vDisk will be returned
    :type vdisk_guid: str
    :return: the suitable Domains (all Domains when no vDisk is given)
    :rtype: list
    """
    if vdisk_guid is not None:
        vdisk = VDisk(vdisk_guid)
        possible_storagerouter_guids = set(sd.storagerouter_guid for sd in vdisk.vpool.storagedrivers)
        storagerouter_guid = vdisk.storagerouter_guid
        domains = set()
        for domain in DomainList.get_domains():
            # Bugfix: work on a copy — the previous code called .remove() on
            # the Domain's own storage_router_layout['regular'] list, mutating
            # the (potentially cached) layout as a side effect of listing.
            domain_sr_guids = list(domain.storage_router_layout['regular'])
            if storagerouter_guid in domain_sr_guids:
                # The vDisk's own StorageRouter does not count as a target
                domain_sr_guids.remove(storagerouter_guid)
            if len(domain_sr_guids) > 0 and not set(domain_sr_guids).isdisjoint(possible_storagerouter_guids):
                domains.add(domain)
        return list(domains)
    return DomainList.get_domains()
def generate_overview(self):
    # type: () -> dict
    """
    Generate the move overview depending on the current state
    :return: The overview from where the disks are coming from
    :rtype: dict
    """
    # Group the added vDisk guids by the StorageDriver currently hosting them
    added_source_overview = {}
    for vdisk_guid in self.added:
        storagedriver_id = VDisk(vdisk_guid).storagedriver_id
        added_source_overview.setdefault(storagedriver_id, []).append(vdisk_guid)
    return {'added': self.added,
            'balance': self.balance,
            'overflow': self.overflow,
            'add_source_overview': added_source_overview}
def __init__(self, vdisk_guid):
    # type: (str) -> None
    """
    Initializes a new MDSCatchUp
    An instance populates some caches. These cached are cleared once the instance is garbage collected.
    When running MDSCatchup in bulk: add them to a list to speed up the process
    :param vdisk_guid: Guid of the vDisk to catch up for
    :type vdisk_guid: str
    """
    # Unique identifier of this catch-up run; embedded in every log line via self._log
    self.id = str(uuid.uuid4())
    self.vdisk = VDisk(vdisk_guid)
    self.mds_key = self._CATCH_UP_VDISK_KEY.format(self.vdisk.guid)
    # Number of TLogs a slave may lag behind before catch-up is considered (configurable, default 100)
    self.tlog_threshold = Configuration.get(
        'ovs/volumedriver/mds|tlogs_behind', default=100)
    self.volumedriver_service_name = 'ovs-volumedriver_{0}'.format(
        self.vdisk.vpool.name)
    # MDS client connection timeout in seconds (configurable per vPool, default 120)
    self.mds_client_timeout = Configuration.get(
        'ovs/vpools/{0}/mds_config|mds_client_connection_timeout'.format(
            self.vdisk.vpool_guid), default=120)
    self.mds_clients = {}
    # NOTE(review): presumably when True the catch-up is only simulated — confirm in the catch-up logic
    self.dry_run = False
    self.catch_up_threads = []
    self.errors = []
    self._service_manager = ServiceFactory.get_manager()
    self._persistent = PersistentFactory.get_client()
    self._log = 'MDS catchup {0} - vDisk {1} (volume id: {2})'.format(
        self.id, self.vdisk.guid, self.vdisk.volume_id)
    # Caches built once at construction time (cleared on garbage collection, see class docstring)
    self._clients = self.build_clients()
    self._volumedriver_contexts = self.get_volumedriver_contexts()
    self._worker_contexts = self.get_worker_contexts()
    self._worker_context = self._worker_contexts[
        System.get_my_storagerouter()]
    self._relevant_contexts = self._get_all_relevant_contexts(
    )  # All possible contexts (by mixing volumedriver ones with workers)
def print_balances(cls, balances):
    # type: (List[VDiskBalance]) -> None
    """
    Prints out balances
    :param balances: balances to report on
    :type balances: List[VDiskBalance]
    :return: None
    :rtype: NoneType
    """
    # Group the balances per vPool for readable output
    balances_by_vpool = {}
    for balance in balances:  # type: VDiskBalance
        vpool = balance.storagedriver.vpool
        if vpool not in balances_by_vpool:
            balances_by_vpool[vpool] = []
        balances_by_vpool[vpool].append(balance)
    for vpool, vpool_balances in balances_by_vpool.viewitems():
        print('Balance for VPool {0}'.format(vpool.name))
        for balance in vpool_balances:  # type: VDiskBalance
            storagerouter = balance.storagedriver.storagerouter
            print(
                ' Storagerouter {0}, vdisks now: {1}, vdisks afterwards {2}, added {3}'
                .format(storagerouter.name, len(balance.hosted_guids),
                        len(balance.balance), len(balance.added)))
            if balance.added:
                # Show which StorageRouters the newly-added vDisks currently live on
                added_source_overview = {}
                for vdisk_guid in balance.added:
                    current_storagerouter = StorageRouter(
                        VDisk(vdisk_guid).storagerouter_guid)
                    if current_storagerouter not in added_source_overview:
                        added_source_overview[current_storagerouter] = []
                    added_source_overview[current_storagerouter].append(
                        vdisk_guid)
                print(' Vdisks added from:')
                for current_storagerouter, moved_vdisk_guids in added_source_overview.iteritems(
                ):
                    print(' StorageRouter {0}: {1}'.format(
                        current_storagerouter.name, len(moved_vdisk_guids)))
def clone(diskguid, snapshotid, devicename, pmachineguid, machinename, machineguid=None, **kwargs):
    """
    Clone a disk from an existing snapshot.

    @param diskguid: guid of the (parent) disk to clone
    @param snapshotid: guid of the snapshot to clone from
    @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
    @param pmachineguid: guid of the pmachine whose hypervisor determines the backing disk location
    @param machinename: name of the machine the backing disk is created for (eg: myVM)
    @param machineguid: guid of the machine to assign disk to
    """
    _ = kwargs  # Extra keyword arguments are accepted but ignored
    pmachine = PMachine(pmachineguid)
    hypervisor = Factory.get(pmachine)
    description = '{} {}'.format(machinename, devicename)
    properties_to_clone = ['description', 'size', 'type', 'retentionpolicyguid',
                           'snapshotpolicyguid', 'autobackup']
    new_disk = VDisk()
    disk = VDisk(diskguid)
    _log = 'Clone snapshot {} of disk {} to location {}'
    _location = hypervisor.get_backing_disk_path(machinename, devicename)
    _id = '{}'.format(disk.volume_id)
    _snap = '{}'.format(snapshotid)
    logger.info(_log.format(_snap, disk.name, _location))
    # The volumedriver performs the actual clone and returns the new volume id
    volume_id = disk.storagedriver_client.create_clone(_location, _id, _snap)
    # Model the clone: copy selected properties, link parent and snapshot
    new_disk.copy(disk, include=properties_to_clone)
    new_disk.parent_vdisk = disk
    new_disk.name = '{}-clone'.format(disk.name)
    new_disk.description = description
    new_disk.volume_id = volume_id
    new_disk.devicename = hypervisor.clean_backing_disk_filename(_location)
    new_disk.parentsnapshot = snapshotid
    # Without an explicit machine the clone stays with the parent's machine
    new_disk.vmachine = VMachine(machineguid) if machineguid else disk.vmachine
    new_disk.vpool = disk.vpool
    new_disk.save()
    return {'diskguid': new_disk.guid,
            'name': new_disk.name,
            'backingdevice': _location}
def create_cloned_volume(self, volume, src_vref):
    """Create a cloned volume from another volume.

    Called on "cinder create --source-volid ... "

    :param volume: volume reference - target volume (sqlalchemy Model)
    :param src_vref: volume reference - source volume (sqlalchemy Model)

    OVS: Create clone from template if the source is a template
         Create volume from snapshot if the source is a volume
         - create snapshot of source volume if it doesn't have snapshots
    """
    _debug_vol_info('CREATE_CLONED_VOL', volume)
    _debug_vol_info('CREATE_CLONED_VOL Source', src_vref)

    mountpoint = self._get_hostname_mountpoint(str(volume.host))
    name = volume.display_name
    if not name:
        name = volume.name
        volume.display_name = volume.name

    pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
        str(volume.host))

    #source
    source_ovs_disk = self._find_ovs_model_disk_by_location(
        str(src_vref.provider_location), src_vref.host)
    if source_ovs_disk.info['object_type'] == 'TEMPLATE':
        LOG.info('[CREATE_FROM_TEMPLATE] VDisk %s is a template'
                 % source_ovs_disk.devicename)

        # cloning from a template
        LOG.debug('[CREATE FROM TEMPLATE] ovs_disk %s '
                  % (source_ovs_disk.devicename))
        disk_meta = VDiskController.create_from_template(
            diskguid = source_ovs_disk.guid,
            machinename = "",
            devicename = str(name),
            pmachineguid = pmachineguid,
            machineguid = None,
            storagedriver_guid = None)
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])
        LOG.debug('[CREATE FROM TEMPLATE] New volume %s'
                  % volume['provider_location'])
        # Tie the new OVS vDisk back to the Cinder volume
        vdisk = VDisk(disk_meta['diskguid'])
        vdisk.cinder_id = volume.id
        vdisk.name = name
        LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s'
                  % (volume.id, name))
        vdisk.save()
    else:
        LOG.info('[THIN CLONE] VDisk %s is not a template'
                 % source_ovs_disk.devicename)
        # We do not support yet full volume clone
        # - requires "emancipate" functionality
        # So for now we'll take a snapshot
        # (or the latest snapshot existing) and clone from that snapshot
        # Only snapshots confirmed to be in the backend are usable
        available_snapshots = [snapshot for snapshot in source_ovs_disk.snapshots
                               if 'in_backend' not in snapshot or snapshot['in_backend'] is True]
        if len(available_snapshots) == 0:
            # No usable snapshot: create one on the fly and wait for it
            metadata = {'label': "Cinder clone snapshot {0}".format(name),
                        'is_consistent': False,
                        'timestamp': time.time(),
                        'machineguid': source_ovs_disk.vmachine_guid,
                        'is_automatic': False}

            LOG.debug('CREATE_SNAP %s %s' % (name, str(metadata)))
            snapshotid = VDiskController.create_snapshot(
                diskguid = source_ovs_disk.guid,
                metadata = metadata,
                snapshotid = None)
            LOG.debug('CREATE_SNAP OK')

            OVSVolumeDriver._wait_for_snapshot(source_ovs_disk, snapshotid)
        else:
            snapshotid = available_snapshots[-1]['guid']
        LOG.debug('[CREATE CLONE FROM SNAP] %s ' % snapshotid)

        disk_meta = VDiskController.clone(diskguid = source_ovs_disk.guid,
                                          snapshotid = snapshotid,
                                          devicename = str(name),
                                          pmachineguid = pmachineguid,
                                          machinename = "",
                                          machineguid=None)
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])

        LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
        LOG.debug('[CLONE FROM SNAP] New volume %s'
                  % volume['provider_location'])
        # Tie the new OVS vDisk back to the Cinder volume
        vdisk = VDisk(disk_meta['diskguid'])
        vdisk.cinder_id = volume.id
        vdisk.name = name
        vdisk.save()
    return {'provider_location': volume['provider_location'],
            'display_name': volume['display_name']}
def test_set_as_template(self):
    """
    Test the set as template functionality
    - Create a vDisk
    - Set it as template and make some assertions
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedrivers = structure['storagedrivers']
    vdisk_1 = VDisk(
        VDiskController.create_new(
            volume_name='vdisk_1',
            volume_size=1024**4,
            storagedriver_guid=storagedrivers[1].guid))
    metadata = {
        'is_consistent': True,
        'is_automatic': True,
        'is_sticky': False
    }
    # Take 5 snapshots so we can verify they collapse to 1 on template conversion
    for x in range(5):
        metadata['label'] = 'label{0}'.format(x)
        metadata['timestamp'] = int(time.time())
        VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                        metadata=metadata)
    self.assertTrue(expr=len(vdisk_1.snapshots) == 5,
                    msg='Expected to find 5 snapshots')
    self.assertTrue(expr=len(vdisk_1.snapshot_ids) == 5,
                    msg='Expected to find 5 snapshot IDs')

    # Set as template and validate the model
    self.assertFalse(expr=vdisk_1.is_vtemplate,
                     msg='Dynamic property "is_vtemplate" should be False')
    VDiskController.set_as_template(vdisk_guid=vdisk_1.guid)
    self.assertTrue(expr=vdisk_1.is_vtemplate,
                    msg='Dynamic property "is_vtemplate" should be True')
    self.assertTrue(
        expr=len(vdisk_1.snapshots) == 1,
        msg='Expected to find only 1 snapshot after converting to template'
    )
    self.assertTrue(
        expr=len(vdisk_1.snapshot_ids) == 1,
        msg=
        'Expected to find only 1 snapshot ID after converting to template')

    # Try again and verify job succeeds, previously we raised error when setting as template an additional time
    VDiskController.set_as_template(vdisk_1.guid)
    self.assertTrue(
        expr=vdisk_1.is_vtemplate,
        msg='Dynamic property "is_vtemplate" should still be True')

    # Clone vDisk and verify both child and parent can no longer be set as template
    vdisk_2 = VDisk(
        VDiskController.create_new(
            volume_name='vdisk_2',
            volume_size=1024**4,
            storagedriver_guid=storagedrivers[1].guid))
    vdisk_2_clone_1 = VDisk(
        VDiskController.clone(vdisk_guid=vdisk_2.guid,
                              name='vdisk_2_clone_1')['vdisk_guid'])
    with self.assertRaises(RuntimeError):
        VDiskController.set_as_template(vdisk_guid=vdisk_2.guid)
    with self.assertRaises(RuntimeError):
        VDiskController.set_as_template(vdisk_guid=vdisk_2_clone_1.guid)
def test_happypath(self):
    """
    Validates the happy path; Hourly snapshots are taken with a few manual consistent
    every now an then. The delete policy is executed every day
    """
    # Setup
    # There are 2 machines; one with two disks, one with one disk and an additional disk
    failure_domain = FailureDomain()
    failure_domain.name = "Test"
    failure_domain.save()
    backend_type = BackendType()
    backend_type.name = "BackendType"
    backend_type.code = "BT"
    backend_type.save()
    vpool = VPool()
    vpool.name = "vpool"
    vpool.backend_type = backend_type
    vpool.save()
    pmachine = PMachine()
    pmachine.name = "PMachine"
    pmachine.username = "******"
    pmachine.ip = "127.0.0.1"
    pmachine.hvtype = "VMWARE"
    pmachine.save()
    storage_router = StorageRouter()
    storage_router.name = "storage_router"
    storage_router.ip = "127.0.0.1"
    storage_router.pmachine = pmachine
    storage_router.machine_id = System.get_my_machine_id()
    storage_router.rdma_capable = False
    storage_router.primary_failure_domain = failure_domain
    storage_router.save()
    disk = Disk()
    disk.name = "physical_disk_1"
    disk.path = "/dev/non-existent"
    disk.size = 500 * 1024 ** 3
    disk.state = "OK"
    disk.is_ssd = True
    disk.storagerouter = storage_router
    disk.save()
    disk_partition = DiskPartition()
    disk_partition.id = "disk_partition_id"
    disk_partition.disk = disk
    disk_partition.path = "/dev/disk/non-existent"
    disk_partition.size = 400 * 1024 ** 3
    disk_partition.state = "OK"
    disk_partition.offset = 1024
    disk_partition.roles = [DiskPartition.ROLES.SCRUB]
    disk_partition.mountpoint = "/var/tmp"
    disk_partition.save()
    vmachine_1 = VMachine()
    vmachine_1.name = "vmachine_1"
    vmachine_1.devicename = "dummy"
    vmachine_1.pmachine = pmachine
    vmachine_1.save()
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = "vdisk_1_1"
    vdisk_1_1.volume_id = "vdisk_1_1"
    vdisk_1_1.vmachine = vmachine_1
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = "dummy"
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client()
    vdisk_1_2 = VDisk()
    vdisk_1_2.name = "vdisk_1_2"
    vdisk_1_2.volume_id = "vdisk_1_2"
    vdisk_1_2.vmachine = vmachine_1
    vdisk_1_2.vpool = vpool
    vdisk_1_2.devicename = "dummy"
    vdisk_1_2.size = 0
    vdisk_1_2.save()
    vdisk_1_2.reload_client()
    vmachine_2 = VMachine()
    vmachine_2.name = "vmachine_2"
    vmachine_2.devicename = "dummy"
    vmachine_2.pmachine = pmachine
    vmachine_2.save()
    vdisk_2_1 = VDisk()
    vdisk_2_1.name = "vdisk_2_1"
    vdisk_2_1.volume_id = "vdisk_2_1"
    vdisk_2_1.vmachine = vmachine_2
    vdisk_2_1.vpool = vpool
    vdisk_2_1.devicename = "dummy"
    vdisk_2_1.size = 0
    vdisk_2_1.save()
    vdisk_2_1.reload_client()
    vdisk_3 = VDisk()
    vdisk_3.name = "vdisk_3"
    vdisk_3.volume_id = "vdisk_3"
    vdisk_3.vpool = vpool
    vdisk_3.devicename = "dummy"
    vdisk_3.size = 0
    vdisk_3.save()
    vdisk_3.reload_client()
    # Disable caching on the snapshots dynamic so every read reflects the model
    for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
        [dynamic for dynamic in disk._dynamics if dynamic.name == "snapshots"][0].timeout = 0

    # Run the testing scenario
    debug = True
    amount_of_days = 50
    base = datetime.now().date()
    day = timedelta(1)
    minute = 60
    hour = minute * 60

    for d in xrange(0, amount_of_days):
        base_timestamp = DeleteSnapshots._make_timestamp(base, day * d)
        print ""
        print "Day cycle: {0}: {1}".format(d, datetime.fromtimestamp(base_timestamp).strftime("%Y-%m-%d"))

        # At the start of the day, delete snapshot policy runs at 00:30
        print "- Deleting snapshots"
        ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))

        # Validate snapshots
        print "- Validating snapshots"
        for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            self._validate(vdisk, d, base, amount_of_days, debug)

        # During the day, snapshots are taken
        # - Create non consistent snapshot every hour, between 2:00 and 22:00
        # - Create consistent snapshot at 6:30, 12:30, 18:30
        print "- Creating snapshots"
        for h in xrange(2, 23):
            timestamp = base_timestamp + (hour * h)
            for vm in [vmachine_1, vmachine_2]:
                VMachineController.snapshot(
                    machineguid=vm.guid,
                    label="ss_i_{0}:00".format(str(h)),
                    is_consistent=False,
                    timestamp=timestamp,
                )
                if h in [6, 12, 18]:
                    ts = timestamp + (minute * 30)
                    VMachineController.snapshot(
                        machineguid=vm.guid,
                        label="ss_c_{0}:30".format(str(h)),
                        is_consistent=True,
                        timestamp=ts
                    )
            # vdisk_3 is machine-less; snapshot it directly via the disk API
            VDiskController.create_snapshot(
                diskguid=vdisk_3.guid,
                metadata={
                    "label": "ss_i_{0}:00".format(str(h)),
                    "is_consistent": False,
                    "timestamp": str(timestamp),
                    "machineguid": None,
                },
            )
            if h in [6, 12, 18]:
                ts = timestamp + (minute * 30)
                VDiskController.create_snapshot(
                    diskguid=vdisk_3.guid,
                    metadata={
                        "label": "ss_c_{0}:30".format(str(h)),
                        "is_consistent": True,
                        "timestamp": str(ts),
                        "machineguid": None,
                    },
                )
def dtl_checkup(vpool_guid=None, vdisk_guid=None, storagerouters_to_exclude=None):
    """
    Check DTL for all volumes
    :param vpool_guid: vPool to check the DTL configuration of all its disks
    :type vpool_guid: String
    :param vdisk_guid: Virtual Disk to check its DTL configuration
    :type vdisk_guid: String
    :param storagerouters_to_exclude: Storage Routers to exclude from possible targets
    :type storagerouters_to_exclude: List
    :return: None
    """
    if vpool_guid is not None and vdisk_guid is not None:
        raise ValueError('vpool and vdisk are mutually exclusive')
    if storagerouters_to_exclude is None:
        storagerouters_to_exclude = []

    from ovs.lib.vpool import VPoolController

    logger.info('DTL checkup started')
    required_params = {'dtl_mode': (str, StorageDriverClient.VPOOL_DTL_MODE_MAP.keys()),
                       'dtl_enabled': (bool, None)}
    vdisk = VDisk(vdisk_guid) if vdisk_guid else None
    vpool = VPool(vpool_guid) if vpool_guid else None
    errors_found = False
    root_client_map = {}
    vpool_dtl_config_cache = {}
    # Scope: one vDisk, one vPool's vDisks, or every vDisk in the cluster
    vdisks = VDiskList.get_vdisks() if vdisk is None and vpool is None else vpool.vdisks if vpool is not None else [vdisk]
    for vdisk in vdisks:
        logger.info(' Verifying vDisk {0} with guid {1}'.format(vdisk.name, vdisk.guid))
        vdisk.invalidate_dynamics(['storagedriver_client', 'storagerouter_guid'])
        if vdisk.storagedriver_client is None:
            # No storagedriver client means the volume cannot be (re)configured right now
            continue

        vpool = vdisk.vpool
        if vpool.guid not in vpool_dtl_config_cache:
            vpool_config = VPoolController.get_configuration(vpool.guid)  # Config on vPool is permanent for DTL settings
            vpool_dtl_config_cache[vpool.guid] = vpool_config
            Toolbox.verify_required_params(required_params, vpool_config)

        volume_id = str(vdisk.volume_id)
        vpool_config = vpool_dtl_config_cache[vpool.guid]
        dtl_vpool_enabled = vpool_config['dtl_enabled']
        try:
            current_dtl_config = vdisk.storagedriver_client.get_dtl_config(volume_id)
            current_dtl_config_mode = vdisk.storagedriver_client.get_dtl_config_mode(volume_id)
        except RuntimeError as rte:
            # Can occur when a volume has not been stolen yet from a dead node
            logger.error('Retrieving DTL configuration from storage driver failed with error: {0}'.format(rte))
            errors_found = True
            continue

        if dtl_vpool_enabled is False and (current_dtl_config is None or current_dtl_config.host == 'null'):
            logger.info(' DTL is globally disabled for vPool {0} with guid {1}'.format(vpool.name, vpool.guid))
            vdisk.storagedriver_client.set_manual_dtl_config(volume_id, None)
            continue
        elif current_dtl_config_mode == DTLConfigMode.MANUAL and (current_dtl_config is None or current_dtl_config.host == 'null'):
            logger.info(' DTL is disabled for virtual disk {0} with guid {1}'.format(vdisk.name, vdisk.guid))
            continue

        storage_router = StorageRouter(vdisk.storagerouter_guid)
        available_storagerouters = []
        # 1. Check available storage routers in the backup failure domain
        if storage_router.secondary_failure_domain is not None:
            for storagerouter in storage_router.secondary_failure_domain.primary_storagerouters:
                if vpool.guid not in storagerouter.vpools_guids:
                    continue
                if storagerouter not in root_client_map:
                    try:
                        root_client = SSHClient(storagerouter, username='******')
                    except UnableToConnectException:
                        logger.warning(' Storage Router with IP {0} of vDisk {1} is not reachable'.format(storagerouter.ip, vdisk.name))
                        continue
                    root_client_map[storagerouter] = root_client
                else:
                    root_client = root_client_map[storagerouter]
                # Only StorageRouters with a running DTL service are valid targets
                if ServiceManager.get_service_status('dtl_{0}'.format(vpool.name), client=root_client) is True:
                    available_storagerouters.append(storagerouter)
        # 2. Check available storage routers in the same failure domain as current storage router
        if len(available_storagerouters) == 0:
            for storagerouter in storage_router.primary_failure_domain.primary_storagerouters:
                if vpool.guid not in storagerouter.vpools_guids or storagerouter == storage_router:
                    continue
                if storagerouter not in root_client_map:
                    try:
                        root_client = SSHClient(storagerouter, username='******')
                    except UnableToConnectException:
                        logger.warning(' Storage Router with IP {0} of vDisk {1} is not reachable'.format(storagerouter.ip, vdisk.name))
                        continue
                    root_client_map[storagerouter] = root_client
                else:
                    root_client = root_client_map[storagerouter]
                if ServiceManager.get_service_status('dtl_{0}'.format(vpool.name), client=root_client) is True:
                    available_storagerouters.append(storagerouter)

        # Remove storage routers to exclude
        for sr_guid in storagerouters_to_exclude:
            sr_to_exclude = StorageRouter(sr_guid)
            if sr_to_exclude in available_storagerouters:
                available_storagerouters.remove(sr_to_exclude)

        if len(available_storagerouters) == 0:
            logger.info(' No Storage Routers could be found as valid DTL target')
            vdisk.storagedriver_client.set_manual_dtl_config(volume_id, None)
            continue

        # Check whether reconfiguration is required
        reconfigure_required = False
        if current_dtl_config is None:
            logger.info(' No DTL configuration found, but there are Storage Routers available')
            reconfigure_required = True
        elif current_dtl_config_mode == DTLConfigMode.AUTOMATIC:
            logger.info(' DTL configuration set to AUTOMATIC, switching to manual')
            reconfigure_required = True
        else:
            dtl_host = current_dtl_config.host
            dtl_port = current_dtl_config.port
            storage_drivers = [sd for sd in vpool.storagedrivers if sd.storagerouter.ip == dtl_host]
            logger.info(' DTL host: {0}'.format(dtl_host or '-'))
            logger.info(' DTL port: {0}'.format(dtl_port or '-'))
            if dtl_host not in [sr.ip for sr in available_storagerouters]:
                logger.info(' Host not in available Storage Routers')
                reconfigure_required = True
            elif dtl_port != storage_drivers[0].ports[2]:
                # ports[2] is presumably the DTL port of the StorageDriver — TODO confirm
                logger.info(' Configured port does not match expected port ({0} vs {1})'.format(dtl_port, storage_drivers[0].ports[2]))
                reconfigure_required = True

        # Perform the reconfiguration
        if reconfigure_required is True:
            logger.info(' Reconfigure required')
            # Pick a random valid target to spread DTL load over the cluster
            index = random.randint(0, len(available_storagerouters) - 1)
            dtl_target = available_storagerouters[index]
            storage_drivers = [sd for sd in vpool.storagedrivers if sd.storagerouter == dtl_target]
            if len(storage_drivers) == 0:
                raise ValueError('Could not retrieve related storagedriver')

            port = storage_drivers[0].ports[2]
            vpool_dtl_mode = vpool_config.get('dtl_mode', StorageDriverClient.FRAMEWORK_DTL_ASYNC)
            logger.info(' DTL config that will be set --> Host: {0}, Port: {1}, Mode: {2}'.format(dtl_target.ip, port, vpool_dtl_mode))
            dtl_config = DTLConfig(str(dtl_target.ip), port, StorageDriverClient.VDISK_DTL_MODE_MAP[vpool_dtl_mode])
            vdisk.storagedriver_client.set_manual_dtl_config(volume_id, dtl_config)
    if errors_found is True:
        logger.error('DTL checkup ended with errors')
        raise Exception('DTL checkup failed with errors. Please check /var/log/ovs/lib.log for more information')
    logger.info('DTL checkup ended')
def create_from_template(diskguid, devicename, pmachineguid, machinename='', machineguid=None):
    """
    Create a new vDisk from a template vDisk
    :param diskguid: Guid of the template disk
    :param devicename: Device file name for the disk (eg: my_disk-flat.vmdk)
    :param pmachineguid: Guid of pmachine to create new vdisk on
    :param machinename: Name of the machine (replaced by the vMachine's name when machineguid is given)
    :param machineguid: Guid of the machine to assign disk to
    :return: dict with the guid, name and backing device of the new disk
    """
    pmachine = PMachine(pmachineguid)
    hypervisor = Factory.get(pmachine)

    # Resolve the target vMachine (if any); its name overrules the given machinename
    target_vmachine = None
    if machineguid is not None:
        target_vmachine = VMachine(machineguid)
        machinename = target_vmachine.name
    disk_path = hypervisor.get_disk_path(machinename, devicename)

    source_vdisk = VDisk(diskguid)
    # Disk might not be attached to a vmachine, but still be a template
    if source_vdisk.vmachine and not source_vdisk.vmachine.is_vtemplate:
        raise RuntimeError('The given vdisk does not belong to a template')
    if not source_vdisk.is_vtemplate:
        raise RuntimeError('The given vdisk is not a template')

    # Pick a storagedriver of the template's vPool that runs on the target pmachine
    storagedriver = next((sd for sd in source_vdisk.vpool.storagedrivers
                          if sd.storagerouter_guid in pmachine.storagerouters_guids), None)
    if storagedriver is None:
        raise RuntimeError('Could not find Storage Driver')

    # Model the new disk before touching the volumedriver so cleanup is possible
    new_vdisk = VDisk()
    new_vdisk.copy(source_vdisk, include=['description', 'size', 'type', 'retentionpolicyid',
                                          'snapshotpolicyid', 'vmachine', 'vpool'])
    new_vdisk.vpool = source_vdisk.vpool
    new_vdisk.devicename = hypervisor.clean_backing_disk_filename(disk_path)
    new_vdisk.parent_vdisk = source_vdisk
    new_vdisk.name = '{0}-clone'.format(source_vdisk.name)
    new_vdisk.description = '{0} {1}'.format(machinename, devicename)
    new_vdisk.vmachine = target_vmachine if machineguid else source_vdisk.vmachine
    new_vdisk.save()

    mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, new_vdisk.vpool)
    if mds_service is None:
        raise RuntimeError('Could not find a MDS service')

    logger.info('Create disk from template {0} to new disk {1} to location {2}'.format(source_vdisk.name, new_vdisk.name, disk_path))
    try:
        backend_config = MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                       port=mds_service.service.ports[0])
        volume_id = source_vdisk.storagedriver_client.create_clone_from_template(
            target_path=disk_path,
            metadata_backend_config=MDSMetaDataBackendConfig([backend_config]),
            parent_volume_id=str(source_vdisk.volume_id),
            node_id=str(storagedriver.storagedriver_id))
        new_vdisk.volume_id = volume_id
        new_vdisk.save()
        MDSServiceController.ensure_safety(new_vdisk)
        VDiskController.dtl_checkup.delay(vdisk_guid=new_vdisk.guid)
    except Exception as ex:
        logger.error('Clone disk on volumedriver level failed with exception: {0}'.format(str(ex)))
        try:
            # Best-effort removal of the half-created clone
            VDiskController.clean_bad_disk(new_vdisk.guid)
        except Exception as ex2:
            logger.exception('Exception during exception handling of "create_clone_from_template" : {0}'.format(str(ex2)))
        # 'raise ex' (not bare raise): the nested handler above may have replaced sys.exc_info in Python 2
        raise ex
    return {'diskguid': new_vdisk.guid, 'name': new_vdisk.name, 'backingdevice': disk_path}
def clone(diskguid, snapshotid, devicename, pmachineguid, machinename, machineguid=None):
    """
    Clone a disk from a snapshot
    :param diskguid: Guid of the disk to clone
    :param snapshotid: ID of the snapshot to clone from
    :param devicename: Device file name for the clone (eg: my_disk-flat.vmdk)
    :param pmachineguid: Guid of the pmachine hosting the clone
    :param machinename: Name of the machine the clone is attached to
    :param machineguid: Guid of the machine to assign the clone to
    :return: dict with the guid, name and backing device of the new disk
    """
    pmachine = PMachine(pmachineguid)
    hypervisor = Factory.get(pmachine)
    description = '{} {}'.format(machinename, devicename)
    properties_to_clone = ['description', 'size', 'type', 'retentionpolicyguid',
                           'snapshotpolicyguid', 'autobackup']
    vdisk = VDisk(diskguid)
    location = hypervisor.get_backing_disk_path(machinename, devicename)

    # Model the clone first so it can be cleaned up if the volumedriver call fails
    new_vdisk = VDisk()
    new_vdisk.copy(vdisk, include=properties_to_clone)
    new_vdisk.parent_vdisk = vdisk
    new_vdisk.name = '{0}-clone'.format(vdisk.name)
    new_vdisk.description = description
    new_vdisk.devicename = hypervisor.clean_backing_disk_filename(location)
    new_vdisk.parentsnapshot = snapshotid
    new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
    new_vdisk.vpool = vdisk.vpool
    new_vdisk.save()

    try:
        storagedriver = StorageDriverList.get_by_storagedriver_id(vdisk.storagedriver_id)
        if storagedriver is None:
            raise RuntimeError('Could not find StorageDriver with id {0}'.format(vdisk.storagedriver_id))
        mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, vdisk.vpool)
        if mds_service is None:
            raise RuntimeError('Could not find a MDS service')
        logger.info('Clone snapshot {} of disk {} to location {}'.format(snapshotid, vdisk.name, location))
        volume_id = vdisk.storagedriver_client.create_clone(
            target_path=location,
            metadata_backend_config=MDSMetaDataBackendConfig([MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                                                            port=mds_service.service.ports[0])]),
            parent_volume_id=str(vdisk.volume_id),
            parent_snapshot_id=str(snapshotid),
            node_id=str(vdisk.storagedriver_id)
        )
    except Exception as ex:
        logger.error('Caught exception during clone, trying to delete the volume. {0}'.format(ex))
        # Guard the cleanup: a secondary failure here must not mask the original exception
        try:
            new_vdisk.delete()
            VDiskController.delete_volume(location)
        except Exception as cleanup_ex:
            logger.exception('Exception during cleanup of failed clone: {0}'.format(str(cleanup_ex)))
        # 'raise ex' (not bare raise): the cleanup above may have handled exceptions,
        # which in Python 2 clobbers sys.exc_info and would make a bare raise re-raise the wrong one
        raise ex
    new_vdisk.volume_id = volume_id
    new_vdisk.save()
    try:
        MDSServiceController.ensure_safety(new_vdisk)
    except Exception as ex:
        # ensure_safety is best-effort; the clone itself already succeeded
        logger.error('Caught exception during "ensure_safety" {0}'.format(ex))
    return {'diskguid': new_vdisk.guid, 'name': new_vdisk.name, 'backingdevice': location}
def clone(diskguid, snapshotid, devicename, pmachineguid, machinename=None, machineguid=None, detached=False):
    """
    Clone a disk
    :param diskguid: Guid of the disk to clone
    :param snapshotid: ID of the snapshot to clone from (a new snapshot is taken when None)
    :param devicename: Name of the device to use in clone's description
    :param pmachineguid: Guid of the physical machine
    :param machinename: Name of the machine the disk is attached to
    :param machineguid: Guid of the machine
    :param detached: Boolean indicating the disk is attached to a machine or not
    :return: dict with the guid, name and backing device of the new disk
    :raises RuntimeError: on invalid name, duplicate name, missing StorageDriver or snapshot timeout
    :raises ValueError: when both machineguid and detached=True are given
    """
    # 1. Validations
    name_regex = "^[0-9a-zA-Z][-_a-zA-Z0-9]{1,48}[a-zA-Z0-9]$"
    if not re.match(name_regex, devicename):
        raise RuntimeError("Invalid name for virtual disk clone")

    if VDiskList.get_vdisk_by_name(vdiskname=devicename) is not None:
        raise RuntimeError("A virtual disk with this name already exists")

    vdisk = VDisk(diskguid)
    storagedriver = StorageDriverList.get_by_storagedriver_id(vdisk.storagedriver_id)
    if storagedriver is None:
        raise RuntimeError('Could not find StorageDriver with ID {0}'.format(vdisk.storagedriver_id))

    if machineguid is not None and detached is True:
        raise ValueError('A vMachine GUID was specified while detached is True')

    # 2. Create new snapshot if required
    if snapshotid is None:
        timestamp = str(int(time.time()))
        metadata = {'label': '',
                    'is_consistent': False,
                    'timestamp': timestamp,
                    'machineguid': machineguid,
                    'is_automatic': True}
        sd_snapshot_id = VDiskController.create_snapshot(diskguid, metadata)
        # Poll (with increasing sleeps summing to ~5 minutes) until the snapshot landed in the backend
        tries = 25  # 5 minutes
        while snapshotid is None and tries > 0:
            time.sleep(25 - tries)
            tries -= 1
            vdisk.invalidate_dynamics(['snapshots'])
            for snapshot in vdisk.snapshots:
                if snapshot['guid'] != sd_snapshot_id:
                    continue
                if snapshot['in_backend'] is True:
                    snapshotid = snapshot['guid']
        if snapshotid is None:
            try:
                VDiskController.delete_snapshot(diskguid=diskguid,
                                                snapshotid=sd_snapshot_id)
            except Exception:  # narrowed from bare 'except:'; cleanup is best-effort only
                pass
            raise RuntimeError('Could not find created snapshot in time')

    # 3. Model new cloned virtual disk
    hypervisor = Factory.get(PMachine(pmachineguid))
    location = hypervisor.get_disk_path(machinename, devicename)

    new_vdisk = VDisk()
    new_vdisk.copy(vdisk, include=['description', 'size', 'type', 'retentionpolicyguid', 'snapshotpolicyguid', 'autobackup'])
    new_vdisk.parent_vdisk = vdisk
    new_vdisk.name = devicename
    new_vdisk.description = devicename if machinename is None else '{0} {1}'.format(machinename, devicename)
    new_vdisk.devicename = hypervisor.clean_backing_disk_filename(location)
    new_vdisk.parentsnapshot = snapshotid
    if detached is False:
        new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
    new_vdisk.vpool = vdisk.vpool
    new_vdisk.save()

    # 4. Configure Storage Driver
    try:
        mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, vdisk.vpool)
        if mds_service is None:
            raise RuntimeError('Could not find a MDS service')
        logger.info('Clone snapshot {0} of disk {1} to location {2}'.format(snapshotid, vdisk.name, location))
        backend_config = MDSMetaDataBackendConfig([MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                                                 port=mds_service.service.ports[0])])
        volume_id = vdisk.storagedriver_client.create_clone(target_path=location,
                                                            metadata_backend_config=backend_config,
                                                            parent_volume_id=str(vdisk.volume_id),
                                                            parent_snapshot_id=str(snapshotid),
                                                            node_id=str(vdisk.storagedriver_id))
    except Exception as ex:
        logger.error('Caught exception during clone, trying to delete the volume. {0}'.format(ex))
        try:
            VDiskController.clean_bad_disk(new_vdisk.guid)
        except Exception as ex2:
            # Fixed copy-pasted message: this handler belongs to "clone", not "create_clone_from_template"
            logger.exception('Exception during exception handling of "clone" : {0}'.format(str(ex2)))
        # 'raise ex' (not bare raise): the nested handler above may have replaced sys.exc_info in Python 2,
        # in which case a bare raise would re-raise the cleanup exception instead of the original
        raise ex

    new_vdisk.volume_id = volume_id
    new_vdisk.save()

    # 5. Check MDS & DTL for new clone
    try:
        MDSServiceController.ensure_safety(new_vdisk)
    except Exception as ex:
        # ensure_safety is best-effort; the clone itself already succeeded
        logger.error('Caught exception during "ensure_safety" {0}'.format(ex))
    VDiskController.dtl_checkup.delay(vdisk_guid=new_vdisk.guid)
    return {'diskguid': new_vdisk.guid, 'name': new_vdisk.name, 'backingdevice': location}
def create_from_template(diskguid, machinename, devicename, pmachineguid, machineguid=None, storagedriver_guid=None):
    """
    Create a disk from a template
    @param diskguid: guid of the template disk
    @param machinename: name of the machine the new disk belongs to (eg: myVM)
    @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
    @param pmachineguid: guid of the pmachine to create the new disk on
    @param machineguid: guid of the machine to assign disk to
    @param storagedriver_guid: guid of the storagedriver to create the new disk on
    @return: dict with the guid, name and backing device of the new disk
    """
    source_pmachine = PMachine(pmachineguid)
    hv = Factory.get(source_pmachine)
    target_path = hv.get_disk_path(machinename, devicename)

    template = VDisk(diskguid)
    # Disk might not be attached to a vmachine, but still be a template
    if template.vmachine and not template.vmachine.is_vtemplate:
        raise RuntimeError('The given disk does not belong to a template')

    # Resolve the storagedriver that will host the clone
    if storagedriver_guid is None:
        sd_id = template.storagedriver_id
    else:
        sd_id = StorageDriver(storagedriver_guid).storagedriver_id

    # Model the clone
    clone_disk = VDisk()
    clone_disk.copy(template, include=['description', 'size', 'type', 'retentionpolicyid',
                                       'snapshotpolicyid', 'vmachine', 'vpool'])
    clone_disk.vpool = template.vpool
    clone_disk.devicename = hv.clean_backing_disk_filename(target_path)
    clone_disk.parent_vdisk = template
    clone_disk.name = '{}-clone'.format(template.name)
    clone_disk.description = '{} {}'.format(machinename, devicename)
    clone_disk.vmachine = VMachine(machineguid) if machineguid else template.vmachine
    clone_disk.save()

    logger.info('Create disk from template {} to new disk {} to location {}'.format(
        template.name, clone_disk.name, target_path
    ))
    try:
        clone_disk.volume_id = template.storagedriver_client.create_clone_from_template(
            target_path, str(template.volume_id), node_id=str(sd_id))
        clone_disk.save()
    except Exception as ex:
        logger.error('Clone disk on volumedriver level failed with exception: {0}'.format(str(ex)))
        # Volumedriver call failed: drop the modelled clone again
        clone_disk.delete()
        raise
    return {'diskguid': clone_disk.guid, 'name': clone_disk.name, 'backingdevice': target_path}
def update_vmachine_config(vmachine, vm_object, pmachine=None):
    """
    Update a vMachine configuration with a given vMachine configuration
    :param vmachine: Virtual Machine to update
    :param vm_object: New virtual machine info (dict from the hypervisor: 'name', 'id',
                      'backing', 'disks', 'datastores' keys are read below)
    :param pmachine: Physical machine of the virtual machine
    :raises Exception: re-raises any error after logging it
    """
    try:
        vdisks_synced = 0
        # Fire created/renamed events based on the difference between the model and the hypervisor info
        if vmachine.name is None:
            MessageController.fire(MessageController.Type.EVENT,
                                   {'type': 'vmachine_created',
                                    'metadata': {'name': vm_object['name']}})
        elif vmachine.name != vm_object['name']:
            MessageController.fire(MessageController.Type.EVENT,
                                   {'type': 'vmachine_renamed',
                                    'metadata': {'old_name': vmachine.name,
                                                 'new_name': vm_object['name']}})
        if pmachine is not None:
            vmachine.pmachine = pmachine
        vmachine.name = vm_object['name']
        vmachine.hypervisor_id = vm_object['id']
        vmachine.devicename = vm_object['backing']['filename']
        vmachine.save()
        # Updating and linking disks
        storagedrivers = StorageDriverList.get_storagedrivers()
        # NOTE(review): datastore keys are presumably '<storage_ip>:<mountpoint>' strings matching
        # the identifiers in vm_object['datastores'] — verify against the hypervisor-side caller
        datastores = dict([('{0}:{1}'.format(storagedriver.storage_ip, storagedriver.mountpoint), storagedriver) for storagedriver in storagedrivers])
        vdisk_guids = []
        mutex = volatile_mutex('{0}_{1}'.format(vmachine.name, vmachine.devicename))
        for disk in vm_object['disks']:
            ensure_safety = False
            if disk['datastore'] in vm_object['datastores']:
                datastore = vm_object['datastores'][disk['datastore']]
                if datastore in datastores:
                    try:
                        # Lock while looking up/creating the vDisk to avoid racing concurrent updates
                        mutex.acquire(wait=10)
                        vdisk = VDiskList.get_by_devicename_and_vpool(disk['filename'], datastores[datastore].vpool)
                        if vdisk is None:
                            # The disk couldn't be located, but is in our datastore. We might be in a recovery scenario
                            vdisk = VDisk()
                            vdisk.vpool = datastores[datastore].vpool
                            vdisk.reload_client()
                            vdisk.devicename = disk['filename']
                            vdisk.volume_id = vdisk.storagedriver_client.get_volume_id(str(disk['backingfilename']))
                            vdisk.size = vdisk.info['volume_size']
                            vdisk.metadata = {'lba_size': vdisk.info['lba_size'],
                                              'cluster_multiplier': vdisk.info['cluster_multiplier']}
                            # Create the disk in a locked context, but don't execute long running-task in same context
                            vdisk.save()
                            ensure_safety = True
                    finally:
                        mutex.release()
                    # Long-running follow-up for a recovered disk runs outside the lock
                    if ensure_safety:
                        MDSServiceController.ensure_safety(vdisk)
                        VDiskController.dtl_checkup(vdisk_guid=vdisk.guid)
                    # Update the disk with information from the hypervisor
                    if vdisk.vmachine is None:
                        MessageController.fire(MessageController.Type.EVENT,
                                               {'type': 'vdisk_attached',
                                                'metadata': {'vmachine_name': vmachine.name,
                                                             'vdisk_name': disk['name']}})
                    vdisk.vmachine = vmachine
                    vdisk.name = disk['name']
                    vdisk.order = disk['order']
                    vdisk.save()
                    vdisk_guids.append(vdisk.guid)
                    vdisks_synced += 1
        # Detach any modelled disk that the hypervisor no longer reports for this machine
        for vdisk in vmachine.vdisks:
            if vdisk.guid not in vdisk_guids:
                MessageController.fire(MessageController.Type.EVENT,
                                       {'type': 'vdisk_detached',
                                        'metadata': {'vmachine_name': vmachine.name,
                                                     'vdisk_name': vdisk.name}})
                vdisk.vmachine = None
                vdisk.save()
        VMachineController._logger.info('Updating vMachine finished (name {0}, {1} vdisks (re)linked)'.format(
            vmachine.name, vdisks_synced
        ))
    except Exception as ex:
        VMachineController._logger.info('Error during vMachine update: {0}'.format(str(ex)))
        raise
def test_happypath(self):
    """
    Validates the happy path; hourly snapshots are taken with a few manual consistent
    ones every now and then. The delete policy is executed every day.
    """
    # Setup
    # There are 2 machines; one with two disks, one with one disk and an additional disk
    backend_type = BackendType()
    backend_type.name = 'BackendType'
    backend_type.code = 'BT'
    backend_type.save()
    vpool = VPool()
    vpool.name = 'vpool'
    vpool.backend_type = backend_type
    vpool.save()
    pmachine = PMachine()
    pmachine.name = 'PMachine'
    pmachine.username = '******'
    pmachine.ip = '127.0.0.1'
    pmachine.hvtype = 'VMWARE'
    pmachine.save()
    vmachine_1 = VMachine()
    vmachine_1.name = 'vmachine_1'
    vmachine_1.devicename = 'dummy'
    vmachine_1.pmachine = pmachine
    vmachine_1.save()
    vdisk_1_1 = VDisk()
    vdisk_1_1.name = 'vdisk_1_1'
    vdisk_1_1.volume_id = 'vdisk_1_1'
    vdisk_1_1.vmachine = vmachine_1
    vdisk_1_1.vpool = vpool
    vdisk_1_1.devicename = 'dummy'
    vdisk_1_1.size = 0
    vdisk_1_1.save()
    vdisk_1_1.reload_client()
    vdisk_1_2 = VDisk()
    vdisk_1_2.name = 'vdisk_1_2'
    vdisk_1_2.volume_id = 'vdisk_1_2'
    vdisk_1_2.vmachine = vmachine_1
    vdisk_1_2.vpool = vpool
    vdisk_1_2.devicename = 'dummy'
    vdisk_1_2.size = 0
    vdisk_1_2.save()
    vdisk_1_2.reload_client()
    vmachine_2 = VMachine()
    vmachine_2.name = 'vmachine_2'
    vmachine_2.devicename = 'dummy'
    vmachine_2.pmachine = pmachine
    vmachine_2.save()
    vdisk_2_1 = VDisk()
    vdisk_2_1.name = 'vdisk_2_1'
    vdisk_2_1.volume_id = 'vdisk_2_1'
    vdisk_2_1.vmachine = vmachine_2
    vdisk_2_1.vpool = vpool
    vdisk_2_1.devicename = 'dummy'
    vdisk_2_1.size = 0
    vdisk_2_1.save()
    vdisk_2_1.reload_client()
    # vdisk_3 is a stand-alone disk (not attached to any vMachine)
    vdisk_3 = VDisk()
    vdisk_3.name = 'vdisk_3'
    vdisk_3.volume_id = 'vdisk_3'
    vdisk_3.vpool = vpool
    vdisk_3.devicename = 'dummy'
    vdisk_3.size = 0
    vdisk_3.save()
    vdisk_3.reload_client()
    # Disable the 'snapshots' dynamic-property cache so each read reflects the latest state
    for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
        [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

    # Run the testing scenario
    debug = True
    amount_of_days = 50
    base = datetime.now().date()
    day = timedelta(1)
    minute = 60
    hour = minute * 60

    for d in xrange(0, amount_of_days):
        base_timestamp = DeleteSnapshots._make_timestamp(base, day * d)
        print ''
        print 'Day cycle: {}: {}'.format(
            d, datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d')
        )

        # At the start of the day, delete snapshot policy runs at 00:30
        print '- Deleting snapshots'
        ScheduledTaskController.deletescrubsnapshots(timestamp=base_timestamp + (minute * 30))

        # Validate snapshots
        print '- Validating snapshots'
        for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            self._validate(vdisk, d, base, amount_of_days, debug)

        # During the day, snapshots are taken
        # - Create non consistent snapshot every hour, between 2:00 and 22:00
        # - Create consistent snapshot at 6:30, 12:30, 18:30
        print '- Creating snapshots'
        for h in xrange(2, 23):
            timestamp = base_timestamp + (hour * h)
            for vm in [vmachine_1, vmachine_2]:
                VMachineController.snapshot(machineguid=vm.guid,
                                            label='ss_i_{0}:00'.format(str(h)),
                                            is_consistent=False,
                                            timestamp=timestamp)
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VMachineController.snapshot(machineguid=vm.guid,
                                                label='ss_c_{0}:30'.format(str(h)),
                                                is_consistent=True,
                                                timestamp=ts)
            # The stand-alone disk is snapshotted directly via the VDiskController
            VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                            metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                      'is_consistent': False,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})
            if h in [6, 12, 18]:
                ts = (timestamp + (minute * 30))
                VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                          'is_consistent': True,
                                                          'timestamp': str(ts),
                                                          'machineguid': None})
def create_from_template(diskguid, machinename, devicename, pmachineguid, machineguid=None, storagedriver_guid=None):
    """
    Create a disk from a template
    @param diskguid: guid of the template disk
    @param machinename: name of the machine the new disk belongs to
    @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
    @param pmachineguid: guid of the pmachine to create the new disk on
    @param machineguid: guid of the machine to assign disk to
    @param storagedriver_guid: guid of the storagedriver hosting the new disk
    @return: dict with the guid, name and backing device of the new disk
    """
    hypervisor = Factory.get(PMachine(pmachineguid))
    disk_path = hypervisor.get_disk_path(machinename, devicename)

    template = VDisk(diskguid)
    # Disk might not be attached to a vmachine, but still be a template
    if template.vmachine and not template.vmachine.is_vtemplate:
        raise RuntimeError('The given vdisk does not belong to a template')

    # Resolve the storagedriver the clone will be created on
    if storagedriver_guid is None:
        storagedriver_id = template.storagedriver_id
    else:
        storagedriver_id = StorageDriver(storagedriver_guid).storagedriver_id
    storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
    if storagedriver is None:
        raise RuntimeError('Could not find StorageDriver with id {0}'.format(storagedriver_id))

    # Model the clone
    clone = VDisk()
    clone.copy(template, include=['description', 'size', 'type', 'retentionpolicyid',
                                  'snapshotpolicyid', 'vmachine', 'vpool'])
    clone.vpool = template.vpool
    clone.devicename = hypervisor.clean_backing_disk_filename(disk_path)
    clone.parent_vdisk = template
    clone.name = '{}-clone'.format(template.name)
    clone.description = '{} {}'.format(machinename, devicename)
    clone.vmachine = VMachine(machineguid) if machineguid else template.vmachine
    clone.save()

    mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, template.vpool)
    if mds_service is None:
        raise RuntimeError('Could not find a MDS service')

    logger.info('Create disk from template {} to new disk {} to location {}'.format(
        template.name, clone.name, disk_path
    ))
    try:
        node_config = MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                    port=mds_service.service.ports[0])
        clone.volume_id = template.storagedriver_client.create_clone_from_template(
            target_path=disk_path,
            metadata_backend_config=MDSMetaDataBackendConfig([node_config]),
            parent_volume_id=str(template.volume_id),
            node_id=str(storagedriver_id)
        )
        clone.save()
        MDSServiceController.ensure_safety(clone)
    except Exception as ex:
        logger.error('Clone disk on volumedriver level failed with exception: {0}'.format(str(ex)))
        # Volumedriver call failed: drop the modelled clone again
        clone.delete()
        raise
    return {'diskguid': clone.guid, 'name': clone.name, 'backingdevice': disk_path}
def test_happypath(self): """ Validates the happy path; Hourly snapshots are taken with a few manual consistent every now an then. The delete policy is executed every day """ # Setup # There are 2 machines; one with two disks, one with one disk and a stand-alone additional disk failure_domain = FailureDomain() failure_domain.name = 'Test' failure_domain.save() backend_type = BackendType() backend_type.name = 'BackendType' backend_type.code = 'BT' backend_type.save() vpool = VPool() vpool.name = 'vpool' vpool.status = 'RUNNING' vpool.backend_type = backend_type vpool.save() pmachine = PMachine() pmachine.name = 'PMachine' pmachine.username = '******' pmachine.ip = '127.0.0.1' pmachine.hvtype = 'VMWARE' pmachine.save() storage_router = StorageRouter() storage_router.name = 'storage_router' storage_router.ip = '127.0.0.1' storage_router.pmachine = pmachine storage_router.machine_id = System.get_my_machine_id() storage_router.rdma_capable = False storage_router.primary_failure_domain = failure_domain storage_router.save() disk = Disk() disk.name = 'physical_disk_1' disk.path = '/dev/non-existent' disk.size = 500 * 1024 ** 3 disk.state = 'OK' disk.is_ssd = True disk.storagerouter = storage_router disk.save() disk_partition = DiskPartition() disk_partition.id = 'disk_partition_id' disk_partition.disk = disk disk_partition.path = '/dev/disk/non-existent' disk_partition.size = 400 * 1024 ** 3 disk_partition.state = 'OK' disk_partition.offset = 1024 disk_partition.roles = [DiskPartition.ROLES.SCRUB] disk_partition.mountpoint = '/var/tmp' disk_partition.save() vmachine_1 = VMachine() vmachine_1.name = 'vmachine_1' vmachine_1.devicename = 'dummy' vmachine_1.pmachine = pmachine vmachine_1.save() vdisk_1_1 = VDisk() vdisk_1_1.name = 'vdisk_1_1' vdisk_1_1.volume_id = 'vdisk_1_1' vdisk_1_1.vmachine = vmachine_1 vdisk_1_1.vpool = vpool vdisk_1_1.devicename = 'dummy' vdisk_1_1.size = 0 vdisk_1_1.save() vdisk_1_1.reload_client() vdisk_1_2 = VDisk() vdisk_1_2.name = 'vdisk_1_2' 
vdisk_1_2.volume_id = 'vdisk_1_2' vdisk_1_2.vmachine = vmachine_1 vdisk_1_2.vpool = vpool vdisk_1_2.devicename = 'dummy' vdisk_1_2.size = 0 vdisk_1_2.save() vdisk_1_2.reload_client() vmachine_2 = VMachine() vmachine_2.name = 'vmachine_2' vmachine_2.devicename = 'dummy' vmachine_2.pmachine = pmachine vmachine_2.save() vdisk_2_1 = VDisk() vdisk_2_1.name = 'vdisk_2_1' vdisk_2_1.volume_id = 'vdisk_2_1' vdisk_2_1.vmachine = vmachine_2 vdisk_2_1.vpool = vpool vdisk_2_1.devicename = 'dummy' vdisk_2_1.size = 0 vdisk_2_1.save() vdisk_2_1.reload_client() vdisk_3 = VDisk() vdisk_3.name = 'vdisk_3' vdisk_3.volume_id = 'vdisk_3' vdisk_3.vpool = vpool vdisk_3.devicename = 'dummy' vdisk_3.size = 0 vdisk_3.save() vdisk_3.reload_client() for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]: [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0 # Run the testing scenario travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true' if travis is True: print 'Running in Travis, reducing output.' 
debug = not travis amount_of_days = 50 base = datetime.datetime.now().date() day = datetime.timedelta(1) minute = 60 hour = minute * 60 for d in xrange(0, amount_of_days): base_timestamp = self._make_timestamp(base, day * d) print '' print 'Day cycle: {0}: {1}'.format(d, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d')) # At the start of the day, delete snapshot policy runs at 00:30 print '- Deleting snapshots' ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30)) # Validate snapshots print '- Validating snapshots' for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]: self._validate(vdisk, d, base, amount_of_days, debug) # During the day, snapshots are taken # - Create non consistent snapshot every hour, between 2:00 and 22:00 # - Create consistent snapshot at 6:30, 12:30, 18:30 print '- Creating snapshots' for h in xrange(2, 23): timestamp = base_timestamp + (hour * h) for vm in [vmachine_1, vmachine_2]: VMachineController.snapshot(machineguid=vm.guid, label='ss_i_{0}:00'.format(str(h)), is_consistent=False, timestamp=timestamp) if h in [6, 12, 18]: ts = (timestamp + (minute * 30)) VMachineController.snapshot(machineguid=vm.guid, label='ss_c_{0}:30'.format(str(h)), is_consistent=True, timestamp=ts) VDiskController.create_snapshot(diskguid=vdisk_3.guid, metadata={'label': 'ss_i_{0}:00'.format(str(h)), 'is_consistent': False, 'timestamp': str(timestamp), 'machineguid': None}) if h in [6, 12, 18]: ts = (timestamp + (minute * 30)) VDiskController.create_snapshot(diskguid=vdisk_3.guid, metadata={'label': 'ss_c_{0}:30'.format(str(h)), 'is_consistent': True, 'timestamp': str(ts), 'machineguid': None})