def _make_clones(vdisks_map, depth=clone_depth):
    """Chain-clone the most recently added vDisk `depth` times.

    Each iteration clones the last vDisk in `vdisks_map` under the name
    '<parent>_clone' and stores the resulting VDisk model back into the map,
    so the next iteration clones the clone (a chain of `depth` levels).
    """
    for _ in range(depth):
        source_vd = list(vdisks_map.itervalues())[-1]
        clone_name = '{0}_clone'.format(source_vd.name)
        clone_info = VDiskController.clone(source_vd.guid, clone_name)
        vdisks_map[clone_name] = VDisk(clone_info.get('vdisk_guid'))
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.
    Called on "cinder create --snapshot-id ..."

    :param snapshot: snapshot reference (sqlalchemy Model)
    :param volume: volume reference (sqlalchemy Model)

    Volume here is just a ModelObject; it doesn't exist physically,
    it will be created by OVS. The diskguid passed to the clone method is
    the OVS diskguid of the parent of the snapshot with snapshot.id.

    OVS: Clone from arbitrary volume,
    requires volumedriver 3.6 release > 15.08.2014
    """
    _debug_vol_info('CLONE_VOL', volume)
    _debug_vol_info('CLONE_SNAP', snapshot)

    host = str(volume.host)
    mountpoint = self._get_hostname_mountpoint(host)
    source_disk = self._find_ovs_model_disk_by_snapshot_id(snapshot.id)
    # Fall back to the technical name when no display name was given.
    device_name = volume.display_name or volume.name
    pmachine_guid = self._find_ovs_model_pmachine_guid_by_hostname(host)
    LOG.info('[CLONE FROM SNAP] %s %s %s %s'
             % (source_disk.guid, snapshot.id, device_name, pmachine_guid))
    try:
        disk_meta = VDiskController.clone(diskguid=source_disk.guid,
                                          snapshotid=snapshot.id,
                                          devicename=device_name,
                                          pmachineguid=pmachine_guid,
                                          machinename="",
                                          machineguid=None)
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])
        LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
        LOG.debug('[CLONE FROM SNAP] New volume %s'
                  % volume['provider_location'])
        # Register the Cinder id on the freshly cloned OVS vDisk model.
        new_vdisk = VDisk(disk_meta['diskguid'])
        new_vdisk.cinder_id = volume.id
        new_vdisk.name = device_name
        new_vdisk.save()
    except Exception as ex:
        LOG.error('CLONE FROM SNAP: Internal error %s ' % str(ex))
        # Best-effort rollback of the partially created objects.
        self.delete_volume(volume)
        self.delete_snapshot(snapshot)
        raise
    return {'provider_location': volume['provider_location'],
            'display_name': volume['display_name']}
def clone(machineguid, timestamp, name):
    """
    Clone a vmachine using the disk snapshot based on a snapshot timestamp

    @param machineguid: guid of the machine to clone
    @param timestamp: timestamp of the disk snapshots to use for the clone
    @param name: name for the new machine
    @return: guid of the new (cloned) machine
    @raise RuntimeError: when the timestamp does not match any snapshot
    """
    machine = VMachine(machineguid)
    # Fail fast with a clear error instead of a KeyError further down when
    # an unknown timestamp is provided (matches the newer clone variant).
    if timestamp not in (snap['timestamp'] for snap in machine.snapshots):
        raise RuntimeError('Invalid timestamp provided, not a valid snapshot of this vmachine.')
    # Map disk guid -> snapshot guid for the requested point in time.
    disks = {}
    for snapshot in machine.snapshots:
        if snapshot['timestamp'] == timestamp:
            for diskguid, snapshotguid in snapshot['snapshots'].iteritems():
                disks[diskguid] = snapshotguid

    new_machine = VMachine()
    new_machine.copy(machine)
    new_machine.name = name
    new_machine.pmachine = machine.pmachine
    new_machine.save()

    # Clone every disk in its original attach order.
    new_disk_guids = []
    disks_by_order = sorted(machine.vdisks, key=lambda x: x.order)
    try:
        for current_disk in disks_by_order:
            if machine.is_vtemplate and current_disk.templatesnapshot:
                snapshotid = current_disk.templatesnapshot
            else:
                snapshotid = disks[current_disk.guid]
            prefix = '%s-clone' % current_disk.name
            result = VDiskController.clone(diskguid=current_disk.guid,
                                           snapshotid=snapshotid,
                                           devicename=prefix,
                                           pmachineguid=new_machine.pmachine_guid,
                                           machinename=new_machine.name,
                                           machineguid=new_machine.guid)
            new_disk_guids.append(result['diskguid'])
    except Exception:
        # Roll back the partially cloned machine before re-raising.
        VMachineController.delete(machineguid=new_machine.guid)
        raise

    hv = Factory.get(machine.pmachine)
    try:
        result = hv.clone_vm(machine.hypervisor_id, name, disks, None, True)
    except Exception:
        # Hypervisor clone failed: remove the machine we just modelled.
        VMachineController.delete(machineguid=new_machine.guid)
        raise
    new_machine.hypervisor_id = result
    new_machine.save()
    return new_machine.guid
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot.
    Called on "cinder create --snapshot-id ..."

    :param snapshot: snapshot reference (sqlalchemy Model)
    :param volume: volume reference (sqlalchemy Model)

    Volume here is just a ModelObject, it doesn't exist physically,
    it will be created by OVS.
    Diskguid to be passed to the clone method is the ovs diskguid of the
    parent of the snapshot with snapshot.id

    OVS: Clone from arbitrary volume,
    requires volumedriver 3.6 release > 15.08.2014
    """
    _debug_vol_info('CLONE_VOL', volume)
    _debug_vol_info('CLONE_SNAP', snapshot)
    mountpoint = self._get_hostname_mountpoint(str(volume.host))
    ovs_snap_disk = self._find_ovs_model_disk_by_snapshot_id(snapshot.id)
    devicename = volume.display_name
    if not devicename:
        devicename = volume.name
    pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
        str(volume.host))
    LOG.info('[CLONE FROM SNAP] %s %s %s %s'
             % (ovs_snap_disk.guid, snapshot.id, devicename, pmachineguid))
    try:
        # PEP8 E251: no spaces around '=' for keyword arguments
        # (was "diskguid = ...") — now consistent with the other variant.
        disk_meta = VDiskController.clone(diskguid=ovs_snap_disk.guid,
                                          snapshotid=snapshot.id,
                                          devicename=devicename,
                                          pmachineguid=pmachineguid,
                                          machinename="",
                                          machineguid=None)
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])
        LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
        LOG.debug('[CLONE FROM SNAP] New volume %s'
                  % volume['provider_location'])
        # Track the Cinder volume id on the OVS vDisk model.
        vdisk = VDisk(disk_meta['diskguid'])
        vdisk.cinder_id = volume.id
        vdisk.name = devicename
        vdisk.save()
    except Exception as ex:
        LOG.error('CLONE FROM SNAP: Internal error %s ' % str(ex))
        # Roll back the partially created objects before re-raising.
        self.delete_volume(volume)
        self.delete_snapshot(snapshot)
        raise
    return {'provider_location': volume['provider_location'],
            'display_name': volume['display_name']}
def clone(machineguid, timestamp, name):
    """
    Clone a vmachine using the disk snapshot based on a snapshot timestamp

    @param machineguid: guid of the machine to clone
    @param timestamp: timestamp of the disk snapshots to use for the clone
    @param name: name for the new machine
    @return: guid of the new (cloned) machine
    @raise RuntimeError: when the timestamp does not match any snapshot
    """
    machine = VMachine(machineguid)
    # Validate up front: an unknown timestamp previously surfaced as a bare
    # KeyError while looking up disks[currentDisk.guid] below.
    if timestamp not in (snap['timestamp'] for snap in machine.snapshots):
        raise RuntimeError('Invalid timestamp provided, not a valid snapshot of this vmachine.')
    # Map disk guid -> snapshot guid for the requested point in time.
    disks = {}
    for snapshot in machine.snapshots:
        if snapshot['timestamp'] == timestamp:
            for diskguid, snapshotguid in snapshot['snapshots'].iteritems():
                disks[diskguid] = snapshotguid

    new_machine = VMachine()
    new_machine.copy(machine)
    new_machine.name = name
    new_machine.pmachine = machine.pmachine
    new_machine.save()

    # Clone every disk in its original attach order.
    new_disk_guids = []
    disks_by_order = sorted(machine.vdisks, key=lambda x: x.order)
    try:
        for current_disk in disks_by_order:
            if machine.is_vtemplate and current_disk.templatesnapshot:
                snapshotid = current_disk.templatesnapshot
            else:
                snapshotid = disks[current_disk.guid]
            prefix = '%s-clone' % current_disk.name
            result = VDiskController.clone(diskguid=current_disk.guid,
                                           snapshotid=snapshotid,
                                           devicename=prefix,
                                           pmachineguid=new_machine.pmachine_guid,
                                           machinename=new_machine.name,
                                           machineguid=new_machine.guid)
            new_disk_guids.append(result['diskguid'])
    except Exception:
        # Roll back the partially cloned machine before re-raising.
        VMachineController.delete(machineguid=new_machine.guid)
        raise

    hv = Factory.get(machine.pmachine)
    try:
        result = hv.clone_vm(machine.hypervisor_id, name, disks, None, True)
    except Exception:
        # Hypervisor clone failed: remove the machine we just modelled.
        VMachineController.delete(machineguid=new_machine.guid)
        raise
    new_machine.hypervisor_id = result
    new_machine.save()
    return new_machine.guid
def test_set_as_template(self):
    """
    Test the set as template functionality
        - Create a vDisk
        - Set it as template and make some assertions
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedrivers = structure['storagedrivers']
    template_candidate = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                                          volume_size=1024 ** 4,
                                                          storagedriver_guid=storagedrivers[1].guid))
    # Take 5 snapshots so we can verify they collapse to 1 on templating.
    for snap_index in range(5):
        snap_metadata = {'is_consistent': True,
                         'is_automatic': True,
                         'is_sticky': False,
                         'label': 'label{0}'.format(snap_index),
                         'timestamp': int(time.time())}
        VDiskController.create_snapshot(vdisk_guid=template_candidate.guid,
                                        metadata=snap_metadata)
    self.assertTrue(expr=len(template_candidate.snapshots) == 5,
                    msg='Expected to find 5 snapshots')
    self.assertTrue(expr=len(template_candidate.snapshot_ids) == 5,
                    msg='Expected to find 5 snapshot IDs')

    # Set as template and validate the model
    self.assertFalse(expr=template_candidate.is_vtemplate,
                     msg='Dynamic property "is_vtemplate" should be False')
    VDiskController.set_as_template(vdisk_guid=template_candidate.guid)
    self.assertTrue(expr=template_candidate.is_vtemplate,
                    msg='Dynamic property "is_vtemplate" should be True')
    self.assertTrue(expr=len(template_candidate.snapshots) == 1,
                    msg='Expected to find only 1 snapshot after converting to template')
    self.assertTrue(expr=len(template_candidate.snapshot_ids) == 1,
                    msg='Expected to find only 1 snapshot ID after converting to template')

    # Try again and verify job succeeds, previously we raised error when setting as template an additional time
    VDiskController.set_as_template(template_candidate.guid)
    self.assertTrue(expr=template_candidate.is_vtemplate,
                    msg='Dynamic property "is_vtemplate" should still be True')

    # Clone vDisk and verify both child and parent can no longer be set as template
    parent_disk = VDisk(VDiskController.create_new(volume_name='vdisk_2',
                                                   volume_size=1024 ** 4,
                                                   storagedriver_guid=storagedrivers[1].guid))
    child_disk = VDisk(VDiskController.clone(vdisk_guid=parent_disk.guid,
                                             name='vdisk_2_clone_1')['vdisk_guid'])
    with self.assertRaises(RuntimeError):
        VDiskController.set_as_template(vdisk_guid=parent_disk.guid)
    with self.assertRaises(RuntimeError):
        VDiskController.set_as_template(vdisk_guid=child_disk.guid)
def test_clone(self):
    """
    Test the clone functionality
        - Create a vDisk with name 'clone1'
        - Clone the vDisk and make some assertions
        - Attempt to clone again using same name and same devicename
        - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
        - Attempt to clone on Storage Driver without MDS service
        - Attempt to clone from snapshot which is not yet completely synced to backend
        - Attempt to delete the snapshot from which a clone was made
        - Clone the vDisk on another Storage Router
        - Clone another vDisk with name 'clone1' linked to another vPool
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1, 2],
         'storagerouters': [1, 2, 3],
         'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    vpools = structure['vpools']
    mds_services = structure['mds_services']
    service_type = structure['service_types']['MetadataServer']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
    self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

    # Basic clone scenario
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                              volume_size=1024 ** 3,
                                              storagedriver_guid=storagedrivers[1].guid))
    clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')
    clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshot_ids[0])
    self.assertTrue(expr=len(clones) == 1, msg='Expected to find 1 vDisk with parent snapshot')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 1, msg='Expected to find 1 child vDisk')
    # The clone call must report guid, name and device path of the new vDisk.
    for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
        self.assertTrue(expr=expected_key in clone1_info,
                        msg='Expected to find key "{0}" in clone_info'.format(expected_key))
    self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'], msg='Guids do not match')
    self.assertTrue(expr=clones[0].name == clone1_info['name'], msg='Names do not match')
    self.assertTrue(expr=clones[0].devicename == clone1_info['backingdevice'],
                    msg='Device names do not match')

    # Attempt to clone again with same name
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2,
                    msg='Expected to find 2 vDisks after failed clone attempt 1')

    # Attempt to clone again with a name which will have identical devicename
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1%')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2,
                    msg='Expected to find 2 vDisks after failed clone attempt 2')

    # Attempt to clone on Storage Router on which vPool is not extended
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid,
                              name='clone2',
                              storagerouter_guid=storagerouters[2].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2,
                    msg='Expected to find 2 vDisks after failed clone attempt 3')

    # Attempt to clone on non-existing Storage Driver
    storagedrivers[1].storagedriver_id = 'non-existing'
    storagedrivers[1].save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2,
                    msg='Expected to find 2 vDisks after failed clone attempt 4')
    # Restore the original id so the next scenarios use a valid Storage Driver.
    storagedrivers[1].storagedriver_id = '1'
    storagedrivers[1].save()

    # Attempt to clone on Storage Driver without MDS service
    mds_services[1].service.storagerouter = storagerouters[3]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2,
                    msg='Expected to find 2 vDisks after failed clone attempt 5')
    # Move the MDS service back for the remaining scenarios.
    mds_services[1].service.storagerouter = storagerouters[1]
    mds_services[1].service.save()

    # Attempt to clone by providing snapshot_id not synced to backend
    self.assertTrue(expr=len(vdisk1.snapshots) == 1,
                    msg='Expected to find only 1 snapshot before cloning')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 1,
                    msg='Expected to find only 1 snapshot ID before cloning')
    metadata = {'label': 'label1',
                'timestamp': int(time.time()),
                'is_sticky': False,
                'in_backend': False,
                'is_automatic': True,
                'is_consistent': True}
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata=metadata)
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 2, msg='Expected to find 2 snapshot IDs')
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid,
                              name='clone2',
                              snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2,
                    msg='Expected to find 2 vDisks after failed clone attempt 6')

    # Update backend synced flag and retry
    vdisk1.storagedriver_client._set_snapshot_in_backend(vdisk1.volume_id, snapshot_id, True)
    vdisk1.invalidate_dynamics(['snapshots', 'snapshot_ids'])
    VDiskController.clone(vdisk_guid=vdisk1.guid,
                          name='clone2',
                          snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    vdisk1.invalidate_dynamics()
    self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 2, msg='Expected to find 2 child vDisks')
    self.assertTrue(expr=len(vdisk1.snapshots) == 2,
                    msg='Expected to find 2 snapshots after cloning from a specified snapshot')
    self.assertTrue(expr=len(vdisk1.snapshot_ids) == 2,
                    msg='Expected to find 2 snapshot IDs after cloning from a specified snapshot')

    # Attempt to delete the snapshot that has clones
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                        snapshot_id=snapshot_id)

    # Clone on specific Storage Router
    # Build a third Storage Driver + MDS service on storagerouter 2 by hand.
    storagedriver = StorageDriver()
    storagedriver.vpool = vpools[1]
    storagedriver.storagerouter = storagerouters[2]
    storagedriver.name = '3'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storagerouters[2].ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '3'
    storagedriver.ports = {'management': 1,
                           'xmlrpc': 2,
                           'dtl': 3,
                           'edge': 4}
    storagedriver.save()
    s_id = '{0}-1'.format(storagedriver.storagerouter.name)
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [3]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()
    clone3 = VDisk(VDiskController.clone(vdisk_guid=vdisk1.guid,
                                         name='clone3',
                                         storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
    self.assertTrue(expr=clone3.storagerouter_guid == storagerouters[2].guid,
                    msg='Incorrect Storage Router on which the clone is attached')

    # Clone vDisk with existing name on another vPool
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                              volume_size=1024 ** 3,
                                              storagedriver_guid=storagedrivers[2].guid))
    clone_vdisk2 = VDisk(VDiskController.clone(vdisk_guid=vdisk2.guid,
                                               name='clone1')['vdisk_guid'])
    self.assertTrue(expr=clone_vdisk2.vpool == vpools[2],
                    msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
    self.assertTrue(expr=len([vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1']) == 2,
                    msg='Expected to find 2 vDisks with name "clone1"')

    # Attempt to clone without specifying snapshot and snapshot fails to sync to backend
    StorageRouterClient.synced = False
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_2',
                                              volume_size=1024 ** 3,
                                              storagedriver_guid=storagedrivers[1].guid))
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone4')
    vdisk2.invalidate_dynamics()
    self.assertTrue(expr=len(vdisk2.snapshots) == 0,
                    msg='Expected to find 0 snapshots after clone failure')
    self.assertTrue(expr=len(vdisk2.snapshot_ids) == 0,
                    msg='Expected to find 0 snapshot IDs after clone failure')
    self.assertTrue(expr=len(vdisk2.child_vdisks) == 0,
                    msg='Expected to find 0 children after clone failure')
    # Restore the mocked client's sync behaviour for subsequent tests.
    StorageRouterClient.synced = True
def test_remove_snapshots(self):
    """
    Validates whether the remove_snapshots call works as expected. Due to openvstorage/framework#1534
    it needs to handle some backwards compatibility.
    """
    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
    )
    storagedriver = structure['storagedrivers'][1]
    vdisk = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                             volume_size=1024 ** 4,
                                             storagedriver_guid=storagedriver.guid))
    # Create 10 snapshots to delete from in the scenarios below.
    snapshots = []
    for i in xrange(10):
        metadata = {'label': 'label{0}'.format(i),
                    'timestamp': int(time.time()),
                    'is_sticky': False,
                    'in_backend': True,
                    'is_automatic': True,
                    'is_consistent': True}
        snapshots.append(VDiskController.create_snapshot(vdisk_guid=vdisk.guid,
                                                         metadata=metadata))
    vdisk.invalidate_dynamics(['snapshots', 'snapshot_ids'])
    self.assertEqual(len(vdisk.snapshots), 10)
    self.assertEqual(len(vdisk.snapshot_ids), 10)
    snapshot_id = snapshots[0]

    # Old format: {vdisk_guid: snapshot_id}
    results = VDiskController.delete_snapshots({vdisk.guid: snapshot_id})
    expected = {vdisk.guid: [True, snapshot_id]}
    self.assertDictEqual(results, expected)
    self.assertEqual(len(vdisk.snapshots), 9)
    self.assertEqual(len(vdisk.snapshot_ids), 9)
    # Deleting the same snapshot again must report a failure, not raise.
    results = VDiskController.delete_snapshots({vdisk.guid: snapshot_id})
    expected = {vdisk.guid: [False, results[vdisk.guid][1]]}
    self.assertDictEqual(results, expected)
    self.assertRegexpMatches(results[vdisk.guid][1],
                             '^Snapshot (.*?) does not belong to vDisk')
    self.assertEqual(len(vdisk.snapshots), 9)
    self.assertEqual(len(vdisk.snapshot_ids), 9)
    # Unknown vDisk guid must also be reported per entry.
    results = VDiskController.delete_snapshots({'foo': snapshot_id})
    expected = {'foo': [False, results['foo'][1]]}
    self.assertDictEqual(results, expected)
    self.assertRegexpMatches(results['foo'][1],
                             'VDisk with guid (.*?) could not be found')

    # New format: {vdisk_guid: [snapshot_id, snapshot_id, ...]}
    snapshot_id1 = snapshots[1]
    snapshot_id2 = snapshots[2]
    results = VDiskController.delete_snapshots({vdisk.guid: [snapshot_id1, snapshot_id2]})
    expected = {vdisk.guid: {'success': True,
                             'error': None,
                             'results': {snapshot_id1: [True, snapshot_id1],
                                         snapshot_id2: [True, snapshot_id2]}}}
    self.assertDictEqual(results, expected)
    self.assertEqual(len(vdisk.snapshots), 7)
    self.assertEqual(len(vdisk.snapshot_ids), 7)
    # Mixed success: one already-deleted snapshot, one valid one.
    snapshot_id2 = snapshots[3]
    results = VDiskController.delete_snapshots({vdisk.guid: [snapshot_id1, snapshot_id2]})
    expected = {vdisk.guid: {'success': False,
                             'error': results[vdisk.guid]['error'],
                             'results': {snapshot_id1: [False, results[vdisk.guid]['results'][snapshot_id1][1]],
                                         snapshot_id2: [True, snapshot_id2]}}}
    self.assertDictEqual(results, expected)
    # assertEqual instead of the deprecated assertEquals alias.
    self.assertEqual(results[vdisk.guid]['error'],
                     'One or more snapshots could not be removed')
    self.assertRegexpMatches(results[vdisk.guid]['results'][snapshot_id1][1],
                             '^Snapshot (.*?) does not belong to vDisk')
    self.assertEqual(len(vdisk.snapshots), 6)
    self.assertEqual(len(vdisk.snapshot_ids), 6)
    results = VDiskController.delete_snapshots({'foo': [snapshot_id1]})
    expected = {'foo': {'success': False,
                        'error': results['foo']['error'],
                        'results': {}}}
    self.assertDictEqual(results, expected)
    self.assertRegexpMatches(results['foo']['error'],
                             'VDisk with guid (.*?) could not be found')

    # A snapshot with clones hanging off it cannot be removed.
    snapshot_id = snapshots[4]
    VDiskController.clone(vdisk.guid, 'clone', snapshot_id)
    results = VDiskController.delete_snapshots({vdisk.guid: [snapshot_id]})
    expected = {vdisk.guid: {'success': False,
                             'error': results[vdisk.guid]['error'],
                             'results': {snapshot_id: [False, results[vdisk.guid]['results'][snapshot_id][1]]}}}
    self.assertDictEqual(results, expected)
    self.assertEqual(results[vdisk.guid]['error'],
                     'One or more snapshots could not be removed')
    self.assertRegexpMatches(results[vdisk.guid]['results'][snapshot_id][1],
                             '^Snapshot (.*?) has [0-9]+ volume(.?) cloned from it, cannot remove$')
def create_cloned_volume(self, volume, src_vref):
    """Create a cloned volume from another volume.
    Called on "cinder create --source-volid ... "

    :param volume: volume reference - target volume (sqlalchemy Model)
    :param src_vref: volume reference - source volume (sqlalchemy Model)

    OVS: Create clone from template if the source is a template
         Create volume from snapshot if the source is a volume
         - create snapshot of source volume if it doesn't have snapshots
    """
    _debug_vol_info('CREATE_CLONED_VOL', volume)
    _debug_vol_info('CREATE_CLONED_VOL Source', src_vref)

    mountpoint = self._get_hostname_mountpoint(str(volume.host))
    name = volume.display_name
    if not name:
        name = volume.name
        volume.display_name = volume.name
    pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
        str(volume.host))

    # source
    source_ovs_disk = self._find_ovs_model_disk_by_location(
        str(src_vref.provider_location), src_vref.host)
    if source_ovs_disk.info['object_type'] == 'TEMPLATE':
        LOG.info('[CREATE_FROM_TEMPLATE] VDisk %s is a template'
                 % source_ovs_disk.devicename)
        # cloning from a template
        LOG.debug('[CREATE FROM TEMPLATE] ovs_disk %s '
                  % (source_ovs_disk.devicename))
        # PEP8 E251: no spaces around '=' for keyword arguments
        # (was "diskguid = ..."), consistent with the other variant.
        disk_meta = VDiskController.create_from_template(
            diskguid=source_ovs_disk.guid,
            machinename="",
            devicename=str(name),
            pmachineguid=pmachineguid,
            machineguid=None,
            storagedriver_guid=None)
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])
        LOG.debug('[CREATE FROM TEMPLATE] New volume %s'
                  % volume['provider_location'])
        vdisk = VDisk(disk_meta['diskguid'])
        vdisk.cinder_id = volume.id
        vdisk.name = name
        LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s'
                  % (volume.id, name))
        vdisk.save()
    else:
        LOG.info('[THIN CLONE] VDisk %s is not a template'
                 % source_ovs_disk.devicename)
        # We do not support yet full volume clone
        # - requires "emancipate" functionality
        # So for now we'll take a snapshot
        # (or the latest snapshot existing) and clone from that snapshot
        available_snapshots = [snapshot for snapshot in source_ovs_disk.snapshots
                               if 'in_backend' not in snapshot or snapshot['in_backend'] is True]
        if len(available_snapshots) == 0:
            # No usable snapshot yet: create one and wait for backend sync.
            metadata = {'label': "Cinder clone snapshot {0}".format(name),
                        'is_consistent': False,
                        'timestamp': time.time(),
                        'machineguid': source_ovs_disk.vmachine_guid,
                        'is_automatic': False}
            LOG.debug('CREATE_SNAP %s %s' % (name, str(metadata)))
            snapshotid = VDiskController.create_snapshot(
                diskguid=source_ovs_disk.guid,
                metadata=metadata,
                snapshotid=None)
            LOG.debug('CREATE_SNAP OK')
            OVSVolumeDriver._wait_for_snapshot(source_ovs_disk, snapshotid)
        else:
            snapshotid = available_snapshots[-1]['guid']
        LOG.debug('[CREATE CLONE FROM SNAP] %s ' % snapshotid)
        disk_meta = VDiskController.clone(diskguid=source_ovs_disk.guid,
                                          snapshotid=snapshotid,
                                          devicename=str(name),
                                          pmachineguid=pmachineguid,
                                          machinename="",
                                          machineguid=None)
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])
        LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
        LOG.debug('[CLONE FROM SNAP] New volume %s'
                  % volume['provider_location'])
        vdisk = VDisk(disk_meta['diskguid'])
        vdisk.cinder_id = volume.id
        vdisk.name = name
        vdisk.save()
    return {'provider_location': volume['provider_location'],
            'display_name': volume['display_name']}
def clone(machineguid, timestamp, name):
    """
    Clone a vmachine using the disk snapshot based on a snapshot timestamp

    @param machineguid: guid of the machine to clone
    @param timestamp: timestamp of the disk snapshots to use for the clone
    @param name: name for the new machine
    """
    machine = VMachine(machineguid)
    # Snapshot timestamps are stored as strings; normalise before comparing.
    timestamp = str(timestamp)
    if timestamp not in (snap['timestamp'] for snap in machine.snapshots):
        raise RuntimeError('Invalid timestamp provided, not a valid snapshot of this vmachine.')

    # Locate a vPool (VMWARE only) and a Storage Router to derive the VM path.
    vpool = None
    storagerouter = None
    if machine.pmachine is not None and machine.pmachine.hvtype == 'VMWARE':
        for vdisk in machine.vdisks:
            if vdisk.vpool is not None:
                vpool = vdisk.vpool
                break
    for vdisk in machine.vdisks:
        if vdisk.storagerouter_guid:
            storagerouter = StorageRouter(vdisk.storagerouter_guid)
            break
    hv = Factory.get(machine.pmachine)
    vm_path = hv.get_vmachine_path(name, storagerouter.machine_id if storagerouter is not None else '')
    # mutex in sync_with_hypervisor uses "None" for KVM hvtype
    mutex = volatile_mutex('{0}_{1}'.format(hv.clean_vmachine_filename(vm_path),
                                            vpool.guid if vpool is not None else 'none'))

    # Map disk guid -> snapshot guid for the requested timestamp.
    disks = {}
    for snapshot in machine.snapshots:
        if snapshot['timestamp'] == timestamp:
            for diskguid, snapshotguid in snapshot['snapshots'].iteritems():
                disks[diskguid] = snapshotguid

    # Model the new machine under the mutex so no concurrent hypervisor sync
    # sees a half-initialised entry.
    try:
        mutex.acquire(wait=120)
        new_machine = VMachine()
        new_machine.copy(machine)
        new_machine.name = name
        new_machine.devicename = hv.clean_vmachine_filename(vm_path)
        new_machine.pmachine = machine.pmachine
        new_machine.save()
    finally:
        mutex.release()

    new_disk_guids = []
    vm_disks = []
    mountpoint = None
    disks_by_order = sorted(machine.vdisks, key=lambda x: x.order)
    try:
        # Clone every disk in its original attach order.
        for currentDisk in disks_by_order:
            if machine.is_vtemplate and currentDisk.templatesnapshot:
                snapshotid = currentDisk.templatesnapshot
            else:
                snapshotid = disks[currentDisk.guid]
            prefix = '%s-clone' % currentDisk.name
            result = VDiskController.clone(diskguid=currentDisk.guid,
                                           snapshotid=snapshotid,
                                           devicename=prefix,
                                           pmachineguid=new_machine.pmachine_guid,
                                           machinename=new_machine.name,
                                           machineguid=new_machine.guid)
            new_disk_guids.append(result['diskguid'])
            # NOTE: mountpoint ends up being the one of the last cloned disk.
            mountpoint = StorageDriverList.get_by_storagedriver_id(currentDisk.storagedriver_id).mountpoint
            vm_disks.append(result)
    except Exception as ex:
        # Roll back the modelled machine (and its cloned disks) on failure.
        VMachineController._logger.error('Failed to clone disks. {0}'.format(ex))
        VMachineController.delete(machineguid=new_machine.guid)
        raise
    try:
        result = hv.clone_vm(machine.hypervisor_id, name, vm_disks, mountpoint)
    except Exception as ex:
        VMachineController._logger.error('Failed to clone vm. {0}'.format(ex))
        VMachineController.delete(machineguid=new_machine.guid)
        raise
    # Persist the hypervisor id under the same mutex used above.
    try:
        mutex.acquire(wait=120)
        new_machine.hypervisor_id = result
        new_machine.save()
    finally:
        mutex.release()
    return new_machine.guid
def create_cloned_volume(self, volume, src_vref):
    """Create a cloned volume from another volume.
    Called on "cinder create --source-volid ... "

    :param volume: volume reference - target volume (sqlalchemy Model)
    :param src_vref: volume reference - source volume (sqlalchemy Model)

    OVS: Create clone from template if the source is a template
         Create volume from snapshot if the source is a volume
         - create snapshot of source volume if it doesn't have snapshots
    """
    _debug_vol_info('CREATE_CLONED_VOL', volume)
    _debug_vol_info('CREATE_CLONED_VOL Source', src_vref)

    target_host = str(volume.host)
    mountpoint = self._get_hostname_mountpoint(target_host)
    target_name = volume.display_name
    if not target_name:
        target_name = volume.name
        volume.display_name = volume.name
    pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(target_host)

    # source
    source_ovs_disk = self._find_ovs_model_disk_by_location(
        str(src_vref.provider_location), src_vref.host)
    if source_ovs_disk.info['object_type'] == 'TEMPLATE':
        LOG.info('[CREATE_FROM_TEMPLATE] VDisk %s is a template'
                 % source_ovs_disk.devicename)
        # cloning from a template
        LOG.debug('[CREATE FROM TEMPLATE] ovs_disk %s '
                  % (source_ovs_disk.devicename))
        disk_meta = VDiskController.create_from_template(
            diskguid=source_ovs_disk.guid,
            machinename="",
            devicename=str(target_name),
            pmachineguid=pmachineguid,
            machineguid=None,
            storagedriver_guid=None)
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])
        LOG.debug('[CREATE FROM TEMPLATE] New volume %s'
                  % volume['provider_location'])
        new_vdisk = VDisk(disk_meta['diskguid'])
        new_vdisk.cinder_id = volume.id
        new_vdisk.name = target_name
        LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s'
                  % (volume.id, target_name))
        new_vdisk.save()
    else:
        LOG.info('[THIN CLONE] VDisk %s is not a template'
                 % source_ovs_disk.devicename)
        # We do not support yet full volume clone
        # - requires "emancipate" functionality
        # So for now we'll take a snapshot
        # (or the latest snapshot existing) and clone from that snapshot
        synced_snapshots = [snap for snap in source_ovs_disk.snapshots
                            if 'in_backend' not in snap or snap['in_backend'] is True]
        if not synced_snapshots:
            snap_metadata = {'label': "Cinder clone snapshot {0}".format(target_name),
                             'is_consistent': False,
                             'timestamp': time.time(),
                             'machineguid': source_ovs_disk.vmachine_guid,
                             'is_automatic': False}
            LOG.debug('CREATE_SNAP %s %s' % (target_name, str(snap_metadata)))
            snapshotid = VDiskController.create_snapshot(
                diskguid=source_ovs_disk.guid,
                metadata=snap_metadata,
                snapshotid=None)
            LOG.debug('CREATE_SNAP OK')
            OVSVolumeDriver._wait_for_snapshot(source_ovs_disk, snapshotid)
        else:
            snapshotid = synced_snapshots[-1]['guid']
        LOG.debug('[CREATE CLONE FROM SNAP] %s ' % snapshotid)
        disk_meta = VDiskController.clone(diskguid=source_ovs_disk.guid,
                                          snapshotid=snapshotid,
                                          devicename=str(target_name),
                                          pmachineguid=pmachineguid,
                                          machinename="",
                                          machineguid=None)
        volume['provider_location'] = '{}{}'.format(
            mountpoint, disk_meta['backingdevice'])
        LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
        LOG.debug('[CLONE FROM SNAP] New volume %s'
                  % volume['provider_location'])
        new_vdisk = VDisk(disk_meta['diskguid'])
        new_vdisk.cinder_id = volume.id
        new_vdisk.name = target_name
        new_vdisk.save()
    return {'provider_location': volume['provider_location'],
            'display_name': volume['display_name']}
def test_clone(self):
    """
    Test the clone functionality
        - Create a vDisk with name 'clone1'
        - Clone the vDisk and make some assertions
        - Attempt to clone again using same name and same devicename
        - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
        - Attempt to clone on Storage Driver without MDS service
        - Attempt to clone from snapshot which is not yet completely synced to backend
        - Attempt to delete the snapshot from which a clone was made
        - Clone the vDisk on another Storage Router
        - Clone another vDisk with name 'clone1' linked to another vPool
    """
    # Build a mocked model: 2 vPools, 3 Storage Routers, 2 Storage
    # Drivers (one per vPool, both on router 1) and 2 MDS services.
    structure = Helper.build_service_structure(
        {'vpools': [1, 2],
         'storagerouters': [1, 2, 3],
         'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
    )
    vpools = structure['vpools']
    mds_services = structure['mds_services']
    service_type = structure['service_type']
    storagedrivers = structure['storagedrivers']
    storagerouters = structure['storagerouters']
    self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
    self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

    # Basic clone scenario: cloning implicitly creates a snapshot on the
    # source vDisk, and the clone is linked to it as parent snapshot.
    vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')

    clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshots[0]['guid'])
    self.assertTrue(expr=len(clones) == 1, msg='Expected to find 1 vDisk with parent snapshot')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 1, msg='Expected to find 1 child vDisk')

    # The clone call must report guid, name and backing device, and they
    # must match the modeled clone object.
    for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
        self.assertTrue(expr=expected_key in clone1_info, msg='Expected to find key "{0}" in clone_info'.format(expected_key))
    self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'], msg='Guids do not match')
    self.assertTrue(expr=clones[0].name == clone1_info['name'], msg='Names do not match')
    self.assertTrue(expr=clones[0].devicename == clone1_info['backingdevice'], msg='Device names do not match')

    # Attempt to clone again with same name
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 1')

    # Attempt to clone again with a name which will have identical devicename
    # ('clone1%' presumably normalizes to the same device name as 'clone1').
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1%')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 2')

    # Attempt to clone on Storage Router on which vPool is not extended
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', storagerouter_guid=storagerouters[2].guid)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 3')

    # Attempt to clone on non-existing Storage Driver: temporarily break
    # the storagedriver_id, then restore it afterwards.
    storagedrivers[1].storagedriver_id = 'non-existing'
    storagedrivers[1].save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 4')
    storagedrivers[1].storagedriver_id = '1'
    storagedrivers[1].save()

    # Attempt to clone on Storage Driver without MDS service: move the MDS
    # service to router 3, then move it back afterwards.
    mds_services[1].service.storagerouter = storagerouters[3]
    mds_services[1].service.save()
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 5')
    mds_services[1].service.storagerouter = storagerouters[1]
    mds_services[1].service.save()

    # Attempt to clone by providing snapshot_id not synced to backend
    # ('in_backend': False marks the snapshot as not yet synced).
    self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find only 1 snapshot before cloning')
    metadata = {'label': 'label1',
                'timestamp': int(time.time()),
                'is_sticky': False,
                'in_backend': False,
                'is_automatic': True,
                'is_consistent': True}
    snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata=metadata)
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots')
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 6')

    # Update backend synced flag and retry; flipping the flag through the
    # mocked storagedriver client and invalidating the cached 'snapshots'
    # dynamic makes the snapshot eligible for cloning.
    vdisk1.storagedriver_client._set_snapshot_in_backend(vdisk1.volume_id, snapshot_id, True)
    vdisk1.invalidate_dynamics('snapshots')
    VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2', snapshot_id=snapshot_id)
    vdisks = VDiskList.get_vdisks()
    vdisk1.invalidate_dynamics()
    self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
    self.assertTrue(expr=len(vdisk1.child_vdisks) == 2, msg='Expected to find 2 child vDisks')
    # Cloning from an explicitly specified snapshot must not create an
    # extra snapshot on the source vDisk.
    self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots after cloning from a specified snapshot')

    # Attempt to delete the snapshot that has clones
    with self.assertRaises(RuntimeError):
        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid, snapshot_id=snapshot_id)

    # Clone on specific Storage Router: extend vPool 1 to router 2 by
    # modeling a Storage Driver plus an MDS service there.
    storagedriver = StorageDriver()
    storagedriver.vpool = vpools[1]
    storagedriver.storagerouter = storagerouters[2]
    storagedriver.name = '3'
    storagedriver.mountpoint = '/'
    storagedriver.cluster_ip = storagerouters[2].ip
    storagedriver.storage_ip = '127.0.0.1'
    storagedriver.storagedriver_id = '3'
    storagedriver.ports = {'management': 1, 'xmlrpc': 2, 'dtl': 3, 'edge': 4}
    storagedriver.save()

    s_id = '{0}-1'.format(storagedriver.storagerouter.name)
    service = Service()
    service.name = s_id
    service.storagerouter = storagedriver.storagerouter
    service.ports = [3]
    service.type = service_type
    service.save()
    mds_service = MDSService()
    mds_service.service = service
    mds_service.number = 0
    mds_service.capacity = 10
    mds_service.vpool = storagedriver.vpool
    mds_service.save()

    clone3 = VDisk(VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone3', storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
    self.assertTrue(expr=clone3.storagerouter_guid == storagerouters[2].guid, msg='Incorrect Storage Router on which the clone is attached')

    # Clone vDisk with existing name on another vPool: name uniqueness is
    # scoped per vPool, so a second 'clone1' is allowed on vPool 2.
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))
    clone_vdisk2 = VDisk(VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone1')['vdisk_guid'])
    self.assertTrue(expr=clone_vdisk2.vpool == vpools[2], msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
    self.assertTrue(expr=len([vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1']) == 2, msg='Expected to find 2 vDisks with name "clone1"')

    # Attempt to clone without specifying snapshot and snapshot fails to sync to backend.
    # StorageRouterClient.synced is a class-level mock switch; reset it at
    # the end so later tests are unaffected.
    StorageRouterClient.synced = False
    vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_2', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
    with self.assertRaises(RuntimeError):
        VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone4')
    vdisk2.invalidate_dynamics()
    # The failed clone must roll back its implicitly created snapshot.
    self.assertTrue(expr=len(vdisk2.snapshots) == 0, msg='Expected to find 0 snapshots after clone failure')
    self.assertTrue(expr=len(vdisk2.child_vdisks) == 0, msg='Expected to find 0 children after clone failure')
    StorageRouterClient.synced = True