Code Example #1
 def _create_vdisks_for_mds_service(self, amount, start_id, mds_service=None, vpool=None):
     """
     Generates vdisks and appends them to a given mds_service
     """
     vdisks = {}
     for i in xrange(start_id, start_id + amount):
         disk = VDisk()
         disk.name = str(i)
         disk.devicename = 'disk_{0}'.format(i)
         disk.volume_id = 'disk_{0}'.format(i)
         disk.vpool = mds_service.vpool if mds_service is not None else vpool
         disk.size = 0
         disk.save()
         disk.reload_client()
         if mds_service is not None:
             storagedriver_id = None
             for sd in mds_service.vpool.storagedrivers:
                 if sd.storagerouter_guid == mds_service.service.storagerouter_guid:
                     storagedriver_id = sd.storagedriver_id
             junction = MDSServiceVDisk()
             junction.vdisk = disk
             junction.mds_service = mds_service
             junction.is_master = True
             junction.save()
             config = type('MDSNodeConfig', (),
                           {'address': self._generate_nc_function(True, mds_service),
                            'port': self._generate_nc_function(False, mds_service)})()
             mds_backend_config = type('MDSMetaDataBackendConfig', (),
                                       {'node_configs': self._generate_bc_function([config])})()
             StorageDriverClient.metadata_backend_config['disk_{0}'.format(i)] = mds_backend_config
             StorageDriverClient.catch_up['disk_{0}'.format(i)] = 50
             StorageDriverClient.vrouter_id['disk_{0}'.format(i)] = storagedriver_id
         vdisks[i] = disk
     return vdisks
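
A minimal usage sketch (not part of the original snippet) showing how a test might call this helper, assuming an mds_service object has already been modelled:

    vdisks = self._create_vdisks_for_mds_service(amount=10, start_id=1, mds_service=mds_service)
    # Returns a dict keyed by id, e.g. vdisks[1].devicename == 'disk_1'.
    # Pass vpool=... instead of mds_service when no MDS service is involved.
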
Code Example #2
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.
        Called on "cinder create --snapshot-id ..."
        :param snapshot: snapshot reference (sqlalchemy Model)
        :param volume: volume reference (sqlalchemy Model)

        Volume here is just a ModelObject, it doesn't exist physically,
            it will be created by OVS.
        Diskguid to be passed to the clone method is the ovs diskguid of the
            parent of the snapshot with snapshot.id

        OVS: Clone from arbitrary volume,
        requires volumedriver 3.6 release > 15.08.2014
        """
        _debug_vol_info('CLONE_VOL', volume)
        _debug_vol_info('CLONE_SNAP', snapshot)

        mountpoint = self._get_hostname_mountpoint(str(volume.host))
        ovs_snap_disk = self._find_ovs_model_disk_by_snapshot_id(snapshot.id)
        OVSVolumeDriver._wait_for_snapshot(ovs_snap_disk, snapshot.id)
        devicename = volume.display_name
        if not devicename:
            devicename = volume.name
        pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
            str(volume.host))

        LOG.info('[CLONE FROM SNAP] %s %s %s %s' %
                 (ovs_snap_disk.guid, snapshot.id, devicename, pmachineguid))
        try:
            kwargs = dict(diskguid=ovs_snap_disk.guid,
                          snapshotid=snapshot.id,
                          devicename=devicename,
                          pmachineguid=pmachineguid,
                          machinename="",
                          machineguid=None)
            LOG.debug('[CLONE FROM SNAP] Executing clone - async')
            # Execute "clone" task async, using celery workers
            # wait for the result for 30 minutes then raise TimeoutError
            disk_meta = VDiskController.clone.apply_async(kwargs=kwargs)\
                .get(timeout=1800)
            LOG.debug('[CLONE FROM SNAP] Executing clone - async - DONE')
            volume['provider_location'] = '{}{}'.format(
                mountpoint, disk_meta['backingdevice'])

            LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
            LOG.debug('[CLONE FROM SNAP] New volume %s' %
                      volume['provider_location'])
            vdisk = VDisk(disk_meta['diskguid'])
            vdisk.cinder_id = volume.id
            vdisk.name = devicename
            vdisk.save()
        except Exception as ex:
            LOG.error('CLONE FROM SNAP: Internal error %s ' % str(ex))
            self.delete_volume(volume)
            raise

        return {
            'provider_location': volume['provider_location'],
            'display_name': volume['display_name']
        }
Code Example #3
File: vdisk.py Project: tcpcloud/openvstorage
    def clone(diskguid, snapshotid, devicename, pmachineguid, machinename, machineguid=None):
        """
        Clone a disk
        """
        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        description = '{} {}'.format(machinename, devicename)
        properties_to_clone = ['description', 'size', 'type', 'retentionpolicyguid',
                               'snapshotpolicyguid', 'autobackup']
        vdisk = VDisk(diskguid)
        location = hypervisor.get_backing_disk_path(machinename, devicename)

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=properties_to_clone)
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = '{0}-clone'.format(vdisk.name)
        new_vdisk.description = description
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(location)
        new_vdisk.parentsnapshot = snapshotid
        new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.save()

        try:
            storagedriver = StorageDriverList.get_by_storagedriver_id(vdisk.storagedriver_id)
            if storagedriver is None:
                raise RuntimeError('Could not find StorageDriver with id {0}'.format(vdisk.storagedriver_id))

            mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, vdisk.vpool)
            if mds_service is None:
                raise RuntimeError('Could not find a MDS service')

            logger.info('Clone snapshot {} of disk {} to location {}'.format(snapshotid, vdisk.name, location))
            volume_id = vdisk.storagedriver_client.create_clone(
                target_path=location,
                metadata_backend_config=MDSMetaDataBackendConfig([MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                                                                port=mds_service.service.ports[0])]),
                parent_volume_id=str(vdisk.volume_id),
                parent_snapshot_id=str(snapshotid),
                node_id=str(vdisk.storagedriver_id)
            )
        except Exception as ex:
            logger.error('Caught exception during clone, trying to delete the volume. {0}'.format(ex))
            new_vdisk.delete()
            VDiskController.delete_volume(location)
            raise

        new_vdisk.volume_id = volume_id
        new_vdisk.save()

        try:
            MDSServiceController.ensure_safety(new_vdisk)
        except Exception as ex:
            logger.error('Caught exception during "ensure_safety" {0}'.format(ex))

        return {'diskguid': new_vdisk.guid,
                'name': new_vdisk.name,
                'backingdevice': location}
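
For orientation, a hedged sketch of invoking this task and reading its result (the guid variables are placeholders; the task can also be dispatched through Celery as shown in Code Example #2):

    result = VDiskController.clone(diskguid=vdisk.guid,
                                   snapshotid=snapshot_guid,
                                   devicename='my_device',
                                   pmachineguid=pmachine.guid,
                                   machinename='')
    # result is a dict: {'diskguid': ..., 'name': '<original name>-clone', 'backingdevice': <target path>}
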
Code Example #4
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.
        Called on "cinder create --snapshot-id ..."
        :param snapshot: snapshot reference (sqlalchemy Model)
        :param volume: volume reference (sqlalchemy Model)

        Volume here is just a ModelObject, it doesn't exist physically,
            it will be created by OVS.
        Diskguid to be passed to the clone method is the ovs diskguid of the
            parent of the snapshot with snapshot.id

        OVS: Clone from arbitrary volume,
        requires volumedriver 3.6 release > 15.08.2014
        """
        _debug_vol_info('CLONE_VOL', volume)
        _debug_vol_info('CLONE_SNAP', snapshot)

        mountpoint = self._get_hostname_mountpoint(str(volume.host))
        ovs_snap_disk = self._find_ovs_model_disk_by_snapshot_id(snapshot.id)
        OVSVolumeDriver._wait_for_snapshot(ovs_snap_disk, snapshot.id)
        devicename = volume.display_name
        if not devicename:
            devicename = volume.name
        pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
            str(volume.host))

        LOG.info('[CLONE FROM SNAP] %s %s %s %s'
                 % (ovs_snap_disk.guid, snapshot.id, devicename, pmachineguid))
        try:
            kwargs = dict(diskguid=ovs_snap_disk.guid,
                          snapshotid=snapshot.id,
                          devicename=devicename,
                          pmachineguid=pmachineguid,
                          machinename="",
                          machineguid=None)
            LOG.debug('[CLONE FROM SNAP] Executing clone - async')
            # Execute "clone" task async, using celery workers
            # wait for the result for 30 minutes then raise TimeoutError
            disk_meta = VDiskController.clone.apply_async(kwargs=kwargs)\
                .get(timeout=1800)
            LOG.debug('[CLONE FROM SNAP] Executing clone - async - DONE')
            volume['provider_location'] = '{}{}'.format(
                mountpoint, disk_meta['backingdevice'])

            LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
            LOG.debug('[CLONE FROM SNAP] New volume %s'
                      % volume['provider_location'])
            vdisk = VDisk(disk_meta['diskguid'])
            vdisk.cinder_id = volume.id
            vdisk.name = devicename
            vdisk.save()
        except Exception as ex:
            LOG.error('CLONE FROM SNAP: Internal error %s ' % str(ex))
            self.delete_volume(volume)
            raise

        return {'provider_location': volume['provider_location'],
                'display_name': volume['display_name']}
Code Example #5
File: helpers.py Project: yongshengma/framework
 def create_vdisks_for_mds_service(amount,
                                   start_id,
                                   mds_service=None,
                                   storagedriver=None):
     """
     Generates vdisks and appends them to a given mds_service
     """
     if (mds_service is None
             and storagedriver is None) or (mds_service is not None
                                            and storagedriver is not None):
         raise RuntimeError(
             'Either `mds_service` or `storagedriver` should be passed')
     vdisks = {}
     storagedriver_id = None
     vpool = None
     mds_services = []
     if mds_service is not None:
         mds_services.append(mds_service)
         for sd in mds_service.vpool.storagedrivers:
             if sd.storagerouter_guid == mds_service.service.storagerouter_guid:
                 storagedriver_id = sd.storagedriver_id
                 vpool = sd.vpool
         if storagedriver_id is None:
             raise RuntimeError(
                 'The given MDSService is located on a node without StorageDriver'
             )
     else:
         storagedriver_id = storagedriver.storagedriver_id
         vpool = storagedriver.vpool
     srclient = StorageRouterClient(vpool.guid, None)
     for i in xrange(start_id, start_id + amount):
         devicename = 'vdisk_{0}'.format(i)
         mds_backend_config = DalHelper.generate_mds_metadata_backend_config(
             mds_services)
         volume_id = srclient.create_volume(devicename, mds_backend_config,
                                            0, str(storagedriver_id))
         if len(mds_services) == 1:
             MDSClient.set_catchup(mds_services[0], volume_id, 50)
         vdisk = VDisk()
         vdisk.name = str(i)
         vdisk.devicename = devicename
         vdisk.volume_id = volume_id
         vdisk.vpool = vpool
         vdisk.size = 0
         vdisk.save()
         vdisk.reload_client('storagedriver')
         if mds_service is not None:
             junction = MDSServiceVDisk()
             junction.vdisk = vdisk
             junction.mds_service = mds_service
             junction.is_master = True
             junction.save()
         vdisks[i] = vdisk
     return vdisks
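
A hedged usage sketch: exactly one of mds_service or storagedriver must be provided (passing both or neither raises RuntimeError). The DalHelper prefix is an assumption based on the other helpers this function calls:

    # Attach the vdisks to an existing MDS service...
    vdisks = DalHelper.create_vdisks_for_mds_service(amount=2, start_id=1, mds_service=mds_service)
    # ...or create them against a StorageDriver directly (never both).
    more_vdisks = DalHelper.create_vdisks_for_mds_service(amount=2, start_id=3, storagedriver=storagedriver)
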
Code Example #6
File: vdisk.py Project: mflu/openvstorage_centos
    def create_from_template(diskguid, machinename, devicename, pmachineguid, machineguid=None, storagedriver_guid=None):
        """
        Create a disk from a template

        @param parentdiskguid: guid of the disk
        @param location: location where virtual device should be created (eg: myVM)
        @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
        @param machineguid: guid of the machine to assign disk to
        @return diskguid: guid of new disk
        """

        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        disk_path = hypervisor.get_disk_path(machinename, devicename)

        description = '{} {}'.format(machinename, devicename)
        properties_to_clone = [
            'description', 'size', 'type', 'retentionpolicyid',
            'snapshotpolicyid', 'vmachine', 'vpool']

        disk = VDisk(diskguid)
        if disk.vmachine and not disk.vmachine.is_vtemplate:
            # Disk might not be attached to a vmachine, but still be a template
            raise RuntimeError('The given disk does not belong to a template')

        if storagedriver_guid is not None:
            storagedriver_id = StorageDriver(storagedriver_guid).storagedriver_id
        else:
            storagedriver_id = disk.storagedriver_id

        new_disk = VDisk()
        new_disk.copy(disk, include=properties_to_clone)
        new_disk.vpool = disk.vpool
        new_disk.devicename = hypervisor.clean_backing_disk_filename(disk_path)
        new_disk.parent_vdisk = disk
        new_disk.name = '{}-clone'.format(disk.name)
        new_disk.description = description
        new_disk.vmachine = VMachine(machineguid) if machineguid else disk.vmachine
        new_disk.save()

        logger.info('Create disk from template {} to new disk {} to location {}'.format(
            disk.name, new_disk.name, disk_path
        ))
        try:
            volume_id = disk.storagedriver_client.create_clone_from_template(disk_path, str(disk.volume_id), node_id=str(storagedriver_id))
            new_disk.volume_id = volume_id
            new_disk.save()
        except Exception as ex:
            logger.error('Clone disk on volumedriver level failed with exception: {0}'.format(str(ex)))
            new_disk.delete()
            raise

        return {'diskguid': new_disk.guid, 'name': new_disk.name,
                'backingdevice': disk_path}
Code Example #7
File: vdisk.py Project: mflu/openvstorage_centos
    def clone(diskguid,
              snapshotid,
              devicename,
              pmachineguid,
              machinename,
              machineguid=None,
              **kwargs):
        """
        Clone a disk

        @param location: location where virtual device should be created (eg: myVM)
        @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
        @param parentdiskguid: guid of the disk
        @param snapshotid: guid of the snapshot
        @param machineguid: guid of the machine to assign disk to
        """
        _ = kwargs
        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        description = '{} {}'.format(machinename, devicename)
        properties_to_clone = [
            'description', 'size', 'type', 'retentionpolicyguid',
            'snapshotpolicyguid', 'autobackup'
        ]

        new_disk = VDisk()
        disk = VDisk(diskguid)
        _log = 'Clone snapshot {} of disk {} to location {}'
        _location = hypervisor.get_backing_disk_path(machinename, devicename)
        _id = '{}'.format(disk.volume_id)
        _snap = '{}'.format(snapshotid)
        logger.info(_log.format(_snap, disk.name, _location))
        volume_id = disk.storagedriver_client.create_clone(
            _location, _id, _snap)
        new_disk.copy(disk, include=properties_to_clone)
        new_disk.parent_vdisk = disk
        new_disk.name = '{}-clone'.format(disk.name)
        new_disk.description = description
        new_disk.volume_id = volume_id
        new_disk.devicename = hypervisor.clean_backing_disk_filename(_location)
        new_disk.parentsnapshot = snapshotid
        new_disk.vmachine = VMachine(
            machineguid) if machineguid else disk.vmachine
        new_disk.vpool = disk.vpool
        new_disk.save()
        return {
            'diskguid': new_disk.guid,
            'name': new_disk.name,
            'backingdevice': _location
        }
Code Example #8
File: vdisk.py Project: BillTheBest/openvstorage
    def clone(diskguid, snapshotid, devicename, pmachineguid, machinename, machineguid=None):
        """
        Clone a disk
        """
        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        description = "{} {}".format(machinename, devicename)
        properties_to_clone = ["description", "size", "type", "retentionpolicyguid", "snapshotpolicyguid", "autobackup"]
        vdisk = VDisk(diskguid)
        location = hypervisor.get_backing_disk_path(machinename, devicename)

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=properties_to_clone)
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = "{0}-clone".format(vdisk.name)
        new_vdisk.description = description
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(location)
        new_vdisk.parentsnapshot = snapshotid
        new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.save()

        storagedriver = StorageDriverList.get_by_storagedriver_id(vdisk.storagedriver_id)
        if storagedriver is None:
            raise RuntimeError("Could not find StorageDriver with id {0}".format(vdisk.storagedriver_id))

        mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, vdisk.vpool)
        if mds_service is None:
            raise RuntimeError("Could not find a MDS service")

        logger.info("Clone snapshot {} of disk {} to location {}".format(snapshotid, vdisk.name, location))
        volume_id = vdisk.storagedriver_client.create_clone(
            target_path=location,
            metadata_backend_config=MDSMetaDataBackendConfig(
                [MDSNodeConfig(address=str(mds_service.service.storagerouter.ip), port=mds_service.service.ports[0])]
            ),
            parent_volume_id=str(vdisk.volume_id),
            parent_snapshot_id=str(snapshotid),
            node_id=str(vdisk.storagedriver_id),
        )
        new_vdisk.volume_id = volume_id
        new_vdisk.save()
        MDSServiceController.ensure_safety(new_vdisk)

        return {"diskguid": new_vdisk.guid, "name": new_vdisk.name, "backingdevice": location}
Code Example #9
File: helpers.py Project: grimpy/openvstorage
 def create_vdisks_for_mds_service(amount, start_id, mds_service=None, storagedriver=None):
     """
     Generates vdisks and appends them to a given mds_service
     """
     if (mds_service is None and storagedriver is None) or (mds_service is not None and storagedriver is not None):
         raise RuntimeError("Either `mds_service` or `storagedriver` should be passed")
     vdisks = {}
     storagedriver_id = None
     vpool = None
     mds_services = []
     if mds_service is not None:
         mds_services.append(mds_service)
         for sd in mds_service.vpool.storagedrivers:
             if sd.storagerouter_guid == mds_service.service.storagerouter_guid:
                 storagedriver_id = sd.storagedriver_id
                 vpool = sd.vpool
         if storagedriver_id is None:
             raise RuntimeError("The given MDSService is located on a node without StorageDriver")
     else:
         storagedriver_id = storagedriver.storagedriver_id
         vpool = storagedriver.vpool
     srclient = StorageRouterClient(vpool.guid, None)
     for i in xrange(start_id, start_id + amount):
         devicename = "vdisk_{0}".format(i)
         mds_backend_config = Helper._generate_mdsmetadatabackendconfig(mds_services)
         volume_id = srclient.create_volume(devicename, mds_backend_config, 0, str(storagedriver_id))
         if len(mds_services) == 1:
             MDSClient.set_catchup(mds_services[0], volume_id, 50)
         vdisk = VDisk()
         vdisk.name = str(i)
         vdisk.devicename = devicename
         vdisk.volume_id = volume_id
         vdisk.vpool = vpool
         vdisk.size = 0
         vdisk.save()
         vdisk.reload_client("storagedriver")
         if mds_service is not None:
             junction = MDSServiceVDisk()
             junction.vdisk = vdisk
             junction.mds_service = mds_service
             junction.is_master = True
             junction.save()
         vdisks[i] = vdisk
     return vdisks
Code Example #10
 def _create_vdisks_for_mds_service(self,
                                    amount,
                                    start_id,
                                    mds_service=None,
                                    vpool=None):
     """
     Generates vdisks and appends them to a given mds_service
     """
     vdisks = {}
     for i in xrange(start_id, start_id + amount):
         disk = VDisk()
         disk.name = str(i)
         disk.devicename = 'disk_{0}'.format(i)
         disk.volume_id = 'disk_{0}'.format(i)
         disk.vpool = mds_service.vpool if mds_service is not None else vpool
         disk.size = 0
         disk.save()
         disk.reload_client()
         if mds_service is not None:
             storagedriver_id = None
             for sd in mds_service.vpool.storagedrivers:
                 if sd.storagerouter_guid == mds_service.service.storagerouter_guid:
                     storagedriver_id = sd.storagedriver_id
             junction = MDSServiceVDisk()
             junction.vdisk = disk
             junction.mds_service = mds_service
             junction.is_master = True
             junction.save()
             config = type(
                 'MDSNodeConfig', (), {
                     'address': self._generate_nc_function(
                         True, mds_service),
                     'port': self._generate_nc_function(False, mds_service)
                 })()
             mds_backend_config = type(
                 'MDSMetaDataBackendConfig', (),
                 {'node_configs': self._generate_bc_function([config])})()
             StorageDriverClient.metadata_backend_config['disk_{0}'.format(
                 i)] = mds_backend_config
             StorageDriverClient.catch_up['disk_{0}'.format(i)] = 50
             StorageDriverClient.vrouter_id['disk_{0}'.format(
                 i)] = storagedriver_id
         vdisks[i] = disk
     return vdisks
Code Example #11
File: vdisk.py Project: mflu/openvstorage_centos
    def clone(diskguid, snapshotid, devicename, pmachineguid, machinename, machineguid=None, **kwargs):
        """
        Clone a disk

        @param location: location where virtual device should be created (eg: myVM)
        @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
        @param parentdiskguid: guid of the disk
        @param snapshotid: guid of the snapshot
        @param machineguid: guid of the machine to assign disk to
        """
        _ = kwargs
        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        description = '{} {}'.format(machinename, devicename)
        properties_to_clone = ['description', 'size', 'type', 'retentionpolicyguid',
                               'snapshotpolicyguid', 'autobackup']

        new_disk = VDisk()
        disk = VDisk(diskguid)
        _log = 'Clone snapshot {} of disk {} to location {}'
        _location = hypervisor.get_backing_disk_path(machinename, devicename)
        _id = '{}'.format(disk.volume_id)
        _snap = '{}'.format(snapshotid)
        logger.info(_log.format(_snap, disk.name, _location))
        volume_id = disk.storagedriver_client.create_clone(_location, _id, _snap)
        new_disk.copy(disk, include=properties_to_clone)
        new_disk.parent_vdisk = disk
        new_disk.name = '{}-clone'.format(disk.name)
        new_disk.description = description
        new_disk.volume_id = volume_id
        new_disk.devicename = hypervisor.clean_backing_disk_filename(_location)
        new_disk.parentsnapshot = snapshotid
        new_disk.vmachine = VMachine(machineguid) if machineguid else disk.vmachine
        new_disk.vpool = disk.vpool
        new_disk.save()
        return {'diskguid': new_disk.guid,
                'name': new_disk.name,
                'backingdevice': _location}
Code Example #12
File: helpers.py Project: grimpy/openvstorage
    def build_service_structure(structure, previous_structure=None):
        """
        Builds an MDS service structure
        Example:
            structure = Helper.build_service_structure(
                {'vpools': [1],
                 'domains': [],
                 'storagerouters': [1],
                 'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
                 'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
                 'storagerouter_domains': []}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
            )
        """
        if previous_structure is None:
            previous_structure = {}
        vdisks = previous_structure.get("vdisks", {})
        vpools = previous_structure.get("vpools", {})
        domains = previous_structure.get("domains", {})
        services = previous_structure.get("services", {})
        mds_services = previous_structure.get("mds_services", {})
        storagerouters = previous_structure.get("storagerouters", {})
        storagedrivers = previous_structure.get("storagedrivers", {})
        storagerouter_domains = previous_structure.get("storagerouter_domains", {})

        service_type = ServiceTypeList.get_by_name("MetadataServer")
        if service_type is None:
            service_type = ServiceType()
            service_type.name = "MetadataServer"
            service_type.save()
        srclients = {}
        for domain_id in structure.get("domains", []):
            if domain_id not in domains:
                domain = Domain()
                domain.name = "domain_{0}".format(domain_id)
                domain.save()
                domains[domain_id] = domain
        for vpool_id in structure.get("vpools", []):
            if vpool_id not in vpools:
                vpool = VPool()
                vpool.name = str(vpool_id)
                vpool.status = "RUNNING"
                vpool.save()
                vpools[vpool_id] = vpool
            else:
                vpool = vpools[vpool_id]
            srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
        for sr_id in structure.get("storagerouters", []):
            if sr_id not in storagerouters:
                storagerouter = StorageRouter()
                storagerouter.name = str(sr_id)
                storagerouter.ip = "10.0.0.{0}".format(sr_id)
                storagerouter.rdma_capable = False
                storagerouter.node_type = "MASTER"
                storagerouter.machine_id = str(sr_id)
                storagerouter.save()
                storagerouters[sr_id] = storagerouter
                disk = Disk()
                disk.storagerouter = storagerouter
                disk.state = "OK"
                disk.name = "/dev/uda"
                disk.size = 1 * 1024 ** 4
                disk.is_ssd = True
                disk.aliases = ["/dev/uda"]
                disk.save()
                partition = DiskPartition()
                partition.offset = 0
                partition.size = disk.size
                partition.aliases = ["/dev/uda-1"]
                partition.state = "OK"
                partition.mountpoint = "/tmp/unittest/sr_{0}/disk_1/partition_1".format(sr_id)
                partition.disk = disk
                partition.roles = [DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB]
                partition.save()
        for sd_id, vpool_id, sr_id in structure.get("storagedrivers", ()):
            if sd_id not in storagedrivers:
                storagedriver = StorageDriver()
                storagedriver.vpool = vpools[vpool_id]
                storagedriver.storagerouter = storagerouters[sr_id]
                storagedriver.name = str(sd_id)
                storagedriver.mountpoint = "/"
                storagedriver.cluster_ip = storagerouters[sr_id].ip
                storagedriver.storage_ip = "10.0.1.{0}".format(sr_id)
                storagedriver.storagedriver_id = str(sd_id)
                storagedriver.ports = {"management": 1, "xmlrpc": 2, "dtl": 3, "edge": 4}
                storagedriver.save()
                storagedrivers[sd_id] = storagedriver
                Helper._set_vpool_storage_driver_configuration(vpool=vpools[vpool_id], storagedriver=storagedriver)
        for mds_id, sd_id in structure.get("mds_services", ()):
            if mds_id not in mds_services:
                sd = storagedrivers[sd_id]
                s_id = "{0}-{1}".format(sd.storagerouter.name, mds_id)
                service = Service()
                service.name = s_id
                service.storagerouter = sd.storagerouter
                service.ports = [mds_id]
                service.type = service_type
                service.save()
                services[s_id] = service
                mds_service = MDSService()
                mds_service.service = service
                mds_service.number = 0
                mds_service.capacity = 10
                mds_service.vpool = sd.vpool
                mds_service.save()
                mds_services[mds_id] = mds_service
                StorageDriverController.add_storagedriverpartition(
                    sd,
                    {
                        "size": None,
                        "role": DiskPartition.ROLES.DB,
                        "sub_role": StorageDriverPartition.SUBROLE.MDS,
                        "partition": sd.storagerouter.disks[0].partitions[0],
                        "mds_service": mds_service,
                    },
                )
        for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get("vdisks", ()):
            if vdisk_id not in vdisks:
                vpool = vpools[vpool_id]
                devicename = "vdisk_{0}".format(vdisk_id)
                mds_backend_config = Helper._generate_mdsmetadatabackendconfig(
                    [] if mds_id is None else [mds_services[mds_id]]
                )
                volume_id = srclients[vpool_id].create_volume(devicename, mds_backend_config, 0, str(storage_driver_id))
                vdisk = VDisk()
                vdisk.name = str(vdisk_id)
                vdisk.devicename = devicename
                vdisk.volume_id = volume_id
                vdisk.vpool = vpool
                vdisk.size = 0
                vdisk.save()
                vdisk.reload_client("storagedriver")
                vdisks[vdisk_id] = vdisk
        for srd_id, sr_id, domain_id, backup in structure.get("storagerouter_domains", ()):
            if srd_id not in storagerouter_domains:
                sr_domain = StorageRouterDomain()
                sr_domain.backup = backup
                sr_domain.domain = domains[domain_id]
                sr_domain.storagerouter = storagerouters[sr_id]
                sr_domain.save()
                storagerouter_domains[srd_id] = sr_domain
        return {
            "vdisks": vdisks,
            "vpools": vpools,
            "domains": domains,
            "services": services,
            "service_type": service_type,
            "mds_services": mds_services,
            "storagerouters": storagerouters,
            "storagedrivers": storagedrivers,
            "storagerouter_domains": storagerouter_domains,
        }
Code Example #13
File: helpers.py Project: yongshengma/framework
    def build_dal_structure(structure, previous_structure=None):
        """
        Builds a model structure
        Example:
            structure = DalHelper.build_dal_structure(
                {'vpools': [1],
                 'domains': [],
                 'storagerouters': [1],
                 'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
                 'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
                 'storagerouter_domains': []}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
            )
        """
        Configuration.set(key=Configuration.EDITION_KEY,
                          value=PackageFactory.EDITION_ENTERPRISE)

        if previous_structure is None:
            previous_structure = {}
        vdisks = previous_structure.get('vdisks', {})
        vpools = previous_structure.get('vpools', {})
        domains = previous_structure.get('domains', {})
        services = previous_structure.get('services', {})
        mds_services = previous_structure.get('mds_services', {})
        storagerouters = previous_structure.get('storagerouters', {})
        storagedrivers = previous_structure.get('storagedrivers', {})
        storagerouter_domains = previous_structure.get('storagerouter_domains',
                                                       {})

        service_types = {}
        for service_type_name in ServiceType.SERVICE_TYPES.values():
            service_type = ServiceTypeList.get_by_name(service_type_name)
            if service_type is None:
                service_type = ServiceType()
                service_type.name = service_type_name
                service_type.save()
            service_types[service_type_name] = service_type
        srclients = {}
        for domain_id in structure.get('domains', []):
            if domain_id not in domains:
                domain = Domain()
                domain.name = 'domain_{0}'.format(domain_id)
                domain.save()
                domains[domain_id] = domain
        for vpool_id in structure.get('vpools', []):
            if vpool_id not in vpools:
                vpool = VPool()
                vpool.name = str(vpool_id)
                vpool.status = 'RUNNING'
                vpool.metadata = {'backend': {}, 'caching_info': {}}
                vpool.metadata_store_bits = 5
                vpool.save()
                vpools[vpool_id] = vpool
            else:
                vpool = vpools[vpool_id]
            srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
            Configuration.set(
                '/ovs/vpools/{0}/mds_config|mds_tlogs'.format(vpool.guid), 100)
            Configuration.set(
                '/ovs/vpools/{0}/mds_config|mds_safety'.format(vpool.guid), 2)
            Configuration.set(
                '/ovs/vpools/{0}/mds_config|mds_maxload'.format(vpool.guid),
                75)
            Configuration.set(
                '/ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(
                    vpool.guid),
                json.dumps({}, indent=4),
                raw=True)
        for sr_id in structure.get('storagerouters', []):
            if sr_id not in storagerouters:
                storagerouter = StorageRouter()
                storagerouter.name = str(sr_id)
                storagerouter.ip = '10.0.0.{0}'.format(sr_id)
                storagerouter.rdma_capable = False
                storagerouter.node_type = 'MASTER'
                storagerouter.machine_id = str(sr_id)
                storagerouter.save()
                storagerouters[sr_id] = storagerouter
                disk = Disk()
                disk.storagerouter = storagerouter
                disk.state = 'OK'
                disk.name = '/dev/uda'
                disk.size = 1 * 1024**4
                disk.is_ssd = True
                disk.aliases = ['/dev/uda']
                disk.save()
                partition = DiskPartition()
                partition.offset = 0
                partition.size = disk.size
                partition.aliases = ['/dev/uda-1']
                partition.state = 'OK'
                partition.mountpoint = '/tmp/unittest/sr_{0}/disk_1/partition_1'.format(
                    sr_id)
                partition.disk = disk
                partition.roles = [
                    DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB
                ]
                partition.save()
            else:
                storagerouter = storagerouters[sr_id]

            # noinspection PyProtectedMember
            System._machine_id[storagerouter.ip] = str(sr_id)
            mds_start = 10000 + 100 * (sr_id - 1)
            mds_end = 10000 + 100 * sr_id - 1
            arakoon_start = 20000 + 100 * (sr_id - 1)
            storagedriver_start = 30000 + 100 * (sr_id - 1)
            storagedriver_end = 30000 + 100 * sr_id - 1
            Configuration.initialize_host(
                host_id=sr_id,
                port_info={
                    'mds': [mds_start, mds_end],
                    'arakoon': arakoon_start,
                    'storagedriver': [storagedriver_start, storagedriver_end]
                })

        for sd_id, vpool_id, sr_id in structure.get('storagedrivers', ()):
            if sd_id not in storagedrivers:
                storagedriver = StorageDriver()
                storagedriver.vpool = vpools[vpool_id]
                storagedriver.storagerouter = storagerouters[sr_id]
                storagedriver.name = str(sd_id)
                storagedriver.mountpoint = '/'
                storagedriver.cluster_ip = storagerouters[sr_id].ip
                storagedriver.storage_ip = '10.0.1.{0}'.format(sr_id)
                storagedriver.storagedriver_id = str(sd_id)
                storagedriver.ports = {
                    'management': 1,
                    'xmlrpc': 2,
                    'dtl': 3,
                    'edge': 4
                }
                storagedriver.save()
                storagedrivers[sd_id] = storagedriver
                DalHelper.set_vpool_storage_driver_configuration(
                    vpool=vpools[vpool_id], storagedriver=storagedriver)
        for mds_id, sd_id in structure.get('mds_services', ()):
            if mds_id not in mds_services:
                sd = storagedrivers[sd_id]
                s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
                service = Service()
                service.name = s_id
                service.storagerouter = sd.storagerouter
                service.ports = [mds_id]
                service.type = service_types['MetadataServer']
                service.save()
                services[s_id] = service
                mds_service = MDSService()
                mds_service.service = service
                mds_service.number = 0
                mds_service.capacity = 10
                mds_service.vpool = sd.vpool
                mds_service.save()
                mds_services[mds_id] = mds_service
                StorageDriverController.add_storagedriverpartition(
                    sd, {
                        'size': None,
                        'role': DiskPartition.ROLES.DB,
                        'sub_role': StorageDriverPartition.SUBROLE.MDS,
                        'partition': sd.storagerouter.disks[0].partitions[0],
                        'mds_service': mds_service
                    })
        for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get(
                'vdisks', ()):
            if vdisk_id not in vdisks:
                vpool = vpools[vpool_id]
                devicename = 'vdisk_{0}'.format(vdisk_id)
                mds_backend_config = DalHelper.generate_mds_metadata_backend_config(
                    [] if mds_id is None else [mds_services[mds_id]])
                volume_id = srclients[vpool_id].create_volume(
                    devicename, mds_backend_config, 0, str(storage_driver_id))
                vdisk = VDisk()
                vdisk.name = str(vdisk_id)
                vdisk.devicename = devicename
                vdisk.volume_id = volume_id
                vdisk.vpool = vpool
                vdisk.size = 0
                vdisk.save()
                vdisk.reload_client('storagedriver')
                vdisks[vdisk_id] = vdisk
        for srd_id, sr_id, domain_id, backup in structure.get(
                'storagerouter_domains', ()):
            if srd_id not in storagerouter_domains:
                sr_domain = StorageRouterDomain()
                sr_domain.backup = backup
                sr_domain.domain = domains[domain_id]
                sr_domain.storagerouter = storagerouters[sr_id]
                sr_domain.save()
                storagerouter_domains[srd_id] = sr_domain
        return {
            'vdisks': vdisks,
            'vpools': vpools,
            'domains': domains,
            'services': services,
            'mds_services': mds_services,
            'service_types': service_types,
            'storagerouters': storagerouters,
            'storagedrivers': storagedrivers,
            'storagerouter_domains': storagerouter_domains
        }
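
A hedged sketch of combining this builder with create_vdisks_for_mds_service from Code Example #5, assuming both are exposed on DalHelper as the docstring example suggests; the ids are illustrative:

    structure = DalHelper.build_dal_structure(
        {'vpools': [1],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)]})  # (<id>, <storagedriver_id>)
    mds_service = structure['mds_services'][1]
    vdisks = DalHelper.create_vdisks_for_mds_service(amount=5, start_id=1, mds_service=mds_service)
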
Code Example #14
File: vdisk.py Project: JasperLue/openvstorage
    def clone(diskguid, snapshotid, devicename, pmachineguid, machinename=None, machineguid=None, detached=False):
        """
        Clone a disk
        """
        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        if machinename is None:
            description = devicename
        else:
            description = '{0} {1}'.format(machinename, devicename)
        properties_to_clone = ['description', 'size', 'type', 'retentionpolicyguid',
                               'snapshotpolicyguid', 'autobackup']
        vdisk = VDisk(diskguid)
        location = hypervisor.get_backing_disk_path(machinename, devicename)

        if machineguid is not None and detached is True:
            raise ValueError('A vMachine GUID was specified while detached is True')

        if snapshotid is None:
            # Create a new snapshot
            timestamp = str(int(time.time()))
            metadata = {'label': '',
                        'is_consistent': False,
                        'timestamp': timestamp,
                        'machineguid': machineguid,
                        'is_automatic': True}
            VDiskController.create_snapshot(diskguid, metadata)
            tries = 25  # About 5 minutes
            while snapshotid is None and tries > 0:
                tries -= 1
                time.sleep(25 - tries)
                vdisk.invalidate_dynamics(['snapshots'])
                snapshots = [snapshot for snapshot in vdisk.snapshots
                             if snapshot['in_backend'] is True and snapshot['timestamp'] == timestamp]
                if len(snapshots) == 1:
                    snapshotid = snapshots[0]['guid']
            if snapshotid is None:
                raise RuntimeError('Could not find created snapshot in time')

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=properties_to_clone)
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = '{0}-clone'.format(vdisk.name)
        new_vdisk.description = description
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(location)
        new_vdisk.parentsnapshot = snapshotid
        if detached is False:
            new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.save()

        try:
            storagedriver = StorageDriverList.get_by_storagedriver_id(vdisk.storagedriver_id)
            if storagedriver is None:
                raise RuntimeError('Could not find StorageDriver with id {0}'.format(vdisk.storagedriver_id))

            mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, vdisk.vpool)
            if mds_service is None:
                raise RuntimeError('Could not find a MDS service')

            logger.info('Clone snapshot {0} of disk {1} to location {2}'.format(snapshotid, vdisk.name, location))
            volume_id = vdisk.storagedriver_client.create_clone(
                target_path=location,
                metadata_backend_config=MDSMetaDataBackendConfig([MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                                                                port=mds_service.service.ports[0])]),
                parent_volume_id=str(vdisk.volume_id),
                parent_snapshot_id=str(snapshotid),
                node_id=str(vdisk.storagedriver_id)
            )
        except Exception as ex:
            logger.error('Caught exception during clone, trying to delete the volume. {0}'.format(ex))
            new_vdisk.delete()
            VDiskController.delete_volume(location)
            raise

        new_vdisk.volume_id = volume_id
        new_vdisk.save()

        try:
            MDSServiceController.ensure_safety(new_vdisk)
        except Exception as ex:
            logger.error('Caught exception during "ensure_safety" {0}'.format(ex))

        return {'diskguid': new_vdisk.guid,
                'name': new_vdisk.name,
                'backingdevice': location}
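
A hedged illustration of the optional parameters in this version (variable names are placeholders): snapshotid=None makes the task create and wait for its own automatic snapshot before cloning, while detached=True clones without binding the new vdisk to a vMachine (combining detached=True with a machineguid raises ValueError):

    result = VDiskController.clone(diskguid=vdisk.guid,
                                   snapshotid=None,  # an automatic snapshot is created and awaited
                                   devicename='clone_device',
                                   pmachineguid=pmachine.guid,
                                   detached=True)
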
Code Example #15
    def create_from_template(diskguid,
                             machinename,
                             devicename,
                             pmachineguid,
                             machineguid=None,
                             storagedriver_guid=None):
        """
        Create a disk from a template

        @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
        @param machineguid: guid of the machine to assign disk to
        @return diskguid: guid of new disk
        """

        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        disk_path = hypervisor.get_disk_path(machinename, devicename)

        description = '{} {}'.format(machinename, devicename)
        properties_to_clone = [
            'description', 'size', 'type', 'retentionpolicyid',
            'snapshotpolicyid', 'vmachine', 'vpool'
        ]

        vdisk = VDisk(diskguid)
        if vdisk.vmachine and not vdisk.vmachine.is_vtemplate:
            # Disk might not be attached to a vmachine, but still be a template
            raise RuntimeError('The given vdisk does not belong to a template')

        if storagedriver_guid is not None:
            storagedriver_id = StorageDriver(
                storagedriver_guid).storagedriver_id
        else:
            storagedriver_id = vdisk.storagedriver_id
        storagedriver = StorageDriverList.get_by_storagedriver_id(
            storagedriver_id)
        if storagedriver is None:
            raise RuntimeError(
                'Could not find StorageDriver with id {0}'.format(
                    storagedriver_id))

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=properties_to_clone)
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(
            disk_path)
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = '{}-clone'.format(vdisk.name)
        new_vdisk.description = description
        new_vdisk.vmachine = VMachine(
            machineguid) if machineguid else vdisk.vmachine
        new_vdisk.save()

        mds_service = MDSServiceController.get_preferred_mds(
            storagedriver.storagerouter, vdisk.vpool)
        if mds_service is None:
            raise RuntimeError('Could not find a MDS service')

        logger.info(
            'Create disk from template {} to new disk {} to location {}'.
            format(vdisk.name, new_vdisk.name, disk_path))
        try:
            volume_id = vdisk.storagedriver_client.create_clone_from_template(
                target_path=disk_path,
                metadata_backend_config=MDSMetaDataBackendConfig([
                    MDSNodeConfig(address=str(
                        mds_service.service.storagerouter.ip),
                                  port=mds_service.service.ports[0])
                ]),
                parent_volume_id=str(vdisk.volume_id),
                node_id=str(storagedriver_id))
            new_vdisk.volume_id = volume_id
            new_vdisk.save()
            MDSServiceController.ensure_safety(new_vdisk)

        except Exception as ex:
            logger.error(
                'Clone disk on volumedriver level failed with exception: {0}'.
                format(str(ex)))
            new_vdisk.delete()
            raise

        return {
            'diskguid': new_vdisk.guid,
            'name': new_vdisk.name,
            'backingdevice': disk_path
        }
Code Example #16
    def create_cloned_volume(self, volume, src_vref):
        """Create a cloned volume from another volume.
        Called on "cinder create --source-volid ... "

        :param volume: volume reference - target volume (sqlalchemy Model)
        :param src_vref: volume reference - source volume (sqlalchemy Model)

        OVS: Create clone from template if the source is a template
             Create volume from snapshot if the source is a volume
             - create snapshot of source volume if it doesn't have snapshots
        """
        _debug_vol_info('CREATE_CLONED_VOL', volume)
        _debug_vol_info('CREATE_CLONED_VOL Source', src_vref)

        mountpoint = self._get_hostname_mountpoint(str(volume.host))
        name = volume.display_name
        if not name:
            name = volume.name
            volume.display_name = volume.name

        pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
            str(volume.host))

        # Source volume
        source_ovs_disk = self._find_ovs_model_disk_by_location(
            str(src_vref.provider_location), src_vref.host)
        if source_ovs_disk.info['object_type'] == 'TEMPLATE':
            LOG.info('[CREATE_FROM_TEMPLATE] VDisk %s is a template'
                     % source_ovs_disk.devicename)

            # cloning from a template
            LOG.debug('[CREATE FROM TEMPLATE] ovs_disk %s '
                      % (source_ovs_disk.devicename))

            disk_meta = VDiskController.create_from_template(
                diskguid=source_ovs_disk.guid,
                machinename="",
                devicename=str(name),
                pmachineguid=pmachineguid,
                machineguid=None,
                storagedriver_guid=None)
            volume['provider_location'] = '{}{}'.format(
                mountpoint, disk_meta['backingdevice'])
            LOG.debug('[CREATE FROM TEMPLATE] New volume %s'
                      % volume['provider_location'])
            vdisk = VDisk(disk_meta['diskguid'])
            vdisk.cinder_id = volume.id
            vdisk.name = name
            LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s'
                      % (volume.id, name))
            vdisk.save()
        else:
            LOG.info('[THIN CLONE] VDisk %s is not a template'
                     % source_ovs_disk.devicename)
            # We do not yet support full volume clones
            # - this requires "emancipate" functionality.
            # So for now we take a snapshot
            # (or reuse the latest existing snapshot) and clone from that snapshot.
            available_snapshots = [snapshot for snapshot in source_ovs_disk.snapshots
                                   if 'in_backend' not in snapshot or snapshot['in_backend'] is True]
            if len(available_snapshots) == 0:
                metadata = {'label': "Cinder clone snapshot {0}".format(name),
                            'is_consistent': False,
                            'timestamp': time.time(),
                            'machineguid': source_ovs_disk.vmachine_guid,
                            'is_automatic': False}

                LOG.debug('CREATE_SNAP %s %s' % (name, str(metadata)))
                snapshotid = VDiskController.create_snapshot(
                    diskguid=source_ovs_disk.guid,
                    metadata=metadata,
                    snapshotid=None)
                LOG.debug('CREATE_SNAP OK')

                OVSVolumeDriver._wait_for_snapshot(source_ovs_disk, snapshotid)
            else:
                snapshotid = available_snapshots[-1]['guid']
            LOG.debug('[CREATE CLONE FROM SNAP] %s ' % snapshotid)

            disk_meta = VDiskController.clone(diskguid=source_ovs_disk.guid,
                                              snapshotid=snapshotid,
                                              devicename=str(name),
                                              pmachineguid=pmachineguid,
                                              machinename="",
                                              machineguid=None)
            volume['provider_location'] = '{}{}'.format(
                mountpoint, disk_meta['backingdevice'])

            LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
            LOG.debug('[CLONE FROM SNAP] New volume %s'
                      % volume['provider_location'])
            vdisk = VDisk(disk_meta['diskguid'])
            vdisk.cinder_id = volume.id
            vdisk.name = name
            vdisk.save()
        return {'provider_location': volume['provider_location'],
                'display_name': volume['display_name']}
Code Example #17
    def test_happypath(self):
        """
        Validates the happy path: hourly snapshots are taken, with a few manual consistent ones
        every now and then. The delete policy is executed every day.
        """
        # Setup
        # There are 2 machines; one with two disks, one with one disk and an additional disk
        backend_type = BackendType()
        backend_type.name = 'BackendType'
        backend_type.code = 'BT'
        backend_type.save()
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = 'PMachine'
        pmachine.username = '******'
        pmachine.ip = '127.0.0.1'
        pmachine.hvtype = 'VMWARE'
        pmachine.save()
        vmachine_1 = VMachine()
        vmachine_1.name = 'vmachine_1'
        vmachine_1.devicename = 'dummy'
        vmachine_1.pmachine = pmachine
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        vdisk_1_2 = VDisk()
        vdisk_1_2.name = 'vdisk_1_2'
        vdisk_1_2.volume_id = 'vdisk_1_2'
        vdisk_1_2.vmachine = vmachine_1
        vdisk_1_2.vpool = vpool
        vdisk_1_2.devicename = 'dummy'
        vdisk_1_2.size = 0
        vdisk_1_2.save()
        vdisk_1_2.reload_client()
        vmachine_2 = VMachine()
        vmachine_2.name = 'vmachine_2'
        vmachine_2.devicename = 'dummy'
        vmachine_2.pmachine = pmachine
        vmachine_2.save()
        vdisk_2_1 = VDisk()
        vdisk_2_1.name = 'vdisk_2_1'
        vdisk_2_1.volume_id = 'vdisk_2_1'
        vdisk_2_1.vmachine = vmachine_2
        vdisk_2_1.vpool = vpool
        vdisk_2_1.devicename = 'dummy'
        vdisk_2_1.size = 0
        vdisk_2_1.save()
        vdisk_2_1.reload_client()
        vdisk_3 = VDisk()
        vdisk_3.name = 'vdisk_3'
        vdisk_3.volume_id = 'vdisk_3'
        vdisk_3.vpool = vpool
        vdisk_3.devicename = 'dummy'
        vdisk_3.size = 0
        vdisk_3.save()
        vdisk_3.reload_client()

        for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        # Run the testing scenario
        debug = True
        amount_of_days = 50
        base = datetime.now().date()
        day = timedelta(1)
        minute = 60
        hour = minute * 60

        for d in xrange(0, amount_of_days):
            base_timestamp = DeleteSnapshots._make_timestamp(base, day * d)
            print ''
            print 'Day cycle: {}: {}'.format(
                d, datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d')
            )

            # At the start of the day, delete snapshot policy runs at 00:30
            print '- Deleting snapshots'
            ScheduledTaskController.deletescrubsnapshots(timestamp=base_timestamp + (minute * 30))

            # Validate snapshots
            print '- Validating snapshots'
            for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
                self._validate(vdisk, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create a non-consistent snapshot every hour, between 2:00 and 22:00
            # - Create a consistent snapshot at 6:30, 12:30, 18:30
            print '- Creating snapshots'
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                for vm in [vmachine_1, vmachine_2]:
                    VMachineController.snapshot(machineguid=vm.guid,
                                                label='ss_i_{0}:00'.format(str(h)),
                                                is_consistent=False,
                                                timestamp=timestamp)
                    if h in [6, 12, 18]:
                        ts = (timestamp + (minute * 30))
                        VMachineController.snapshot(machineguid=vm.guid,
                                                    label='ss_c_{0}:30'.format(str(h)),
                                                    is_consistent=True,
                                                    timestamp=ts)

                VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                          'is_consistent': False,
                                                          'timestamp': str(timestamp),
                                                          'machineguid': None})
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                    metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                              'is_consistent': True,
                                                              'timestamp': str(ts),
                                                              'machineguid': None})
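The tests in this listing turn a base date plus a day offset into an epoch timestamp through a `_make_timestamp` helper (called above as `DeleteSnapshots._make_timestamp(base, day * d)` and elsewhere as `self._make_timestamp(...)`), which is not shown here. A minimal sketch that is consistent with how the result is consumed via `datetime.fromtimestamp` could look like this; it is an assumption, not the original implementation:

    from time import mktime

    def _make_timestamp(base, offset):
        # base: a datetime.date (e.g. datetime.now().date()); offset: a datetime.timedelta
        # Returns the epoch timestamp as an int, matching the datetime.fromtimestamp(...) calls above
        return int(mktime((base + offset).timetuple()))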
コード例 #18
0
ファイル: vmachine.py プロジェクト: DarumasLegs/framework
    def update_vmachine_config(vmachine, vm_object, pmachine=None):
        """
        Update a vMachine configuration with a given vMachine configuration
        :param vmachine: Virtual Machine to update
        :param vm_object: New virtual machine info
        :param pmachine: Physical machine of the virtual machine
        """
        try:
            vdisks_synced = 0
            if vmachine.name is None:
                MessageController.fire(MessageController.Type.EVENT,
                                       {'type': 'vmachine_created',
                                        'metadata': {'name': vm_object['name']}})
            elif vmachine.name != vm_object['name']:
                MessageController.fire(MessageController.Type.EVENT,
                                       {'type': 'vmachine_renamed',
                                        'metadata': {'old_name': vmachine.name,
                                                     'new_name': vm_object['name']}})
            if pmachine is not None:
                vmachine.pmachine = pmachine
            vmachine.name = vm_object['name']
            vmachine.hypervisor_id = vm_object['id']
            vmachine.devicename = vm_object['backing']['filename']
            vmachine.save()
            # Updating and linking disks
            storagedrivers = StorageDriverList.get_storagedrivers()
            datastores = dict([('{0}:{1}'.format(storagedriver.storage_ip, storagedriver.mountpoint), storagedriver) for storagedriver in storagedrivers])
            vdisk_guids = []
            mutex = volatile_mutex('{0}_{1}'.format(vmachine.name, vmachine.devicename))
            for disk in vm_object['disks']:
                ensure_safety = False
                if disk['datastore'] in vm_object['datastores']:
                    datastore = vm_object['datastores'][disk['datastore']]
                    if datastore in datastores:
                        try:
                            mutex.acquire(wait=10)
                            vdisk = VDiskList.get_by_devicename_and_vpool(disk['filename'], datastores[datastore].vpool)
                            if vdisk is None:
                                # The disk couldn't be located, but is in our datastore. We might be in a recovery scenario
                                vdisk = VDisk()
                                vdisk.vpool = datastores[datastore].vpool
                                vdisk.reload_client()
                                vdisk.devicename = disk['filename']
                                vdisk.volume_id = vdisk.storagedriver_client.get_volume_id(str(disk['backingfilename']))
                                vdisk.size = vdisk.info['volume_size']
                                vdisk.metadata = {'lba_size': vdisk.info['lba_size'],
                                                  'cluster_multiplier': vdisk.info['cluster_multiplier']}
                                # Create the disk in a locked context, but don't execute the long-running task in the same context
                                vdisk.save()
                                ensure_safety = True
                        finally:
                            mutex.release()
                        if ensure_safety:
                            MDSServiceController.ensure_safety(vdisk)
                            VDiskController.dtl_checkup(vdisk_guid=vdisk.guid)

                        # Update the disk with information from the hypervisor
                        if vdisk.vmachine is None:
                            MessageController.fire(MessageController.Type.EVENT,
                                                   {'type': 'vdisk_attached',
                                                    'metadata': {'vmachine_name': vmachine.name,
                                                                 'vdisk_name': disk['name']}})
                        vdisk.vmachine = vmachine
                        vdisk.name = disk['name']
                        vdisk.order = disk['order']
                        vdisk.save()
                        vdisk_guids.append(vdisk.guid)
                        vdisks_synced += 1

            for vdisk in vmachine.vdisks:
                if vdisk.guid not in vdisk_guids:
                    MessageController.fire(MessageController.Type.EVENT,
                                           {'type': 'vdisk_detached',
                                            'metadata': {'vmachine_name': vmachine.name,
                                                         'vdisk_name': vdisk.name}})
                    vdisk.vmachine = None
                    vdisk.save()

            VMachineController._logger.info('Updating vMachine finished (name {0}, {1} vdisks (re)linked)'.format(
                vmachine.name, vdisks_synced
            ))
        except Exception as ex:
            VMachineController._logger.info('Error during vMachine update: {0}'.format(str(ex)))
            raise
コード例 #19
0
    def test_happypath(self):
        """
        Validates the happy path; hourly snapshots are taken with a few manual consistent ones
        every now and then. The delete policy is executed every day
        """
        # Setup
        # There are 2 machines; one with two disks, one with one disk and a stand-alone additional disk
        failure_domain = FailureDomain()
        failure_domain.name = 'Test'
        failure_domain.save()
        backend_type = BackendType()
        backend_type.name = 'BackendType'
        backend_type.code = 'BT'
        backend_type.save()
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = 'PMachine'
        pmachine.username = '******'
        pmachine.ip = '127.0.0.1'
        pmachine.hvtype = 'VMWARE'
        pmachine.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.pmachine = pmachine
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.primary_failure_domain = failure_domain
        storage_router.save()
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.path = '/dev/non-existent'
        disk.size = 500 * 1024 ** 3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.id = 'disk_partition_id'
        disk_partition.disk = disk
        disk_partition.path = '/dev/disk/non-existent'
        disk_partition.size = 400 * 1024 ** 3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        vmachine_1 = VMachine()
        vmachine_1.name = 'vmachine_1'
        vmachine_1.devicename = 'dummy'
        vmachine_1.pmachine = pmachine
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        vdisk_1_2 = VDisk()
        vdisk_1_2.name = 'vdisk_1_2'
        vdisk_1_2.volume_id = 'vdisk_1_2'
        vdisk_1_2.vmachine = vmachine_1
        vdisk_1_2.vpool = vpool
        vdisk_1_2.devicename = 'dummy'
        vdisk_1_2.size = 0
        vdisk_1_2.save()
        vdisk_1_2.reload_client()
        vmachine_2 = VMachine()
        vmachine_2.name = 'vmachine_2'
        vmachine_2.devicename = 'dummy'
        vmachine_2.pmachine = pmachine
        vmachine_2.save()
        vdisk_2_1 = VDisk()
        vdisk_2_1.name = 'vdisk_2_1'
        vdisk_2_1.volume_id = 'vdisk_2_1'
        vdisk_2_1.vmachine = vmachine_2
        vdisk_2_1.vpool = vpool
        vdisk_2_1.devicename = 'dummy'
        vdisk_2_1.size = 0
        vdisk_2_1.save()
        vdisk_2_1.reload_client()
        vdisk_3 = VDisk()
        vdisk_3.name = 'vdisk_3'
        vdisk_3.volume_id = 'vdisk_3'
        vdisk_3.vpool = vpool
        vdisk_3.devicename = 'dummy'
        vdisk_3.size = 0
        vdisk_3.save()
        vdisk_3.reload_client()

        for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        # Run the testing scenario
        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            print 'Running in Travis, reducing output.'
        debug = not travis
        amount_of_days = 50
        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        minute = 60
        hour = minute * 60

        for d in xrange(0, amount_of_days):
            base_timestamp = self._make_timestamp(base, day * d)
            print ''
            print 'Day cycle: {0}: {1}'.format(d, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d'))

            # At the start of the day, delete snapshot policy runs at 00:30
            print '- Deleting snapshots'
            ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))

            # Validate snapshots
            print '- Validating snapshots'
            for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
                self._validate(vdisk, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create a non-consistent snapshot every hour, between 2:00 and 22:00
            # - Create a consistent snapshot at 6:30, 12:30, 18:30
            print '- Creating snapshots'
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                for vm in [vmachine_1, vmachine_2]:
                    VMachineController.snapshot(machineguid=vm.guid,
                                                label='ss_i_{0}:00'.format(str(h)),
                                                is_consistent=False,
                                                timestamp=timestamp)
                    if h in [6, 12, 18]:
                        ts = (timestamp + (minute * 30))
                        VMachineController.snapshot(machineguid=vm.guid,
                                                    label='ss_c_{0}:30'.format(str(h)),
                                                    is_consistent=True,
                                                    timestamp=ts)

                VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                          'is_consistent': False,
                                                          'timestamp': str(timestamp),
                                                          'machineguid': None})
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                    metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                              'is_consistent': True,
                                                              'timestamp': str(ts),
                                                              'machineguid': None})
コード例 #20
0
ファイル: vmachine.py プロジェクト: th3architect/framework
    def update_vmachine_config(vmachine, vm_object, pmachine=None):
        """
        Update a vMachine configuration with a given vMachine configuration
        :param vmachine: Virtual Machine to update
        :param vm_object: New virtual machine info
        :param pmachine: Physical machine of the virtual machine
        """
        try:
            vdisks_synced = 0
            if vmachine.name is None:
                MessageController.fire(MessageController.Type.EVENT,
                                       {'type': 'vmachine_created',
                                        'metadata': {'name': vm_object['name']}})
            elif vmachine.name != vm_object['name']:
                MessageController.fire(MessageController.Type.EVENT,
                                       {'type': 'vmachine_renamed',
                                        'metadata': {'old_name': vmachine.name,
                                                     'new_name': vm_object['name']}})
            if pmachine is not None:
                vmachine.pmachine = pmachine
            vmachine.name = vm_object['name']
            vmachine.hypervisor_id = vm_object['id']
            vmachine.devicename = vm_object['backing']['filename']
            vmachine.save()
            # Updating and linking disks
            storagedrivers = StorageDriverList.get_storagedrivers()
            datastores = dict([('{0}:{1}'.format(storagedriver.storage_ip, storagedriver.mountpoint), storagedriver) for storagedriver in storagedrivers])
            vdisk_guids = []
            mutex = volatile_mutex('{0}_{1}'.format(vmachine.name, vmachine.devicename))
            for disk in vm_object['disks']:
                ensure_safety = False
                if disk['datastore'] in vm_object['datastores']:
                    datastore = vm_object['datastores'][disk['datastore']]
                    if datastore in datastores:
                        try:
                            mutex.acquire(wait=10)
                            vdisk = VDiskList.get_by_devicename_and_vpool(disk['filename'], datastores[datastore].vpool)
                            if vdisk is None:
                                # The disk couldn't be located, but is in our datastore. We might be in a recovery scenario
                                vdisk = VDisk()
                                vdisk.vpool = datastores[datastore].vpool
                                vdisk.reload_client()
                                vdisk.devicename = disk['filename']
                                vdisk.volume_id = vdisk.storagedriver_client.get_volume_id(str(disk['backingfilename']))
                                vdisk.size = vdisk.info['volume_size']
                                vdisk.metadata = {'lba_size': vdisk.info['lba_size'],
                                                  'cluster_multiplier': vdisk.info['cluster_multiplier']}
                                # Create the disk in a locked context, but don't execute the long-running task in the same context
                                vdisk.save()
                                ensure_safety = True
                        finally:
                            mutex.release()
                        if ensure_safety:
                            MDSServiceController.ensure_safety(vdisk)
                            VDiskController.dtl_checkup(vdisk_guid=vdisk.guid)

                        # Update the disk with information from the hypervisor
                        if vdisk.vmachine is None:
                            MessageController.fire(MessageController.Type.EVENT,
                                                   {'type': 'vdisk_attached',
                                                    'metadata': {'vmachine_name': vmachine.name,
                                                                 'vdisk_name': disk['name']}})
                        vdisk.vmachine = vmachine
                        vdisk.name = disk['name']
                        vdisk.order = disk['order']
                        vdisk.save()
                        vdisk_guids.append(vdisk.guid)
                        vdisks_synced += 1

            for vdisk in vmachine.vdisks:
                if vdisk.guid not in vdisk_guids:
                    MessageController.fire(MessageController.Type.EVENT,
                                           {'type': 'vdisk_detached',
                                            'metadata': {'vmachine_name': vmachine.name,
                                                         'vdisk_name': vdisk.name}})
                    vdisk.vmachine = None
                    vdisk.save()

            VMachineController._logger.info('Updating vMachine finished (name {0}, {1} vdisks (re)linked)'.format(
                vmachine.name, vdisks_synced
            ))
        except Exception as ex:
            VMachineController._logger.info('Error during vMachine update: {0}'.format(str(ex)))
            raise
コード例 #21
0
ファイル: vmachine.py プロジェクト: mflu/openvstorage_centos
    def update_vmachine_config(vmachine, vm_object, pmachine=None):
        """
        Update a vMachine configuration with a given vMachine configuration
        """
        try:
            vdisks_synced = 0
            if vmachine.name is None:
                MessageController.fire(MessageController.Type.EVENT,
                                       {'type': 'vmachine_created',
                                        'metadata': {'name': vm_object['name']}})
            elif vmachine.name != vm_object['name']:
                MessageController.fire(MessageController.Type.EVENT,
                                       {'type': 'vmachine_renamed',
                                        'metadata': {'old_name': vmachine.name,
                                                     'new_name': vm_object['name']}})
            if pmachine is not None:
                vmachine.pmachine = pmachine
            vmachine.name = vm_object['name']
            vmachine.hypervisor_id = vm_object['id']
            vmachine.devicename = vm_object['backing']['filename']
            vmachine.save()
            # Updating and linking disks
            storagedrivers = StorageDriverList.get_storagedrivers()
            datastores = dict([('{}:{}'.format(storagedriver.storage_ip, storagedriver.mountpoint), storagedriver) for storagedriver in storagedrivers])
            vdisk_guids = []
            for disk in vm_object['disks']:
                if disk['datastore'] in vm_object['datastores']:
                    datastore = vm_object['datastores'][disk['datastore']]
                    if datastore in datastores:
                        vdisk = VDiskList.get_by_devicename_and_vpool(disk['filename'], datastores[datastore].vpool)
                        if vdisk is None:
                            # The disk couldn't be located, but is in our datastore. We might be in a recovery scenario
                            vdisk = VDisk()
                            vdisk.vpool = datastores[datastore].vpool
                            vdisk.reload_client()
                            vdisk.devicename = disk['filename']
                            vdisk.volume_id = vdisk.storagedriver_client.get_volume_id(str(disk['backingfilename']))
                            vdisk.size = vdisk.info['volume_size']
                        # Update the disk with information from the hypervisor
                        if vdisk.vmachine is None:
                            MessageController.fire(MessageController.Type.EVENT,
                                                   {'type': 'vdisk_attached',
                                                    'metadata': {'vmachine_name': vmachine.name,
                                                                 'vdisk_name': disk['name']}})
                        vdisk.vmachine = vmachine
                        vdisk.name = disk['name']
                        vdisk.order = disk['order']
                        vdisk.save()
                        vdisk_guids.append(vdisk.guid)
                        vdisks_synced += 1

            for vdisk in vmachine.vdisks:
                if vdisk.guid not in vdisk_guids:
                    MessageController.fire(MessageController.Type.EVENT,
                                           {'type': 'vdisk_detached',
                                            'metadata': {'vmachine_name': vmachine.name,
                                                         'vdisk_name': vdisk.name}})
                    vdisk.vmachine = None
                    vdisk.save()

            logger.info('Updating vMachine finished (name {}, {} vdisks (re)linked)'.format(
                vmachine.name, vdisks_synced
            ))
        except Exception as ex:
            logger.info('Error during vMachine update: {0}'.format(str(ex)))
            raise
コード例 #22
0
    def build_service_structure(structure, previous_structure=None):
        """
        Builds an MDS service structure
        Example:
            structure = Helper.build_service_structure(
                {'vpools': [1],
                 'domains': [],
                 'storagerouters': [1],
                 'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
                 'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
                 'storagerouter_domains': []}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
            )
        """
        if previous_structure is None:
            previous_structure = {}
        vdisks = previous_structure.get('vdisks', {})
        vpools = previous_structure.get('vpools', {})
        domains = previous_structure.get('domains', {})
        services = previous_structure.get('services', {})
        mds_services = previous_structure.get('mds_services', {})
        storagerouters = previous_structure.get('storagerouters', {})
        storagedrivers = previous_structure.get('storagedrivers', {})
        storagerouter_domains = previous_structure.get('storagerouter_domains',
                                                       {})

        service_type = ServiceTypeList.get_by_name('MetadataServer')
        if service_type is None:
            service_type = ServiceType()
            service_type.name = 'MetadataServer'
            service_type.save()
        srclients = {}
        for domain_id in structure.get('domains', []):
            if domain_id not in domains:
                domain = Domain()
                domain.name = 'domain_{0}'.format(domain_id)
                domain.save()
                domains[domain_id] = domain
        for vpool_id in structure.get('vpools', []):
            if vpool_id not in vpools:
                vpool = VPool()
                vpool.name = str(vpool_id)
                vpool.status = 'RUNNING'
                vpool.save()
                vpools[vpool_id] = vpool
            else:
                vpool = vpools[vpool_id]
            srclients[vpool_id] = StorageRouterClient(vpool.guid, None)
        for sr_id in structure.get('storagerouters', []):
            if sr_id not in storagerouters:
                storagerouter = StorageRouter()
                storagerouter.name = str(sr_id)
                storagerouter.ip = '10.0.0.{0}'.format(sr_id)
                storagerouter.rdma_capable = False
                storagerouter.node_type = 'MASTER'
                storagerouter.machine_id = str(sr_id)
                storagerouter.save()
                storagerouters[sr_id] = storagerouter
                disk = Disk()
                disk.storagerouter = storagerouter
                disk.state = 'OK'
                disk.name = '/dev/uda'
                disk.size = 1 * 1024**4
                disk.is_ssd = True
                disk.aliases = ['/dev/uda']
                disk.save()
                partition = DiskPartition()
                partition.offset = 0
                partition.size = disk.size
                partition.aliases = ['/dev/uda-1']
                partition.state = 'OK'
                partition.mountpoint = '/tmp/unittest/sr_{0}/disk_1/partition_1'.format(
                    sr_id)
                partition.disk = disk
                partition.roles = [
                    DiskPartition.ROLES.DB, DiskPartition.ROLES.SCRUB
                ]
                partition.save()
        for sd_id, vpool_id, sr_id in structure.get('storagedrivers', ()):
            if sd_id not in storagedrivers:
                storagedriver = StorageDriver()
                storagedriver.vpool = vpools[vpool_id]
                storagedriver.storagerouter = storagerouters[sr_id]
                storagedriver.name = str(sd_id)
                storagedriver.mountpoint = '/'
                storagedriver.cluster_ip = storagerouters[sr_id].ip
                storagedriver.storage_ip = '10.0.1.{0}'.format(sr_id)
                storagedriver.storagedriver_id = str(sd_id)
                storagedriver.ports = {
                    'management': 1,
                    'xmlrpc': 2,
                    'dtl': 3,
                    'edge': 4
                }
                storagedriver.save()
                storagedrivers[sd_id] = storagedriver
                Helper._set_vpool_storage_driver_configuration(
                    vpool=vpools[vpool_id], storagedriver=storagedriver)
        for mds_id, sd_id in structure.get('mds_services', ()):
            if mds_id not in mds_services:
                sd = storagedrivers[sd_id]
                s_id = '{0}-{1}'.format(sd.storagerouter.name, mds_id)
                service = Service()
                service.name = s_id
                service.storagerouter = sd.storagerouter
                service.ports = [mds_id]
                service.type = service_type
                service.save()
                services[s_id] = service
                mds_service = MDSService()
                mds_service.service = service
                mds_service.number = 0
                mds_service.capacity = 10
                mds_service.vpool = sd.vpool
                mds_service.save()
                mds_services[mds_id] = mds_service
                StorageDriverController.add_storagedriverpartition(
                    sd, {
                        'size': None,
                        'role': DiskPartition.ROLES.DB,
                        'sub_role': StorageDriverPartition.SUBROLE.MDS,
                        'partition': sd.storagerouter.disks[0].partitions[0],
                        'mds_service': mds_service
                    })
        for vdisk_id, storage_driver_id, vpool_id, mds_id in structure.get(
                'vdisks', ()):
            if vdisk_id not in vdisks:
                vpool = vpools[vpool_id]
                devicename = 'vdisk_{0}'.format(vdisk_id)
                mds_backend_config = Helper._generate_mdsmetadatabackendconfig(
                    [] if mds_id is None else [mds_services[mds_id]])
                volume_id = srclients[vpool_id].create_volume(
                    devicename, mds_backend_config, 0, str(storage_driver_id))
                vdisk = VDisk()
                vdisk.name = str(vdisk_id)
                vdisk.devicename = devicename
                vdisk.volume_id = volume_id
                vdisk.vpool = vpool
                vdisk.size = 0
                vdisk.save()
                vdisk.reload_client('storagedriver')
                vdisks[vdisk_id] = vdisk
        for srd_id, sr_id, domain_id, backup in structure.get(
                'storagerouter_domains', ()):
            if srd_id not in storagerouter_domains:
                sr_domain = StorageRouterDomain()
                sr_domain.backup = backup
                sr_domain.domain = domains[domain_id]
                sr_domain.storagerouter = storagerouters[sr_id]
                sr_domain.save()
                storagerouter_domains[srd_id] = sr_domain
        return {
            'vdisks': vdisks,
            'vpools': vpools,
            'domains': domains,
            'services': services,
            'service_type': service_type,
            'mds_services': mds_services,
            'storagerouters': storagerouters,
            'storagedrivers': storagedrivers,
            'storagerouter_domains': storagerouter_domains
        }
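A hypothetical call to the helper above, following the tuple formats the function actually unpacks (including the 'vdisks' and 'storagerouter_domains' entries handled near the end); the returned dictionary exposes the created DAL objects by their numeric ids. This is a usage sketch, not code taken from the original project:

    structure = Helper.build_service_structure(
        {'vpools': [1],
         'domains': [],
         'storagerouters': [1],
         'storagedrivers': [(1, 1, 1)],   # (<id>, <vpool_id>, <storagerouter_id>)
         'mds_services': [(1, 1)],        # (<id>, <storagedriver_id>)
         'vdisks': [(1, 1, 1, 1)],        # (<id>, <storagedriver_id>, <vpool_id>, <mds_id>)
         'storagerouter_domains': []}     # (<id>, <storagerouter_id>, <domain_id>, <backup>)
    )
    vpool = structure['vpools'][1]
    storagedriver = structure['storagedrivers'][1]
    mds_service = structure['mds_services'][1]
    vdisk = structure['vdisks'][1]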
コード例 #23
0
    def test_sync(self):
        """
        Validates whether the sync works as expected
        """
        structure = DalHelper.build_dal_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}       # (<id>, <storagedriver_id>)
        )
        vpool = structure['vpools'][1]
        storagedriver = structure['storagedrivers'][1]
        mds_service = structure['mds_services'][1]
        # noinspection PyArgumentList
        backend_config = MDSMetaDataBackendConfig([
            MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                          port=mds_service.service.ports[0])
        ])
        srclient = StorageRouterClient(vpool.guid, None)

        VDiskController.create_new('one', 1024**3, storagedriver.guid)
        VDiskController.create_new('two', 1024**3, storagedriver.guid)

        vdisks = VDiskList.get_vdisks()
        self.assertEqual(len(vdisks), 2)
        self.assertEqual(len(srclient.list_volumes()), 2)

        VDiskController.sync_with_reality()
        vdisks = VDiskList.get_vdisks()
        self.assertEqual(len(vdisks), 2)
        self.assertEqual(len(srclient.list_volumes()), 2)

        volume_id = srclient.create_volume('/three.raw', backend_config,
                                           1024**3,
                                           storagedriver.storagedriver_id)

        vdisks = VDiskList.get_vdisks()
        self.assertEqual(len(vdisks), 2)
        self.assertEqual(len(srclient.list_volumes()), 3)

        VDiskController.sync_with_reality()
        vdisks = VDiskList.get_vdisks()
        self.assertEqual(len(vdisks), 3)
        self.assertEqual(len(srclient.list_volumes()), 3)

        vdisk = VDiskList.get_vdisk_by_volume_id(volume_id)
        self.assertEqual(vdisk.devicename, '/three.raw')

        vdisk = VDisk()
        vdisk.volume_id = 'foo'
        vdisk.name = 'foo'
        vdisk.devicename = 'foo.raw'
        vdisk.size = 1024**3
        vdisk.vpool = vpool
        vdisk.save()
        vdisks = VDiskList.get_vdisks()
        self.assertEqual(len(vdisks), 4)
        self.assertEqual(len(srclient.list_volumes()), 3)

        VDiskController.sync_with_reality()
        vdisks = VDiskList.get_vdisks()
        self.assertEqual(len(vdisks), 3)
        self.assertEqual(len(srclient.list_volumes()), 3)

        with self.assertRaises(ObjectNotFoundException):
            vdisk.save()
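The assertions above exercise both directions of `VDiskController.sync_with_reality`: a volume created directly through the StorageRouterClient gains a DAL VDisk, while a DAL VDisk without a backing volume is removed (its subsequent save raises ObjectNotFoundException). A rough sketch of that reconciliation, assuming the DAL's `vpool.vdisks` relation and using only `list_volumes()`; this is illustrative and not the controller's actual implementation:

    def _sync_sketch(vpool):
        # Illustrative per-vPool reconciliation between the DAL model and the storagedriver
        srclient = StorageRouterClient(vpool.guid, None)
        backend_volume_ids = set(srclient.list_volumes())
        modelled = dict((vdisk.volume_id, vdisk) for vdisk in vpool.vdisks)
        # Backend volumes unknown to the model would get a new VDisk object registered here
        missing_in_model = backend_volume_ids - set(modelled)
        # Modelled vdisks whose backing volume is gone are removed from the model
        for volume_id, vdisk in modelled.items():
            if volume_id not in backend_volume_ids:
                vdisk.delete()
        return missing_in_model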
コード例 #24
0
    def create_cloned_volume(self, volume, src_vref):
        """Create a cloned volume from another volume.
        Called on "cinder create --source-volid ... "

        :param volume: volume reference - target volume (sqlalchemy Model)
        :param src_vref: volume reference - source volume (sqlalchemy Model)

        OVS: Create clone from template if the source is a template
             Create volume from snapshot if the source is a volume
             - create snapshot of source volume if it doesn't have snapshots
        """
        _debug_vol_info('CREATE_CLONED_VOL', volume)
        _debug_vol_info('CREATE_CLONED_VOL Source', src_vref)

        mountpoint = self._get_hostname_mountpoint(str(volume.host))
        name = volume.display_name
        if not name:
            name = volume.name
            volume.display_name = volume.name

        pmachineguid = self._find_ovs_model_pmachine_guid_by_hostname(
            str(volume.host))

        #source
        source_ovs_disk = self._find_ovs_model_disk_by_location(
            str(src_vref.provider_location), src_vref.host)
        if source_ovs_disk.info['object_type'] == 'TEMPLATE':
            LOG.info('[CREATE_FROM_TEMPLATE] VDisk %s is a template' %
                     source_ovs_disk.devicename)

            # cloning from a template
            LOG.debug('[CREATE FROM TEMPLATE] ovs_disk %s ' %
                      (source_ovs_disk.devicename))

            disk_meta = VDiskController.create_from_template(
                diskguid=source_ovs_disk.guid,
                machinename="",
                devicename=str(name),
                pmachineguid=pmachineguid,
                machineguid=None,
                storagedriver_guid=None)
            volume['provider_location'] = '{}{}'.format(
                mountpoint, disk_meta['backingdevice'])
            LOG.debug('[CREATE FROM TEMPLATE] New volume %s' %
                      volume['provider_location'])
            vdisk = VDisk(disk_meta['diskguid'])
            vdisk.cinder_id = volume.id
            vdisk.name = name
            LOG.debug('[CREATE FROM TEMPLATE] Updating meta %s %s' %
                      (volume.id, name))
            vdisk.save()
        else:
            LOG.info('[THIN CLONE] VDisk %s is not a template' %
                     source_ovs_disk.devicename)
            # We do not yet support a full volume clone
            # - that requires "emancipate" functionality
            # So for now we take a snapshot
            # (or the latest existing snapshot) and clone from that snapshot
            available_snapshots = [
                snapshot for snapshot in source_ovs_disk.snapshots if
                'in_backend' not in snapshot or snapshot['in_backend'] is True
            ]
            if len(available_snapshots) == 0:
                metadata = {
                    'label': "Cinder clone snapshot {0}".format(name),
                    'is_consistent': False,
                    'timestamp': time.time(),
                    'machineguid': source_ovs_disk.vmachine_guid,
                    'is_automatic': False
                }

                LOG.debug('CREATE_SNAP %s %s' % (name, str(metadata)))
                snapshotid = VDiskController.create_snapshot(
                    diskguid=source_ovs_disk.guid,
                    metadata=metadata,
                    snapshotid=None)
                LOG.debug('CREATE_SNAP OK')

                OVSVolumeDriver._wait_for_snapshot(source_ovs_disk, snapshotid)
            else:
                snapshotid = available_snapshots[-1]['guid']
            LOG.debug('[CREATE CLONE FROM SNAP] %s ' % snapshotid)

            disk_meta = VDiskController.clone(diskguid=source_ovs_disk.guid,
                                              snapshotid=snapshotid,
                                              devicename=str(name),
                                              pmachineguid=pmachineguid,
                                              machinename="",
                                              machineguid=None)
            volume['provider_location'] = '{}{}'.format(
                mountpoint, disk_meta['backingdevice'])

            LOG.debug('[CLONE FROM SNAP] Meta: %s' % str(disk_meta))
            LOG.debug('[CLONE FROM SNAP] New volume %s' %
                      volume['provider_location'])
            vdisk = VDisk(disk_meta['diskguid'])
            vdisk.cinder_id = volume.id
            vdisk.name = name
            vdisk.save()
        return {
            'provider_location': volume['provider_location'],
            'display_name': volume['display_name']
        }
コード例 #25
0
ファイル: vdisk.py プロジェクト: BillTheBest/openvstorage
    def create_from_template(
        diskguid, machinename, devicename, pmachineguid, machineguid=None, storagedriver_guid=None
    ):
        """
        Create a disk from a template

        @param diskguid: guid of the template disk to clone from
        @param machinename: name of the machine (eg: myVM); used to build the target disk path
        @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
        @param pmachineguid: guid of the pmachine to create the new vdisk on
        @param machineguid: guid of the machine to assign the new disk to
        @param storagedriver_guid: guid of the storagedriver to use; defaults to the template's storagedriver
        @return diskguid: guid of the new disk
        """

        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        disk_path = hypervisor.get_disk_path(machinename, devicename)

        description = "{} {}".format(machinename, devicename)
        properties_to_clone = [
            "description",
            "size",
            "type",
            "retentionpolicyid",
            "snapshotpolicyid",
            "vmachine",
            "vpool",
        ]

        vdisk = VDisk(diskguid)
        if vdisk.vmachine and not vdisk.vmachine.is_vtemplate:
            # Disk might not be attached to a vmachine, but still be a template
            raise RuntimeError("The given vdisk does not belong to a template")

        if storagedriver_guid is not None:
            storagedriver_id = StorageDriver(storagedriver_guid).storagedriver_id
        else:
            storagedriver_id = vdisk.storagedriver_id
        storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
        if storagedriver is None:
            raise RuntimeError("Could not find StorageDriver with id {0}".format(storagedriver_id))

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=properties_to_clone)
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(disk_path)
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = "{}-clone".format(vdisk.name)
        new_vdisk.description = description
        new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
        new_vdisk.save()

        mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, vdisk.vpool)
        if mds_service is None:
            raise RuntimeError("Could not find a MDS service")

        logger.info(
            "Create disk from template {} to new disk {} to location {}".format(vdisk.name, new_vdisk.name, disk_path)
        )
        try:
            volume_id = vdisk.storagedriver_client.create_clone_from_template(
                target_path=disk_path,
                metadata_backend_config=MDSMetaDataBackendConfig(
                    [
                        MDSNodeConfig(
                            address=str(mds_service.service.storagerouter.ip), port=mds_service.service.ports[0]
                        )
                    ]
                ),
                parent_volume_id=str(vdisk.volume_id),
                node_id=str(storagedriver_id),
            )
            new_vdisk.volume_id = volume_id
            new_vdisk.save()
            MDSServiceController.ensure_safety(new_vdisk)

        except Exception as ex:
            logger.error("Clone disk on volumedriver level failed with exception: {0}".format(str(ex)))
            new_vdisk.delete()
            raise

        # Allow "regular" users to use this volume
        # Do not use run for a user other than ovs as it blocks asking for the root password
        # Do not use run_local for another user as it doesn't have the required permissions
        # So this method only works if it is called by root or ovs
        storagerouter = StorageRouter(new_vdisk.storagerouter_guid)
        mountpoint = storagedriver.mountpoint
        location = "{0}{1}".format(mountpoint, disk_path)
        client = SSHClient.load(storagerouter.pmachine.ip)
        print(client.run('chmod 664 "{0}"'.format(location)))
        print(client.run('chown ovs:ovs "{0}"'.format(location)))
        return {"diskguid": new_vdisk.guid, "name": new_vdisk.name, "backingdevice": disk_path}
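A hedged usage sketch for the method above, matching how other snippets in this listing call `VDiskController.create_from_template`; `template_vdisk` and `pmachine` are placeholder DAL objects, and the returned keys follow the method's final return statement:

    disk_meta = VDiskController.create_from_template(
        diskguid=template_vdisk.guid,      # template vdisk to clone from
        machinename='myVM',                # used to build the target location on the vPool
        devicename='mydisk-flat.vmdk',
        pmachineguid=pmachine.guid,
        machineguid=None,                  # leave the clone unattached to a vMachine
        storagedriver_guid=None)           # fall back to the template's own storagedriver
    new_disk_guid = disk_meta['diskguid']
    backing_device = disk_meta['backingdevice']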
コード例 #26
0
ファイル: vmachine.py プロジェクト: teotikalki/openvstorage
    def update_vmachine_config(vmachine, vm_object, pmachine=None):
        """
        Update a vMachine configuration with a given vMachine configuration
        """
        try:
            vdisks_synced = 0
            if vmachine.name is None:
                MessageController.fire(
                    MessageController.Type.EVENT, {
                        'type': 'vmachine_created',
                        'metadata': {
                            'name': vm_object['name']
                        }
                    })
            elif vmachine.name != vm_object['name']:
                MessageController.fire(
                    MessageController.Type.EVENT, {
                        'type': 'vmachine_renamed',
                        'metadata': {
                            'old_name': vmachine.name,
                            'new_name': vm_object['name']
                        }
                    })
            if pmachine is not None:
                vmachine.pmachine = pmachine
            vmachine.name = vm_object['name']
            vmachine.hypervisor_id = vm_object['id']
            vmachine.devicename = vm_object['backing']['filename']
            vmachine.save()
            # Updating and linking disks
            storagedrivers = StorageDriverList.get_storagedrivers()
            datastores = dict([('{}:{}'.format(storagedriver.storage_ip,
                                               storagedriver.mountpoint),
                                storagedriver)
                               for storagedriver in storagedrivers])
            vdisk_guids = []
            for disk in vm_object['disks']:
                if disk['datastore'] in vm_object['datastores']:
                    datastore = vm_object['datastores'][disk['datastore']]
                    if datastore in datastores:
                        vdisk = VDiskList.get_by_devicename_and_vpool(
                            disk['filename'], datastores[datastore].vpool)
                        if vdisk is None:
                            # The disk couldn't be located, but is in our datastore. We might be in a recovery scenario
                            vdisk = VDisk()
                            vdisk.vpool = datastores[datastore].vpool
                            vdisk.reload_client()
                            vdisk.devicename = disk['filename']
                            vdisk.volume_id = vdisk.storagedriver_client.get_volume_id(
                                str(disk['backingfilename']))
                            vdisk.size = vdisk.info['volume_size']
                            MDSServiceController.ensure_safety(vdisk)
                        # Update the disk with information from the hypervisor
                        if vdisk.vmachine is None:
                            MessageController.fire(
                                MessageController.Type.EVENT, {
                                    'type': 'vdisk_attached',
                                    'metadata': {
                                        'vmachine_name': vmachine.name,
                                        'vdisk_name': disk['name']
                                    }
                                })
                        vdisk.vmachine = vmachine
                        vdisk.name = disk['name']
                        vdisk.order = disk['order']
                        vdisk.save()
                        vdisk_guids.append(vdisk.guid)
                        vdisks_synced += 1

            for vdisk in vmachine.vdisks:
                if vdisk.guid not in vdisk_guids:
                    MessageController.fire(
                        MessageController.Type.EVENT, {
                            'type': 'vdisk_detached',
                            'metadata': {
                                'vmachine_name': vmachine.name,
                                'vdisk_name': vdisk.name
                            }
                        })
                    vdisk.vmachine = None
                    vdisk.save()

            logger.info(
                'Updating vMachine finished (name {}, {} vdisks (re)linked)'.
                format(vmachine.name, vdisks_synced))
        except Exception as ex:
            logger.info('Error during vMachine update: {0}'.format(str(ex)))
            raise
コード例 #27
0
    def test_happypath(self):
        """
        Validates the happy path; hourly snapshots are taken with a few manual consistent ones
        every now and then. The delete policy is executed every day
        """
        # Setup
        # There are 2 machines; one with two disks, one with one disk and an additional disk
        failure_domain = FailureDomain()
        failure_domain.name = "Test"
        failure_domain.save()
        backend_type = BackendType()
        backend_type.name = "BackendType"
        backend_type.code = "BT"
        backend_type.save()
        vpool = VPool()
        vpool.name = "vpool"
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = "PMachine"
        pmachine.username = "******"
        pmachine.ip = "127.0.0.1"
        pmachine.hvtype = "VMWARE"
        pmachine.save()
        storage_router = StorageRouter()
        storage_router.name = "storage_router"
        storage_router.ip = "127.0.0.1"
        storage_router.pmachine = pmachine
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.primary_failure_domain = failure_domain
        storage_router.save()
        disk = Disk()
        disk.name = "physical_disk_1"
        disk.path = "/dev/non-existent"
        disk.size = 500 * 1024 ** 3
        disk.state = "OK"
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.id = "disk_partition_id"
        disk_partition.disk = disk
        disk_partition.path = "/dev/disk/non-existent"
        disk_partition.size = 400 * 1024 ** 3
        disk_partition.state = "OK"
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = "/var/tmp"
        disk_partition.save()
        vmachine_1 = VMachine()
        vmachine_1.name = "vmachine_1"
        vmachine_1.devicename = "dummy"
        vmachine_1.pmachine = pmachine
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = "vdisk_1_1"
        vdisk_1_1.volume_id = "vdisk_1_1"
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = "dummy"
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        vdisk_1_2 = VDisk()
        vdisk_1_2.name = "vdisk_1_2"
        vdisk_1_2.volume_id = "vdisk_1_2"
        vdisk_1_2.vmachine = vmachine_1
        vdisk_1_2.vpool = vpool
        vdisk_1_2.devicename = "dummy"
        vdisk_1_2.size = 0
        vdisk_1_2.save()
        vdisk_1_2.reload_client()
        vmachine_2 = VMachine()
        vmachine_2.name = "vmachine_2"
        vmachine_2.devicename = "dummy"
        vmachine_2.pmachine = pmachine
        vmachine_2.save()
        vdisk_2_1 = VDisk()
        vdisk_2_1.name = "vdisk_2_1"
        vdisk_2_1.volume_id = "vdisk_2_1"
        vdisk_2_1.vmachine = vmachine_2
        vdisk_2_1.vpool = vpool
        vdisk_2_1.devicename = "dummy"
        vdisk_2_1.size = 0
        vdisk_2_1.save()
        vdisk_2_1.reload_client()
        vdisk_3 = VDisk()
        vdisk_3.name = "vdisk_3"
        vdisk_3.volume_id = "vdisk_3"
        vdisk_3.vpool = vpool
        vdisk_3.devicename = "dummy"
        vdisk_3.size = 0
        vdisk_3.save()
        vdisk_3.reload_client()

        for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            [dynamic for dynamic in disk._dynamics if dynamic.name == "snapshots"][0].timeout = 0

        # Run the testing scenario
        debug = True
        amount_of_days = 50
        base = datetime.now().date()
        day = timedelta(1)
        minute = 60
        hour = minute * 60

        for d in xrange(0, amount_of_days):
            base_timestamp = DeleteSnapshots._make_timestamp(base, day * d)
            print ""
            print "Day cycle: {0}: {1}".format(d, datetime.fromtimestamp(base_timestamp).strftime("%Y-%m-%d"))

            # At the start of the day, delete snapshot policy runs at 00:30
            print "- Deleting snapshots"
            ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))

            # Validate snapshots
            print "- Validating snapshots"
            for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
                self._validate(vdisk, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create a non-consistent snapshot every hour, between 2:00 and 22:00
            # - Create a consistent snapshot at 6:30, 12:30, 18:30
            print "- Creating snapshots"
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                for vm in [vmachine_1, vmachine_2]:
                    VMachineController.snapshot(
                        machineguid=vm.guid,
                        label="ss_i_{0}:00".format(str(h)),
                        is_consistent=False,
                        timestamp=timestamp,
                    )
                    if h in [6, 12, 18]:
                        ts = timestamp + (minute * 30)
                        VMachineController.snapshot(
                            machineguid=vm.guid, label="ss_c_{0}:30".format(str(h)), is_consistent=True, timestamp=ts
                        )

                VDiskController.create_snapshot(
                    diskguid=vdisk_3.guid,
                    metadata={
                        "label": "ss_i_{0}:00".format(str(h)),
                        "is_consistent": False,
                        "timestamp": str(timestamp),
                        "machineguid": None,
                    },
                )
                if h in [6, 12, 18]:
                    ts = timestamp + (minute * 30)
                    VDiskController.create_snapshot(
                        diskguid=vdisk_3.guid,
                        metadata={
                            "label": "ss_c_{0}:30".format(str(h)),
                            "is_consistent": True,
                            "timestamp": str(ts),
                            "machineguid": None,
                        },
                    )
コード例 #28
0
ファイル: vdisk.py プロジェクト: jamie-liu/openvstorage
    def create_from_template(diskguid, devicename, pmachineguid, machinename='', machineguid=None):
        """
        Create a disk from a template

        :param diskguid: Guid of the disk
        :param machinename: Name of the machine
        :param devicename: Device file name for the disk (eg: my_disk-flat.vmdk)
        :param pmachineguid: Guid of pmachine to create new vdisk on
        :param machineguid: Guid of the machine to assign disk to
        :return diskguid: Guid of new disk
        """
        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        if machineguid is not None:
            new_vdisk_vmachine = VMachine(machineguid)
            machinename = new_vdisk_vmachine.name
        disk_path = hypervisor.get_disk_path(machinename, devicename)

        description = '{0} {1}'.format(machinename, devicename)
        properties_to_clone = [
            'description', 'size', 'type', 'retentionpolicyid',
            'snapshotpolicyid', 'vmachine', 'vpool']

        vdisk = VDisk(diskguid)
        if vdisk.vmachine and not vdisk.vmachine.is_vtemplate:
            # Disk might not be attached to a vmachine, but still be a template
            raise RuntimeError('The given vdisk does not belong to a template')
        if not vdisk.is_vtemplate:
            raise RuntimeError('The given vdisk is not a template')

        storagedriver = None
        for sd in vdisk.vpool.storagedrivers:
            if sd.storagerouter_guid in pmachine.storagerouters_guids:
                storagedriver = sd
                break

        if storagedriver is None:
            raise RuntimeError('Could not find Storage Driver')

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=properties_to_clone)
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(disk_path)
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = '{0}-clone'.format(vdisk.name)
        new_vdisk.description = description
        new_vdisk.vmachine = new_vdisk_vmachine if machineguid else vdisk.vmachine
        new_vdisk.save()

        mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, new_vdisk.vpool)
        if mds_service is None:
            raise RuntimeError('Could not find a MDS service')

        logger.info('Create disk from template {0} to new disk {1} to location {2}'.format(vdisk.name, new_vdisk.name, disk_path))

        try:
            backend_config = MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                           port=mds_service.service.ports[0])
            volume_id = vdisk.storagedriver_client.create_clone_from_template(target_path=disk_path,
                                                                              metadata_backend_config=MDSMetaDataBackendConfig([backend_config]),
                                                                              parent_volume_id=str(vdisk.volume_id),
                                                                              node_id=str(storagedriver.storagedriver_id))
            new_vdisk.volume_id = volume_id
            new_vdisk.save()
            MDSServiceController.ensure_safety(new_vdisk)
            VDiskController.dtl_checkup.delay(vdisk_guid=new_vdisk.guid)
        except Exception as ex:
            logger.error('Clone disk on volumedriver level failed with exception: {0}'.format(str(ex)))
            try:
                VDiskController.clean_bad_disk(new_vdisk.guid)
            except Exception as ex2:
                logger.exception('Exception during exception handling of "create_clone_from_template" : {0}'.format(str(ex2)))
            raise ex

        return {'diskguid': new_vdisk.guid, 'name': new_vdisk.name,
                'backingdevice': disk_path}
コード例 #29
0
    def test_happypath(self):
        """
        Validates the happy path; hourly snapshots are taken with a few manual consistent ones
        every now and then. The delete policy is executed every day
        """
        # Setup
        # There are 2 machines; one with two disks, one with one disk and an additional disk
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.backend_type = BackendType()
        vpool.save()
        vmachine_1 = VMachine()
        vmachine_1.name = 'vmachine_1'
        vmachine_1.devicename = 'dummy'
        vmachine_1.pmachine = PMachine()
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        vdisk_1_2 = VDisk()
        vdisk_1_2.name = 'vdisk_1_2'
        vdisk_1_2.volume_id = 'vdisk_1_2'
        vdisk_1_2.vmachine = vmachine_1
        vdisk_1_2.vpool = vpool
        vdisk_1_2.devicename = 'dummy'
        vdisk_1_2.size = 0
        vdisk_1_2.save()
        vdisk_1_2.reload_client()
        vmachine_2 = VMachine()
        vmachine_2.name = 'vmachine_2'
        vmachine_2.devicename = 'dummy'
        vmachine_2.pmachine = PMachine()
        vmachine_2.save()
        vdisk_2_1 = VDisk()
        vdisk_2_1.name = 'vdisk_2_1'
        vdisk_2_1.volume_id = 'vdisk_2_1'
        vdisk_2_1.vmachine = vmachine_2
        vdisk_2_1.vpool = vpool
        vdisk_2_1.devicename = 'dummy'
        vdisk_2_1.size = 0
        vdisk_2_1.save()
        vdisk_2_1.reload_client()
        vdisk_3 = VDisk()
        vdisk_3.name = 'vdisk_3'
        vdisk_3.volume_id = 'vdisk_3'
        vdisk_3.vpool = vpool
        vdisk_3.devicename = 'dummy'
        vdisk_3.size = 0
        vdisk_3.save()
        vdisk_3.reload_client()

        for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        # Run the testing scenario
        debug = True
        amount_of_days = 50
        now = int(mktime(datetime.now().date().timetuple()))  # Last night
        minute = 60
        hour = minute * 60
        day = hour * 24
        for d in xrange(0, amount_of_days):
            base_timestamp = now + (day * d)
            print ''
            print 'Day cycle: {}: {}'.format(
                d,
                datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d')
            )

            # At the start of the day, delete snapshot policy runs at 00:30
            print '- Deleting snapshots'
            ScheduledTaskController.deletescrubsnapshots(timestamp=base_timestamp + (minute * 30))

            # Validate snapshots
            print '- Validating snapshots'
            for vdisk in [vdisk_3]:  # [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
                self._validate(vdisk, d, now, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create non consistent snapshot every hour, between 2:00 and 22:00
            # - Create consistent snapshot at 6:30, 12:30, 18:30
            print '- Creating snapshots'
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                for vm in [vmachine_1, vmachine_2]:
                    VMachineController.snapshot(machineguid=vm.guid,
                                                label='ss_i_{}:00'.format(str(h)),
                                                is_consistent=False,
                                                timestamp=timestamp)
                    if h in [6, 12, 18]:
                        ts = (timestamp + (minute * 30))
                        VMachineController.snapshot(machineguid=vm.guid,
                                                    label='ss_c_{}:30'.format(str(h)),
                                                    is_consistent=True,
                                                    timestamp=ts)

                VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                metadata={'label': 'ss_i_{}:00'.format(str(h)),
                                                          'is_consistent': False,
                                                          'timestamp': timestamp,
                                                          'machineguid': None})
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                    metadata={'label': 'ss_c_{}:30'.format(str(h)),
                                                              'is_consistent': True,
                                                              'timestamp': ts,
                                                              'machineguid': None})
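
The loop above spells out the snapshot schedule inline. As an illustrative helper (an assumption, not part of the original test), the same schedule can be expressed as data, which makes the expected number of snapshots per day easier to reason about:

def build_snapshot_schedule(base_timestamp):
    """Illustrative only: (timestamp, label, is_consistent) tuples for one simulated day,
    mirroring the scenario above (inconsistent snapshots every hour from 2:00 to 22:00,
    consistent ones at 6:30, 12:30 and 18:30)."""
    minute, hour = 60, 60 * 60
    schedule = []
    for h in xrange(2, 23):
        timestamp = base_timestamp + hour * h
        schedule.append((timestamp, 'ss_i_{0}:00'.format(h), False))
        if h in [6, 12, 18]:
            schedule.append((timestamp + minute * 30, 'ss_c_{0}:30'.format(h), True))
    return schedule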
Code example #30
    def _prepare(self):
        # Setup
        failure_domain = FailureDomain()
        failure_domain.name = 'Test'
        failure_domain.save()
        backend_type = BackendType()
        backend_type.name = 'BackendType'
        backend_type.code = 'BT'
        backend_type.save()
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = 'PMachine'
        pmachine.username = '******'
        pmachine.ip = '127.0.0.1'
        pmachine.hvtype = 'KVM'
        pmachine.save()
        vmachine_1 = VMachine()
        vmachine_1.name = 'vmachine_1'
        vmachine_1.devicename = 'dummy'
        vmachine_1.pmachine = pmachine
        vmachine_1.is_vtemplate = True
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.pmachine = pmachine
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.primary_failure_domain = failure_domain
        storage_router.save()
        storagedriver = StorageDriver()
        storagedriver.vpool = vpool
        storagedriver.storagerouter = storage_router
        storagedriver.name = '1'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storage_router.ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = '1'
        storagedriver.ports = [1, 2, 3]
        storagedriver.save()
        service_type = ServiceType()
        service_type.name = 'MetadataServer'
        service_type.save()
        s_id = '{0}-{1}'.format(storagedriver.storagerouter.name, '1')
        service = Service()
        service.name = s_id
        service.storagerouter = storagedriver.storagerouter
        service.ports = [1]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storagedriver.vpool
        mds_service.save()

        def ensure_safety(vdisk):
            pass

        class Dtl_Checkup():
            @staticmethod
            def delay(vpool_guid=None,
                      vdisk_guid=None,
                      storagerouters_to_exclude=None):
                pass

        MDSServiceController.ensure_safety = staticmethod(ensure_safety)
        VDiskController.dtl_checkup = Dtl_Checkup
        return vdisk_1_1, pmachine
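
For orientation, a test built on this fixture might look roughly like the sketch below. The method name and assertion are hypothetical, and whether it runs as-is depends on the mocked storagedriver client the test suite installs elsewhere.

    def test_create_from_template_sketch(self):
        # Hypothetical sketch, not part of the original suite. It relies on
        # _prepare() above having patched MDSServiceController.ensure_safety
        # and VDiskController.dtl_checkup.
        vdisk, pmachine = self._prepare()
        result = VDiskController.create_from_template(diskguid=vdisk.guid,
                                                      machinename='vmachine_2',
                                                      devicename='clone_device',
                                                      pmachineguid=pmachine.guid)
        self.assertIn('diskguid', result)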
Code example #31
    def test_clone_snapshot(self):
        """
        Validates that a snapshot that has clones will not be deleted while other snapshots will be deleted
        """
        # Setup
        # There are 2 disks, second one cloned from a snapshot of the first
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.save()
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.aliases = ['/dev/non-existent']
        disk.size = 500 * 1024**3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.disk = disk
        disk_partition.aliases = ['/dev/disk/non-existent']
        disk_partition.size = 400 * 1024**3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        storage_driver = StorageDriver()
        storage_driver.vpool = vpool
        storage_driver.storagerouter = storage_router
        storage_driver.name = 'storage_driver_1'
        storage_driver.mountpoint = '/'
        storage_driver.cluster_ip = storage_router.ip
        storage_driver.storage_ip = '127.0.0.1'
        storage_driver.storagedriver_id = 'storage_driver_1'
        storage_driver.ports = {
            'management': 1,
            'xmlrpc': 2,
            'dtl': 3,
            'edge': 4
        }
        storage_driver.save()
        service_type = ServiceType()
        service_type.name = 'MetadataServer'
        service_type.save()
        service = Service()
        service.name = 'service_1'
        service.storagerouter = storage_driver.storagerouter
        service.ports = [1]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storage_driver.vpool
        mds_service.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client('storagedriver')

        [dynamic for dynamic in vdisk_1_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            print 'Running in Travis, reducing output.'

        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        base_timestamp = self._make_timestamp(base, day)
        minute = 60
        hour = minute * 60
        for h in [6, 12, 18]:
            timestamp = base_timestamp + (hour * h)
            VDiskController.create_snapshot(vdisk_guid=vdisk_1_1.guid,
                                            metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                      'is_consistent': True,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})

        base_snapshot_guid = vdisk_1_1.snapshots[0]['guid']  # Oldest
        clone_vdisk = VDisk()
        clone_vdisk.name = 'clone_vdisk'
        clone_vdisk.volume_id = 'clone_vdisk'
        clone_vdisk.vpool = vpool
        clone_vdisk.devicename = 'dummy'
        clone_vdisk.parentsnapshot = base_snapshot_guid
        clone_vdisk.size = 0
        clone_vdisk.save()
        clone_vdisk.reload_client('storagedriver')

        for h in [6, 12, 18]:
            timestamp = base_timestamp + (hour * h)
            VDiskController.create_snapshot(vdisk_guid=clone_vdisk.guid,
                                            metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                      'is_consistent': True,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})

        base_timestamp = self._make_timestamp(base, day * 2)
        ScheduledTaskController.delete_snapshots(timestamp=base_timestamp +
                                                 (minute * 30))
        self.assertIn(
            base_snapshot_guid, [snap['guid'] for snap in vdisk_1_1.snapshots],
            'Snapshot was deleted while there are still clones of it')
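
The _make_timestamp helper is used above but not shown in this listing. A plausible implementation, assuming it turns a base date plus a datetime.timedelta into epoch seconds at midnight, would be:

    @staticmethod
    def _make_timestamp(base, offset):
        # Plausible sketch, not the original helper: midnight of (base + offset)
        # expressed as a unix timestamp.
        import time
        return int(time.mktime((base + offset).timetuple()))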
Code example #32
    def test_happypath(self):
        """
        Validates the happy path; hourly snapshots are taken with a few manual consistent ones
        every now and then. The delete policy is executed every day
        """
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.save()
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.aliases = ['/dev/non-existent']
        disk.size = 500 * 1024**3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.disk = disk
        disk_partition.aliases = ['/dev/disk/non-existent']
        disk_partition.size = 400 * 1024**3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        vdisk_1 = VDisk()
        vdisk_1.name = 'vdisk_1'
        vdisk_1.volume_id = 'vdisk_1'
        vdisk_1.vpool = vpool
        vdisk_1.devicename = 'dummy'
        vdisk_1.size = 0
        vdisk_1.save()
        vdisk_1.reload_client('storagedriver')

        [dynamic for dynamic in vdisk_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        # Run the testing scenario
        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            self._print_message('Running in Travis, reducing output.')
        debug = not travis
        amount_of_days = 50
        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        minute = 60
        hour = minute * 60

        for d in xrange(0, amount_of_days):
            base_timestamp = self._make_timestamp(base, day * d)
            self._print_message('')
            self._print_message('Day cycle: {0}: {1}'.format(
                d,
                datetime.datetime.fromtimestamp(base_timestamp).strftime(
                    '%Y-%m-%d')))

            # At the start of the day, delete snapshot policy runs at 00:30
            self._print_message('- Deleting snapshots')
            ScheduledTaskController.delete_snapshots(timestamp=base_timestamp +
                                                     (minute * 30))

            # Validate snapshots
            self._print_message('- Validating snapshots')
            self._validate(vdisk_1, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create non consistent snapshot every hour, between 2:00 and 22:00
            # - Create consistent snapshot at 6:30, 12:30, 18:30
            self._print_message('- Creating snapshots')
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                                metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                          'is_consistent': False,
                                                          'timestamp': str(timestamp),
                                                          'machineguid': None})
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                                    metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                              'is_consistent': True,
                                                              'timestamp': str(ts),
                                                              'machineguid': None})
Code example #33
File: vdisk.py Project: mflu/openvstorage_centos
    def create_from_template(diskguid,
                             machinename,
                             devicename,
                             pmachineguid,
                             machineguid=None,
                             storagedriver_guid=None):
        """
        Create a disk from a template

        @param diskguid: guid of the template disk
        @param machinename: name of the machine the disk is created for (eg: myVM)
        @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
        @param pmachineguid: guid of the pmachine hosting the new disk
        @param machineguid: guid of the machine to assign disk to
        @param storagedriver_guid: guid of the Storage Driver to create the new disk on
        @return diskguid: guid of new disk
        """

        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        disk_path = hypervisor.get_disk_path(machinename, devicename)

        description = '{} {}'.format(machinename, devicename)
        properties_to_clone = [
            'description', 'size', 'type', 'retentionpolicyid',
            'snapshotpolicyid', 'vmachine', 'vpool'
        ]

        disk = VDisk(diskguid)
        if disk.vmachine and not disk.vmachine.is_vtemplate:
            # Disk might not be attached to a vmachine, but still be a template
            raise RuntimeError('The given disk does not belong to a template')

        if storagedriver_guid is not None:
            storagedriver_id = StorageDriver(storagedriver_guid).storagedriver_id
        else:
            storagedriver_id = disk.storagedriver_id

        new_disk = VDisk()
        new_disk.copy(disk, include=properties_to_clone)
        new_disk.vpool = disk.vpool
        new_disk.devicename = hypervisor.clean_backing_disk_filename(disk_path)
        new_disk.parent_vdisk = disk
        new_disk.name = '{}-clone'.format(disk.name)
        new_disk.description = description
        new_disk.vmachine = VMachine(machineguid) if machineguid else disk.vmachine
        new_disk.save()

        logger.info(
            'Create disk from template {} to new disk {} to location {}'.
            format(disk.name, new_disk.name, disk_path))
        try:
            volume_id = disk.storagedriver_client.create_clone_from_template(
                disk_path, str(disk.volume_id), node_id=str(storagedriver_id))
            new_disk.volume_id = volume_id
            new_disk.save()
        except Exception as ex:
            logger.error(
                'Clone disk on volumedriver level failed with exception: {0}'.
                format(str(ex)))
            new_disk.delete()
            raise

        return {
            'diskguid': new_disk.guid,
            'name': new_disk.name,
            'backingdevice': disk_path
        }
Code example #34
    def clone(diskguid,
              snapshotid,
              devicename,
              pmachineguid,
              machinename,
              machineguid=None):
        """
        Clone a disk
        """
        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        description = '{} {}'.format(machinename, devicename)
        properties_to_clone = [
            'description', 'size', 'type', 'retentionpolicyguid',
            'snapshotpolicyguid', 'autobackup'
        ]
        vdisk = VDisk(diskguid)
        location = hypervisor.get_backing_disk_path(machinename, devicename)

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=properties_to_clone)
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = '{0}-clone'.format(vdisk.name)
        new_vdisk.description = description
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(location)
        new_vdisk.parentsnapshot = snapshotid
        new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.save()

        try:
            storagedriver = StorageDriverList.get_by_storagedriver_id(
                vdisk.storagedriver_id)
            if storagedriver is None:
                raise RuntimeError(
                    'Could not find StorageDriver with id {0}'.format(
                        vdisk.storagedriver_id))

            mds_service = MDSServiceController.get_preferred_mds(
                storagedriver.storagerouter, vdisk.vpool)
            if mds_service is None:
                raise RuntimeError('Could not find a MDS service')

            logger.info('Clone snapshot {} of disk {} to location {}'.format(
                snapshotid, vdisk.name, location))
            volume_id = vdisk.storagedriver_client.create_clone(
                target_path=location,
                metadata_backend_config=MDSMetaDataBackendConfig([
                    MDSNodeConfig(address=str(
                        mds_service.service.storagerouter.ip),
                                  port=mds_service.service.ports[0])
                ]),
                parent_volume_id=str(vdisk.volume_id),
                parent_snapshot_id=str(snapshotid),
                node_id=str(vdisk.storagedriver_id))
        except Exception as ex:
            logger.error(
                'Caught exception during clone, trying to delete the volume. {0}'
                .format(ex))
            new_vdisk.delete()
            VDiskController.delete_volume(location)
            raise

        new_vdisk.volume_id = volume_id
        new_vdisk.save()

        try:
            MDSServiceController.ensure_safety(new_vdisk)
        except Exception as ex:
            logger.error(
                'Caught exception during "ensure_safety" {0}'.format(ex))

        return {
            'diskguid': new_vdisk.guid,
            'name': new_vdisk.name,
            'backingdevice': location
        }
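
A minimal invocation sketch for the clone task above; the guids and names are placeholders, not values taken from this listing:

result = VDiskController.clone(diskguid='<source_vdisk_guid>',
                               snapshotid='<snapshot_guid>',
                               devicename='mydisk',
                               pmachineguid='<pmachine_guid>',
                               machinename='myVM')
print 'Clone {0} created at {1}'.format(result['name'], result['backingdevice'])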
Code example #35
File: vdisk.py Project: jamie-liu/openvstorage
    def clone(diskguid, snapshotid, devicename, pmachineguid, machinename=None, machineguid=None, detached=False):
        """
        Clone a disk
        :param diskguid: Guid of the disk to clone
        :param snapshotid: ID of the snapshot to clone from
        :param devicename: Name of the device to use in clone's description
        :param pmachineguid: Guid of the physical machine
        :param machinename: Name of the machine the disk is attached to
        :param machineguid: Guid of the machine
        :param detached: Boolean indicating whether the clone should be created detached (i.e. not attached to any vMachine)
        """
        # 1. Validations
        name_regex = "^[0-9a-zA-Z][-_a-zA-Z0-9]{1,48}[a-zA-Z0-9]$"
        if not re.match(name_regex, devicename):
            raise RuntimeError("Invalid name for virtual disk clone")

        if VDiskList.get_vdisk_by_name(vdiskname=devicename) is not None:
            raise RuntimeError("A virtual disk with this name already exists")

        vdisk = VDisk(diskguid)
        storagedriver = StorageDriverList.get_by_storagedriver_id(vdisk.storagedriver_id)
        if storagedriver is None:
            raise RuntimeError('Could not find StorageDriver with ID {0}'.format(vdisk.storagedriver_id))

        if machineguid is not None and detached is True:
            raise ValueError('A vMachine GUID was specified while detached is True')

        # 2. Create new snapshot if required
        if snapshotid is None:
            timestamp = str(int(time.time()))
            metadata = {'label': '',
                        'is_consistent': False,
                        'timestamp': timestamp,
                        'machineguid': machineguid,
                        'is_automatic': True}
            sd_snapshot_id = VDiskController.create_snapshot(diskguid, metadata)
            tries = 25  # 5 minutes
            while snapshotid is None and tries > 0:
                time.sleep(25 - tries)
                tries -= 1
                vdisk.invalidate_dynamics(['snapshots'])
                for snapshot in vdisk.snapshots:
                    if snapshot['guid'] != sd_snapshot_id:
                        continue
                    if snapshot['in_backend'] is True:
                        snapshotid = snapshot['guid']
            if snapshotid is None:
                try:
                    VDiskController.delete_snapshot(diskguid=diskguid,
                                                    snapshotid=sd_snapshot_id)
                except:
                    pass
                raise RuntimeError('Could not find created snapshot in time')

        # 3. Model new cloned virtual disk
        hypervisor = Factory.get(PMachine(pmachineguid))
        location = hypervisor.get_disk_path(machinename, devicename)

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=['description', 'size', 'type', 'retentionpolicyguid', 'snapshotpolicyguid', 'autobackup'])
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = devicename
        new_vdisk.description = devicename if machinename is None else '{0} {1}'.format(machinename, devicename)
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(location)
        new_vdisk.parentsnapshot = snapshotid
        if detached is False:
            new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.save()

        # 4. Configure Storage Driver
        try:
            mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, vdisk.vpool)
            if mds_service is None:
                raise RuntimeError('Could not find a MDS service')

            logger.info('Clone snapshot {0} of disk {1} to location {2}'.format(snapshotid, vdisk.name, location))
            backend_config = MDSMetaDataBackendConfig([MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                                                     port=mds_service.service.ports[0])])
            volume_id = vdisk.storagedriver_client.create_clone(target_path=location,
                                                                metadata_backend_config=backend_config,
                                                                parent_volume_id=str(vdisk.volume_id),
                                                                parent_snapshot_id=str(snapshotid),
                                                                node_id=str(vdisk.storagedriver_id))
        except Exception as ex:
            logger.error('Caught exception during clone, trying to delete the volume. {0}'.format(ex))
            try:
                VDiskController.clean_bad_disk(new_vdisk.guid)
            except Exception as ex2:
                logger.exception('Exception during exception handling of "clone": {0}'.format(str(ex2)))
            raise

        new_vdisk.volume_id = volume_id
        new_vdisk.save()

        # 5. Check MDS & DTL for new clone
        try:
            MDSServiceController.ensure_safety(new_vdisk)
        except Exception as ex:
            logger.error('Caught exception during "ensure_safety" {0}'.format(ex))
        VDiskController.dtl_checkup.delay(vdisk_guid=new_vdisk.guid)

        return {'diskguid': new_vdisk.guid,
                'name': new_vdisk.name,
                'backingdevice': location}
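
A sketch of the detached use case this variant adds; the guids are placeholders and the call only illustrates the parameter combination described in the docstring. Passing snapshotid=None makes the method take a fresh snapshot itself and wait for it to land in the backend (step 2), while detached=True skips the vMachine assignment.

result = VDiskController.clone(diskguid='<source_vdisk_guid>',
                               snapshotid=None,
                               devicename='clone01',
                               pmachineguid='<pmachine_guid>',
                               detached=True)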
Code example #36
    def test_happypath(self):
        """
        Validates the happy path; hourly snapshots are taken with a few manual consistent ones
        every now and then. The delete policy is executed every day
        """
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.save()
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.aliases = ['/dev/non-existent']
        disk.size = 500 * 1024 ** 3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.disk = disk
        disk_partition.aliases = ['/dev/disk/non-existent']
        disk_partition.size = 400 * 1024 ** 3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        vdisk_1 = VDisk()
        vdisk_1.name = 'vdisk_1'
        vdisk_1.volume_id = 'vdisk_1'
        vdisk_1.vpool = vpool
        vdisk_1.devicename = 'dummy'
        vdisk_1.size = 0
        vdisk_1.save()
        vdisk_1.reload_client('storagedriver')

        [dynamic for dynamic in vdisk_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        # Run the testing scenario
        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            self._print_message('Running in Travis, reducing output.')
        debug = not travis
        amount_of_days = 50
        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        minute = 60
        hour = minute * 60

        for d in xrange(0, amount_of_days):
            base_timestamp = self._make_timestamp(base, day * d)
            self._print_message('')
            self._print_message('Day cycle: {0}: {1}'.format(d, datetime.datetime.fromtimestamp(base_timestamp).strftime('%Y-%m-%d')))

            # At the start of the day, delete snapshot policy runs at 00:30
            self._print_message('- Deleting snapshots')
            ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))

            # Validate snapshots
            self._print_message('- Validating snapshots')
            self._validate(vdisk_1, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create non consistent snapshot every hour, between 2:00 and 22:00
            # - Create consistent snapshot at 6:30, 12:30, 18:30
            self._print_message('- Creating snapshots')
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                                metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                          'is_consistent': False,
                                                          'timestamp': str(timestamp),
                                                          'machineguid': None})
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VDiskController.create_snapshot(vdisk_guid=vdisk_1.guid,
                                                    metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                              'is_consistent': True,
                                                              'timestamp': str(ts),
                                                              'machineguid': None})
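
_print_message is referenced above but not included in this listing. A deliberately simple, assumed shape for it is shown below; the real helper may throttle or redirect output when running on CI.

    def _print_message(self, message):
        # Assumed minimal implementation, not the original helper.
        print message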
Code example #37
    def test_happypath(self):
        """
        Validates the happy path; hourly snapshots are taken with a few manual consistent ones
        every now and then. The delete policy is executed every day
        """
        # Setup
        # There are 2 machines; one with two disks, one with one disk and a stand-alone additional disk
        failure_domain = FailureDomain()
        failure_domain.name = 'Test'
        failure_domain.save()
        backend_type = BackendType()
        backend_type.name = 'BackendType'
        backend_type.code = 'BT'
        backend_type.save()
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = 'PMachine'
        pmachine.username = '******'
        pmachine.ip = '127.0.0.1'
        pmachine.hvtype = 'VMWARE'
        pmachine.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.pmachine = pmachine
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.primary_failure_domain = failure_domain
        storage_router.save()
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.path = '/dev/non-existent'
        disk.size = 500 * 1024**3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.id = 'disk_partition_id'
        disk_partition.disk = disk
        disk_partition.path = '/dev/disk/non-existent'
        disk_partition.size = 400 * 1024**3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        vmachine_1 = VMachine()
        vmachine_1.name = 'vmachine_1'
        vmachine_1.devicename = 'dummy'
        vmachine_1.pmachine = pmachine
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        vdisk_1_2 = VDisk()
        vdisk_1_2.name = 'vdisk_1_2'
        vdisk_1_2.volume_id = 'vdisk_1_2'
        vdisk_1_2.vmachine = vmachine_1
        vdisk_1_2.vpool = vpool
        vdisk_1_2.devicename = 'dummy'
        vdisk_1_2.size = 0
        vdisk_1_2.save()
        vdisk_1_2.reload_client()
        vmachine_2 = VMachine()
        vmachine_2.name = 'vmachine_2'
        vmachine_2.devicename = 'dummy'
        vmachine_2.pmachine = pmachine
        vmachine_2.save()
        vdisk_2_1 = VDisk()
        vdisk_2_1.name = 'vdisk_2_1'
        vdisk_2_1.volume_id = 'vdisk_2_1'
        vdisk_2_1.vmachine = vmachine_2
        vdisk_2_1.vpool = vpool
        vdisk_2_1.devicename = 'dummy'
        vdisk_2_1.size = 0
        vdisk_2_1.save()
        vdisk_2_1.reload_client()
        vdisk_3 = VDisk()
        vdisk_3.name = 'vdisk_3'
        vdisk_3.volume_id = 'vdisk_3'
        vdisk_3.vpool = vpool
        vdisk_3.devicename = 'dummy'
        vdisk_3.size = 0
        vdisk_3.save()
        vdisk_3.reload_client()

        for disk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
            [dynamic for dynamic in disk._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        # Run the testing scenario
        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            print 'Running in Travis, reducing output.'
        debug = not travis
        amount_of_days = 50
        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        minute = 60
        hour = minute * 60

        for d in xrange(0, amount_of_days):
            base_timestamp = self._make_timestamp(base, day * d)
            print ''
            print 'Day cycle: {0}: {1}'.format(
                d,
                datetime.datetime.fromtimestamp(base_timestamp).strftime(
                    '%Y-%m-%d'))

            # At the start of the day, delete snapshot policy runs at 00:30
            print '- Deleting snapshots'
            ScheduledTaskController.delete_snapshots(timestamp=base_timestamp +
                                                     (minute * 30))

            # Validate snapshots
            print '- Validating snapshots'
            for vdisk in [vdisk_1_1, vdisk_1_2, vdisk_2_1, vdisk_3]:
                self._validate(vdisk, d, base, amount_of_days, debug)

            # During the day, snapshots are taken
            # - Create non consistent snapshot every hour, between 2:00 and 22:00
            # - Create consistent snapshot at 6:30, 12:30, 18:30
            print '- Creating snapshots'
            for h in xrange(2, 23):
                timestamp = base_timestamp + (hour * h)
                for vm in [vmachine_1, vmachine_2]:
                    VMachineController.snapshot(machineguid=vm.guid,
                                                label='ss_i_{0}:00'.format(str(h)),
                                                is_consistent=False,
                                                timestamp=timestamp)
                    if h in [6, 12, 18]:
                        ts = (timestamp + (minute * 30))
                        VMachineController.snapshot(machineguid=vm.guid,
                                                    label='ss_c_{0}:30'.format(str(h)),
                                                    is_consistent=True,
                                                    timestamp=ts)

                VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                metadata={'label': 'ss_i_{0}:00'.format(str(h)),
                                                          'is_consistent': False,
                                                          'timestamp': str(timestamp),
                                                          'machineguid': None})
                if h in [6, 12, 18]:
                    ts = (timestamp + (minute * 30))
                    VDiskController.create_snapshot(diskguid=vdisk_3.guid,
                                                    metadata={'label': 'ss_c_{0}:30'.format(str(h)),
                                                              'is_consistent': True,
                                                              'timestamp': str(ts),
                                                              'machineguid': None})
Code example #38
    def test_clone_snapshot(self):
        """
        Validates that a snapshot that has clones will not be deleted while other snapshots will be deleted
        """
        # Setup
        # There are 2 disks, second one cloned from a snapshot of the first
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.status = 'RUNNING'
        vpool.save()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.save()
        disk = Disk()
        disk.name = 'physical_disk_1'
        disk.aliases = ['/dev/non-existent']
        disk.size = 500 * 1024 ** 3
        disk.state = 'OK'
        disk.is_ssd = True
        disk.storagerouter = storage_router
        disk.save()
        disk_partition = DiskPartition()
        disk_partition.disk = disk
        disk_partition.aliases = ['/dev/disk/non-existent']
        disk_partition.size = 400 * 1024 ** 3
        disk_partition.state = 'OK'
        disk_partition.offset = 1024
        disk_partition.roles = [DiskPartition.ROLES.SCRUB]
        disk_partition.mountpoint = '/var/tmp'
        disk_partition.save()
        storage_driver = StorageDriver()
        storage_driver.vpool = vpool
        storage_driver.storagerouter = storage_router
        storage_driver.name = 'storage_driver_1'
        storage_driver.mountpoint = '/'
        storage_driver.cluster_ip = storage_router.ip
        storage_driver.storage_ip = '127.0.0.1'
        storage_driver.storagedriver_id = 'storage_driver_1'
        storage_driver.ports = {'management': 1,
                                'xmlrpc': 2,
                                'dtl': 3,
                                'edge': 4}
        storage_driver.save()
        service_type = ServiceType()
        service_type.name = 'MetadataServer'
        service_type.save()
        service = Service()
        service.name = 'service_1'
        service.storagerouter = storage_driver.storagerouter
        service.ports = [1]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storage_driver.vpool
        mds_service.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client('storagedriver')

        [dynamic for dynamic in vdisk_1_1._dynamics if dynamic.name == 'snapshots'][0].timeout = 0

        travis = 'TRAVIS' in os.environ and os.environ['TRAVIS'] == 'true'
        if travis is True:
            print 'Running in Travis, reducing output.'

        base = datetime.datetime.now().date()
        day = datetime.timedelta(1)
        base_timestamp = self._make_timestamp(base, day)
        minute = 60
        hour = minute * 60
        for h in [6, 12, 18]:
            timestamp = base_timestamp + (hour * h)
            VDiskController.create_snapshot(vdisk_guid=vdisk_1_1.guid,
                                            metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                      'is_consistent': True,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})

        base_snapshot_guid = vdisk_1_1.snapshots[0]['guid']  # Oldest
        clone_vdisk = VDisk()
        clone_vdisk.name = 'clone_vdisk'
        clone_vdisk.volume_id = 'clone_vdisk'
        clone_vdisk.vpool = vpool
        clone_vdisk.devicename = 'dummy'
        clone_vdisk.parentsnapshot = base_snapshot_guid
        clone_vdisk.size = 0
        clone_vdisk.save()
        clone_vdisk.reload_client('storagedriver')

        for h in [6, 12, 18]:
            timestamp = base_timestamp + (hour * h)
            VDiskController.create_snapshot(vdisk_guid=clone_vdisk.guid,
                                            metadata={'label': 'snapshot_{0}:30'.format(str(h)),
                                                      'is_consistent': True,
                                                      'timestamp': str(timestamp),
                                                      'machineguid': None})

        base_timestamp = self._make_timestamp(base, day * 2)
        ScheduledTaskController.delete_snapshots(timestamp=base_timestamp + (minute * 30))
        self.assertIn(base_snapshot_guid, [snap['guid'] for snap in vdisk_1_1.snapshots], 'Snapshot was deleted while there are still clones of it')
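
Conceptually, the behaviour this assertion verifies boils down to the guard sketched below. This is an illustrative stand-in, not the actual ScheduledTaskController logic: a snapshot must survive as long as some vdisk still references it as its parentsnapshot, exactly as clone_vdisk does above.

def snapshot_has_clones(snapshot_guid, all_vdisks):
    # Illustrative sketch only: returns True if any vdisk was cloned from this snapshot.
    return any(getattr(vdisk, 'parentsnapshot', None) == snapshot_guid
               for vdisk in all_vdisks)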
Code example #39
File: vdisk.py Project: tcpcloud/openvstorage
    def create_from_template(diskguid, machinename, devicename, pmachineguid, machineguid=None, storagedriver_guid=None):
        """
        Create a disk from a template

        @param diskguid: guid of the template disk
        @param machinename: name of the machine the disk is created for (eg: myVM)
        @param devicename: device file name for the disk (eg: mydisk-flat.vmdk)
        @param pmachineguid: guid of the pmachine hosting the new disk
        @param machineguid: guid of the machine to assign disk to
        @param storagedriver_guid: guid of the Storage Driver to create the new disk on
        @return diskguid: guid of new disk
        """

        pmachine = PMachine(pmachineguid)
        hypervisor = Factory.get(pmachine)
        disk_path = hypervisor.get_disk_path(machinename, devicename)

        description = '{} {}'.format(machinename, devicename)
        properties_to_clone = [
            'description', 'size', 'type', 'retentionpolicyid',
            'snapshotpolicyid', 'vmachine', 'vpool']

        vdisk = VDisk(diskguid)
        if vdisk.vmachine and not vdisk.vmachine.is_vtemplate:
            # Disk might not be attached to a vmachine, but still be a template
            raise RuntimeError('The given vdisk does not belong to a template')

        if storagedriver_guid is not None:
            storagedriver_id = StorageDriver(storagedriver_guid).storagedriver_id
        else:
            storagedriver_id = vdisk.storagedriver_id
        storagedriver = StorageDriverList.get_by_storagedriver_id(storagedriver_id)
        if storagedriver is None:
            raise RuntimeError('Could not find StorageDriver with id {0}'.format(storagedriver_id))

        new_vdisk = VDisk()
        new_vdisk.copy(vdisk, include=properties_to_clone)
        new_vdisk.vpool = vdisk.vpool
        new_vdisk.devicename = hypervisor.clean_backing_disk_filename(disk_path)
        new_vdisk.parent_vdisk = vdisk
        new_vdisk.name = '{}-clone'.format(vdisk.name)
        new_vdisk.description = description
        new_vdisk.vmachine = VMachine(machineguid) if machineguid else vdisk.vmachine
        new_vdisk.save()

        mds_service = MDSServiceController.get_preferred_mds(storagedriver.storagerouter, vdisk.vpool)
        if mds_service is None:
            raise RuntimeError('Could not find a MDS service')

        logger.info('Create disk from template {} to new disk {} to location {}'.format(
            vdisk.name, new_vdisk.name, disk_path
        ))
        try:
            volume_id = vdisk.storagedriver_client.create_clone_from_template(
                target_path=disk_path,
                metadata_backend_config=MDSMetaDataBackendConfig([MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                                                                port=mds_service.service.ports[0])]),
                parent_volume_id=str(vdisk.volume_id),
                node_id=str(storagedriver_id)
            )
            new_vdisk.volume_id = volume_id
            new_vdisk.save()
            MDSServiceController.ensure_safety(new_vdisk)

        except Exception as ex:
            logger.error('Clone disk on volumedriver level failed with exception: {0}'.format(str(ex)))
            new_vdisk.delete()
            raise

        return {'diskguid': new_vdisk.guid, 'name': new_vdisk.name,
                'backingdevice': disk_path}
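
As a placeholder-guid sketch (not taken from the repository), the extra storagedriver_guid parameter of this variant pins the clone to a specific Storage Driver; when omitted, the template's own storagedriver_id is used as the node_id instead.

result = VDiskController.create_from_template(diskguid='<template_vdisk_guid>',
                                              machinename='myVM',
                                              devicename='mydisk-flat.vmdk',
                                              pmachineguid='<pmachine_guid>',
                                              storagedriver_guid='<storagedriver_guid>')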
Code example #40
    def _prepare(self):
        # Setup
        failure_domain = FailureDomain()
        failure_domain.name = 'Test'
        failure_domain.save()
        backend_type = BackendType()
        backend_type.name = 'BackendType'
        backend_type.code = 'BT'
        backend_type.save()
        vpool = VPool()
        vpool.name = 'vpool'
        vpool.backend_type = backend_type
        vpool.save()
        pmachine = PMachine()
        pmachine.name = 'PMachine'
        pmachine.username = '******'
        pmachine.ip = '127.0.0.1'
        pmachine.hvtype = 'KVM'
        pmachine.save()
        vmachine_1 = VMachine()
        vmachine_1.name = 'vmachine_1'
        vmachine_1.devicename = 'dummy'
        vmachine_1.pmachine = pmachine
        vmachine_1.is_vtemplate = True
        vmachine_1.save()
        vdisk_1_1 = VDisk()
        vdisk_1_1.name = 'vdisk_1_1'
        vdisk_1_1.volume_id = 'vdisk_1_1'
        vdisk_1_1.vmachine = vmachine_1
        vdisk_1_1.vpool = vpool
        vdisk_1_1.devicename = 'dummy'
        vdisk_1_1.size = 0
        vdisk_1_1.save()
        vdisk_1_1.reload_client()
        storage_router = StorageRouter()
        storage_router.name = 'storage_router'
        storage_router.ip = '127.0.0.1'
        storage_router.pmachine = pmachine
        storage_router.machine_id = System.get_my_machine_id()
        storage_router.rdma_capable = False
        storage_router.primary_failure_domain = failure_domain
        storage_router.save()
        storagedriver = StorageDriver()
        storagedriver.vpool = vpool
        storagedriver.storagerouter = storage_router
        storagedriver.name = '1'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storage_router.ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = '1'
        storagedriver.ports = [1, 2, 3]
        storagedriver.save()
        service_type = ServiceType()
        service_type.name = 'MetadataServer'
        service_type.save()
        s_id = '{0}-{1}'.format(storagedriver.storagerouter.name, '1')
        service = Service()
        service.name = s_id
        service.storagerouter = storagedriver.storagerouter
        service.ports = [1]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storagedriver.vpool
        mds_service.save()

        def ensure_safety(vdisk):
            pass
        class Dtl_Checkup():
            @staticmethod
            def delay(vpool_guid=None, vdisk_guid=None, storagerouters_to_exclude=None):
                pass
        MDSServiceController.ensure_safety = staticmethod(ensure_safety)
        VDiskController.dtl_checkup = Dtl_Checkup
        return vdisk_1_1, pmachine