コード例 #1
0
    def test_sync_vdisk_with_voldrv(self):
        """
        Test syncing the vDisk model with the volumedriver reality
            - Create a vDisk and a chain of clones on top of it
            - Remove the clones from the model only (reality untouched)
            - Run sync_with_reality and verify the clones are re-modeled,
              including their parent relations
        """
        clone_depth = 3

        def _make_clones(vdisks_map, depth=clone_depth):
            # Each iteration clones the most recently added vDisk, creating a chain
            # of `depth` clones hanging off the original vDisk.
            for _ in range(depth):
                previous_vd = list(vdisks_map.itervalues())[-1]
                new_name = previous_vd.name + '_clone'
                new_guid = VDiskController.clone(previous_vd.guid,
                                                 new_name).get('vdisk_guid')
                vdisks_map[new_name] = VDisk(new_guid)

        structure = DalHelper.build_dal_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
        vdisk_name = 'vdisk_1'
        storagedriver = structure['storagedrivers'][1]
        vdisk_1 = VDisk(
            VDiskController.create_new(volume_name=vdisk_name,
                                       volume_size=1024 ** 4,
                                       storagedriver_guid=storagedriver.guid))
        vdisks = OrderedDict()
        vdisks[vdisk_name] = vdisk_1

        _make_clones(vdisks)
        # assertEqual instead of the deprecated assertEquals alias
        self.assertEqual(clone_depth + 1, len(list(VDiskList.get_vdisks())))
        # These vDisks are clones and ought to be deleted (everything except the original)
        delete_list = list(vdisks.itervalues())[::-1][:-1]
        for vdisk in delete_list:
            for mds_service in vdisk.mds_services:
                mds_service.delete()
            vdisk.delete()
        # Make sure vDisk clones are properly removed
        self.assertEqual(1, len(list(VDiskList.get_vdisks())))
        # Make sure only item left is original vDisk
        self.assertEqual(VDiskList.get_vdisks()[0].name, vdisk_name)

        VDiskController.sync_with_reality()
        # The clones should be in place now
        self.assertEqual(clone_depth + 1, len(list(VDiskList.get_vdisks())))

        parents = 0
        for vdisk in VDiskList.get_vdisks():
            try:
                if vdisk.parent_vdisk.name:
                    parents += 1
            except AttributeError:
                pass  # the original vDisk has no parent
        # As many parents should be detected as the depth of the clones
        self.assertEqual(clone_depth, parents)
コード例 #2
0
ファイル: test_vdisk.py プロジェクト: grimpy/openvstorage
    def test_event_resize_from_volumedriver(self):
        """
        Test resize from volumedriver event
            - Create a vDisk using the resize event
            - Resize the created vDisk using the same resize event
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
        vpool = structure['vpools'][1]
        storagedriver = structure['storagedrivers'][1]
        mds_service = structure['mds_services'][1]

        one_tib = 1024 ** 4
        device_name = '/vdisk.raw'

        # Create volume using resize from voldrv
        srclient = StorageRouterClient(vpool.guid, None)
        mds_backend_config = Helper._generate_mdsmetadatabackendconfig([mds_service])
        volume_id = srclient.create_volume(device_name, mds_backend_config, one_tib, str(storagedriver.storagedriver_id))
        VDiskController.resize_from_voldrv(volume_id=volume_id,
                                           volume_size=one_tib,
                                           volume_path=device_name,
                                           storagedriver_id=storagedriver.storagedriver_id)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 1,
                        msg='Expected to find 1 vDisk in model')
        created_vdisk = vdisks[0]
        self.assertEqual(first=created_vdisk.name,
                         second='vdisk',
                         msg='Volume name should be vdisk')
        self.assertEqual(first=created_vdisk.volume_id,
                         second=volume_id,
                         msg='Volume ID should be {0}'.format(volume_id))
        self.assertEqual(first=created_vdisk.devicename,
                         second=device_name,
                         msg='Device name should be {0}'.format(device_name))
        self.assertEqual(first=created_vdisk.size,
                         second=one_tib,
                         msg='Size should be 1 TiB')

        # Resize volume using resize from voldrv
        VDiskController.resize_from_voldrv(volume_id=volume_id,
                                           volume_size=2 * one_tib,
                                           volume_path=device_name,
                                           storagedriver_id=storagedriver.storagedriver_id)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 1,
                        msg='Expected to find 1 vDisk in model')
        resized_vdisk = vdisks[0]
        self.assertEqual(first=resized_vdisk.name,
                         second='vdisk',
                         msg='Volume name should be vdisk')
        self.assertEqual(first=resized_vdisk.size,
                         second=2 * one_tib,
                         msg='Size should be 2 TiB')
コード例 #3
0
ファイル: vdisks.py プロジェクト: winglq/framework
 def list(self, vpoolguid=None, storagerouterguid=None, query=None):
     """
     Overview of all vDisks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     :type vpoolguid: str
     :param storagerouterguid: Guid of the StorageRouter to retrieve its disks
     :type storagerouterguid: str
     :param query: A query to be executed if required
     :type query: DataQuery
     """
     # Pick the base collection: vPool disks, StorageRouter disks, or everything
     if vpoolguid is not None:
         vdisks = VPool(vpoolguid).vdisks
     elif storagerouterguid is not None:
         storagerouter = StorageRouter(storagerouterguid)
         guid_filter = {'type': DataList.where_operator.AND,
                        'items': [('guid', DataList.operator.IN,
                                   storagerouter.vdisks_guids)]}
         vdisks = DataList(VDisk, guid_filter)
     else:
         vdisks = VDiskList.get_vdisks()
     # Narrow the result down further when an explicit query was supplied
     if query is not None:
         query_vdisk_guids = DataList(VDisk, query).guids
         vdisks = [candidate for candidate in vdisks
                   if candidate.guid in query_vdisk_guids]
     return vdisks
コード例 #4
0
ファイル: vdisks.py プロジェクト: grimpy/openvstorage
 def list(self, vpoolguid=None, storagerouterguid=None, query=None):
     """
     Overview of all vDisks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     :type vpoolguid: str
     :param storagerouterguid: Guid of the StorageRouter to retrieve its disks
     :type storagerouterguid: str
     :param query: A query to be executed if required
     :type query: DataQuery
     """
     if vpoolguid is not None:
         # Scope to a single vPool
         result = VPool(vpoolguid).vdisks
     elif storagerouterguid is not None:
         # Scope to the disks served by one StorageRouter
         router = StorageRouter(storagerouterguid)
         result = DataList(VDisk,
                           {"type": DataList.where_operator.AND,
                            "items": [("guid", DataList.operator.IN, router.vdisks_guids)]})
     else:
         result = VDiskList.get_vdisks()
     if query is None:
         return result
     # Intersect with the guids matched by the caller-supplied query
     matching_guids = DataList(VDisk, query).guids
     return [entry for entry in result if entry.guid in matching_guids]
コード例 #5
0
 def snapshot_all_vdisks():
     """
     Snapshots all vDisks
     :return: tuple of (guids snapshotted successfully, guids that failed)
     """
     GenericController._logger.info('[SSA] started')
     succeeded = []
     failed = []
     for vdisk in VDiskList.get_vdisks():
         # vTemplates are skipped: only real volumes get automatic snapshots
         if vdisk.is_vtemplate is True:
             continue
         snapshot_metadata = {'label': '',
                              'is_consistent': False,
                              'timestamp': str(int(time.time())),
                              'is_automatic': True,
                              'is_sticky': False}
         try:
             VDiskController.create_snapshot(vdisk_guid=vdisk.guid,
                                             metadata=snapshot_metadata)
             succeeded.append(vdisk.guid)
         except Exception:
             # Log and continue: one failing vDisk must not abort the whole run
             GenericController._logger.exception(
                 'Error taking snapshot for vDisk {0}'.format(vdisk.guid))
             failed.append(vdisk.guid)
     GenericController._logger.info(
         '[SSA] Snapshot has been taken for {0} vDisks, {1} failed.'.format(
             len(succeeded), len(failed)))
     return succeeded, failed
コード例 #6
0
 def list(self, vpoolguid=None, storagerouterguid=None):
     """
     Overview of all vDisks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     :type vpoolguid: str
     :param storagerouterguid: Guid of the StorageRouter to retrieve its disks
     :type storagerouterguid: str
     :return: List of vDisks matching the parameters specified
     :rtype: list[ovs.dal.hybrids.vdisk.VDisk]
     """
     # Early returns per filter; fall through to the full listing
     if vpoolguid is not None:
         return VPool(vpoolguid).vdisks
     if storagerouterguid is not None:
         router = StorageRouter(storagerouterguid)
         router_filter = {'type': DataList.where_operator.AND,
                          'items': [('guid', DataList.operator.IN,
                                     router.vdisks_guids)]}
         return DataList(VDisk, router_filter)
     return VDiskList.get_vdisks()
コード例 #7
0
 def _find_ovs_model_disk_by_location(self,
                                      location,
                                      hostname,
                                      retry=3,
                                      timeout=3):
     """Find OVS disk object based on location and hostname
     :param location: full '<mountpoint>/<devicename>' path to look for
     :param hostname: StorageRouter hostname serving the volume
     :param retry: number of extra attempts after the first scan
     :param timeout: seconds to sleep between attempts
     :return VDisk: OVS DAL model object
     :raises RuntimeError: when no disk matches the location
     """
     hostname = self._get_real_hostname(hostname)
     # Lazy %-args: the message is only rendered when the level is enabled
     LOG.debug('[_FIND OVS DISK] Location %s, hostname %s',
               location, hostname)
     attempt = 0
     while attempt <= retry:
         for vd in VDiskList.get_vdisks():
             if vd.vpool:
                 for vsr in vd.vpool.storagedrivers:
                     if vsr.storagerouter.name == hostname:
                         _location = "{0}/{1}".format(
                             vsr.mountpoint, vd.devicename)
                         if _location == location:
                             LOG.info('Location %s Disk found %s',
                                      location, vd.guid)
                             return VDisk(vd.guid)
         LOG.debug(' NO RESULT Attempt %s timeout %s max attempts %s',
                   attempt, timeout, retry)
         attempt += 1
         # Only sleep when another attempt follows; the original also slept
         # after the final attempt, delaying the RuntimeError for no benefit
         if timeout and attempt <= retry:
             time.sleep(timeout)
     raise RuntimeError('No disk found for location %s' % location)
コード例 #8
0
 def _find_ovs_model_disk_by_location(self, location, hostname, retry=3,
                                      timeout=3):
     """Find OVS disk object based on location and hostname
     :return VDisk: OVS DAL model object
     """
     hostname = self._get_real_hostname(hostname)
     LOG.debug('[_FIND OVS DISK] Location %s, hostname %s'
               % (location, hostname))
     attempt = 0
     while attempt <= retry:
         # Scan every modeled vDisk served by a storagedriver on `hostname`
         for candidate in VDiskList.get_vdisks():
             if not candidate.vpool:
                 continue
             for driver in candidate.vpool.storagedrivers:
                 if driver.storagerouter.name != hostname:
                     continue
                 candidate_location = "{0}/{1}".format(driver.mountpoint,
                                                       candidate.devicename)
                 if candidate_location == location:
                     LOG.info('Location %s Disk found %s'
                              % (location, candidate.guid))
                     return VDisk(candidate.guid)
         msg = ' NO RESULT Attempt %s timeout %s max attempts %s'
         LOG.debug(msg % (attempt, timeout, retry))
         if timeout:
             time.sleep(timeout)
         attempt += 1
     raise RuntimeError('No disk found for location %s' % location)
コード例 #9
0
 def _snapshot_has_children(self, snapshotid):
     """Find if snapshot has children, in OVS Model
     :return True/False
     """
     LOG.debug('[_FIND CHILDREN OF SNAPSHOT] Snapshotid %s' % snapshotid)
     # True as soon as any modeled vDisk references this snapshot as parent
     return any(vdisk.parentsnapshot == snapshotid
                for vdisk in VDiskList.get_vdisks())
コード例 #10
0
 def _snapshot_has_children(self, snapshotid):
     """Find if snapshot has children, in OVS Model
     :return True/False
     """
     LOG.debug('[_FIND CHILDREN OF SNAPSHOT] Snapshotid %s' % snapshotid)
     has_child = False
     for child_candidate in VDiskList.get_vdisks():
         if child_candidate.parentsnapshot == snapshotid:
             has_child = True
             break  # one child is enough; stop scanning
     return has_child
コード例 #11
0
    def test_delete(self):
        """
        Test the delete of a vDisk
            - Create 2 vDisks with identical names on 2 different vPools
            - Delete 1st vDisk and verify other still remains on correct vPool
            - Delete 2nd vDisk and verify no more volumes left
        """
        structure = DalHelper.build_dal_structure(
            {'vpools': [1, 2],
             'domains': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        domains = structure['domains']
        storagedrivers = structure['storagedrivers']

        one_gib = 1024 ** 3
        vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                                  volume_size=one_gib,
                                                  storagedriver_guid=storagedrivers[1].guid))
        vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1',
                                                  volume_size=one_gib,
                                                  storagedriver_guid=storagedrivers[2].guid))

        # Attach a domain to the first vDisk so its delete also exercises
        # the vDiskDomain cleanup path
        vdisk_domain = VDiskDomain()
        vdisk_domain.domain = domains[1]
        vdisk_domain.vdisk = vdisk1
        vdisk_domain.save()

        # Delete vDisk1 and make some assertions
        VDiskController.delete(vdisk_guid=vdisk1.guid)
        with self.assertRaises(ObjectNotFoundException):
            VDisk(vdisk1.guid)
        self.assertEqual(first=len(VDiskController.list_volumes()),
                         second=1,
                         msg='Expected to find only 1 volume in Storage Driver list_volumes')
        self.assertIn(member=vdisk2,
                      container=VDiskList.get_vdisks(),
                      msg='vDisk2 should still be modeled')

        # Delete vDisk2 and make some assertions
        VDiskController.delete(vdisk_guid=vdisk2.guid)
        with self.assertRaises(ObjectNotFoundException):
            VDisk(vdisk2.guid)
        self.assertEqual(first=len(VDiskController.list_volumes()),
                         second=0,
                         msg='Expected to find no more volumes in Storage Driver list_volumes')
コード例 #12
0
ファイル: vdisks.py プロジェクト: mflu/openvstorage_centos
 def list(self, vmachineguid=None, vpoolguid=None):
     """
     Overview of all vDisks
     """
     # Filter by vMachine first, then by vPool; otherwise return everything
     if vmachineguid is not None:
         return VMachine(vmachineguid).vdisks
     if vpoolguid is not None:
         return VPool(vpoolguid).vdisks
     return VDiskList.get_vdisks()
コード例 #13
0
ファイル: vdisks.py プロジェクト: BillTheBest/openvstorage
 def list(self, vmachineguid=None, vpoolguid=None):
     """
     Overview of all vDisks
     """
     if vmachineguid is not None:
         # Disks attached to one virtual machine
         owner = VMachine(vmachineguid)
         vdisks = owner.vdisks
     elif vpoolguid is not None:
         # Disks hosted on one vPool
         pool = VPool(vpoolguid)
         vdisks = pool.vdisks
     else:
         vdisks = VDiskList.get_vdisks()
     return vdisks
コード例 #14
0
ファイル: ovs_common.py プロジェクト: tjdeadfish/openvstorage
 def _ovs_snapshot_id_in_vdisklist_snapshots(self, snapshot_id, retry=10):
     """Poll the OVS model until snapshot_id appears in any vDisk's snapshots.
     :param snapshot_id: snapshot guid to look for
     :param retry: number of extra polling attempts after the first scan
     :return: True when found, False when all attempts are exhausted
     """
     retry = int(retry)
     attempt = 0
     while attempt <= retry:
         # Iterate the vDisks directly: the original built a guid->snapshots
         # dict whose keys were never used
         for vd in VDiskList.get_vdisks():
             if any(snap['guid'] == snapshot_id for snap in vd.snapshots):
                 return True
         attempt += 1
         # Do not sleep after the final attempt; fail fast instead
         if attempt <= retry:
             time.sleep(2)
     return False
コード例 #15
0
 def _find_ovs_model_disk_by_snapshot_id(self, snapshotid):
     """Find OVS disk object based on snapshot id
     :return VDisk: OVS DAL model object
     """
     LOG.debug("[_FIND OVS DISK] Snapshotid %s" % snapshotid)
     wanted_guid = str(snapshotid)
     for candidate in VDiskList.get_vdisks():
         # Membership test over the disk's snapshot guids
         if wanted_guid in [s["guid"] for s in candidate.snapshots]:
             LOG.info("[_FIND OVS DISK] Snapshot id %s Disk found %s" % (snapshotid, candidate.name))
             return candidate
     raise RuntimeError("No disk found for snapshotid %s" % snapshotid)
コード例 #16
0
ファイル: test_vdisk.py プロジェクト: grimpy/openvstorage
    def test_list_volumes(self):
        """
        Test the list volumes functionality
            - Create 1 vDisk on vPool1 and create 3 vDisks on vPool2
            - List all volumes
            - List the volumes on vPool1
            - List the volumes on vPool2
        """
        structure = Helper.build_service_structure(
            {'vpools': [1, 2],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        vpools = structure['vpools']
        storagedrivers = structure['storagedrivers']

        vpool1, vpool2 = vpools[1], vpools[2]
        one_tib = 1024 ** 4
        # One volume on vPool1, three on vPool2
        for name, sd in [('vdisk_1', storagedrivers[1]),
                         ('vdisk_1', storagedrivers[2]),
                         ('vdisk_2', storagedrivers[2]),
                         ('vdisk_3', storagedrivers[2])]:
            VDiskController.create_new(volume_name=name, volume_size=one_tib, storagedriver_guid=sd.guid)
        all_vdisks = VDiskList.get_vdisks()

        # List all volumes
        sd_volume_ids = set(VDiskController.list_volumes())
        model_volume_ids = {vdisk.volume_id for vdisk in all_vdisks}
        self.assertEqual(first=len(sd_volume_ids),
                         second=4,
                         msg='Expected to retrieve all 4 volumes')
        self.assertEqual(first=sd_volume_ids,
                         second=model_volume_ids,
                         msg='Volume IDs from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_volume_ids, model_volume_ids))

        # List all volumes of vpools[1]
        sd_vpool1_volume_ids = set(VDiskController.list_volumes(vpool_guid=vpool1.guid))
        model_vpool1_volume_ids = {vdisk.volume_id for vdisk in all_vdisks if vdisk.vpool == vpool1}
        self.assertEqual(first=len(sd_vpool1_volume_ids),
                         second=1,
                         msg='Expected to retrieve 1 volume')
        self.assertEqual(first=sd_vpool1_volume_ids,
                         second=model_vpool1_volume_ids,
                         msg='Volume IDs for vPool1 from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_vpool1_volume_ids, model_vpool1_volume_ids))

        # List all volumes of vpools[2]
        sd_vpool2_volume_ids = set(VDiskController.list_volumes(vpool_guid=vpool2.guid))
        model_vpool2_volume_ids = {vdisk.volume_id for vdisk in all_vdisks if vdisk.vpool == vpool2}
        self.assertEqual(first=len(sd_vpool2_volume_ids),
                         second=3,
                         msg='Expected to retrieve 3 volumes')
        self.assertEqual(first=sd_vpool2_volume_ids,
                         second=model_vpool2_volume_ids,
                         msg='Volume IDs for vPool2 from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_vpool2_volume_ids, model_vpool2_volume_ids))
コード例 #17
0
ファイル: ovs_common.py プロジェクト: JasperLue/openvstorage
 def _ovs_snapshot_id_in_vdisklist_snapshots(self, snapshot_id, retry=10):
     """Poll the OVS model until snapshot_id appears in any vDisk's snapshots.
     :param snapshot_id: snapshot guid to look for
     :param retry: number of extra polling attempts after the first scan
     :return: True when found, False when all attempts are exhausted
     """
     retry = int(retry)
     attempt = 0
     while attempt <= retry:
         # Iterate the vDisks directly: the original built a guid->snapshots
         # dict whose keys were never used
         for vd in VDiskList.get_vdisks():
             if any(snap['guid'] == snapshot_id for snap in vd.snapshots):
                 return True
         attempt += 1
         # Do not sleep after the final attempt; fail fast instead
         if attempt <= retry:
             time.sleep(2)
     return False
コード例 #18
0
 def _find_ovs_model_disk_by_snapshot_id(self, snapshotid):
     """Find OVS disk object based on snapshot id
     :return VDisk: OVS DAL model object
     """
     LOG.debug('[_FIND OVS DISK] Snapshotid %s' % snapshotid)
     target = str(snapshotid)
     for vdisk in VDiskList.get_vdisks():
         for snap in vdisk.snapshots:
             if snap['guid'] == target:
                 LOG.info('[_FIND OVS DISK] Snapshot id %s Disk found %s' %
                          (snapshotid, vdisk))
                 return vdisk
     raise RuntimeError('No disk found for snapshotid %s' % snapshotid)
コード例 #19
0
    def get_vdisks_stats():
        """
        Send vdisks statistics to InfluxDB
        :return: the list of measurement points that were sent, or None when
                 no vdisks exist or no point could be built
        """
        vdisks = VDiskList.get_vdisks()
        if len(vdisks) == 0:
            StatsmonkeyScheduledTaskController._logger.info("No vdisks found")
            return None

        # BUG FIX: the original returned from inside the loop, so only the
        # first vDisk was ever reported. Accumulate points for ALL vDisks
        # and send them in a single batch instead.
        points = []
        for vdisk in vdisks:
            try:
                metrics = StatsmonkeyScheduledTaskController._pop_realtime_info(vdisk.statistics)

                failover_mode = vdisk.info['failover_mode']
                # Map the textual failover mode onto a numeric status for graphing
                if failover_mode in ['OK_STANDALONE', 'OK_SYNC']:
                    failover_status = 0
                elif failover_mode == 'CATCHUP':
                    failover_status = 1
                elif failover_mode == 'DEGRADED':
                    failover_status = 2
                else:
                    failover_status = 3
                metrics['failover_mode_status'] = failover_status

                if vdisk.vmachine:
                    vm_name = vdisk.vmachine.name
                else:
                    vm_name = None

                points.append({
                    'measurement': 'vdisk_stats',
                    'tags': {
                        'disk_name': vdisk.name,
                        'vm_name': vm_name,
                        'storagerouter_name': StorageRouter(vdisk.storagerouter_guid).name,
                        'vpool_name': VPool(vdisk.vpool_guid).name,
                        'failover_mode': failover_mode
                    },
                    'fields': metrics
                })
            except Exception as ex:
                # Keep processing the remaining vDisks instead of aborting on the first error
                StatsmonkeyScheduledTaskController._logger.error(ex.message)

        if len(points) == 0:
            return None
        StatsmonkeyScheduledTaskController._send_stats(points)
        return points
コード例 #20
0
ファイル: vdisks.py プロジェクト: tjdeadfish/openvstorage
 def list(self, vmachineguid=None, vpoolguid=None):
     """
     Overview of all vDisks
     :param vmachineguid: Guid of the virtual machine to retrieve its disks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     """
     # Guard-clause style: most specific filter wins, full listing as fallback
     if vmachineguid is not None:
         return VMachine(vmachineguid).vdisks
     if vpoolguid is not None:
         return VPool(vpoolguid).vdisks
     return VDiskList.get_vdisks()
コード例 #21
0
ファイル: vdisks.py プロジェクト: dawnpower/framework
 def list(self, vmachineguid=None, vpoolguid=None):
     """
     Overview of all vDisks
     :param vmachineguid: Guid of the virtual machine to retrieve its disks
     :param vpoolguid: Guid of the vPool to retrieve its disks
     """
     if vmachineguid is not None:
         # Only the disks belonging to the given virtual machine
         machine = VMachine(vmachineguid)
         result = machine.vdisks
     elif vpoolguid is not None:
         # Only the disks living on the given vPool
         pool = VPool(vpoolguid)
         result = pool.vdisks
     else:
         result = VDiskList.get_vdisks()
     return result
コード例 #22
0
ファイル: mdsservice.py プロジェクト: cynpna/framework
    def mds_catchup():
        """
        Looks to catch up all MDS slaves which are too far behind
        Only one catch for every storagedriver is invoked
        """

        # Only for caching purposes
        def storagedriver_worker(queue, error_list):
            # type: (Queue.Queue, List[str]) -> None
            while not queue.empty():
                mds_catch_up = queue.get()  # type: MDSCatchUp
                try:
                    mds_catch_up.catch_up(async=False)
                except Exception as ex:
                    MDSServiceController._logger.exception(
                        'Exceptions while catching for vDisk {0}'.format(
                            mds_catch_up.vdisk.guid))
                    error_list.append(str(ex))
                finally:
                    queue.task_done()

        storagedriver_queues = {}
        for vdisk in VDiskList.get_vdisks():
            if vdisk.storagedriver_id not in storagedriver_queues:
                storagedriver_queues[vdisk.storagedriver_id] = Queue.Queue()
            # Putting it in the Queue ensures that the reference is still there so the caching is used optimally
            catch_up = MDSCatchUp(vdisk.guid)
            storagedriver_queues[vdisk.storagedriver_id].put(catch_up)

        errors = []
        threads = []
        for storadriver_id, storagedriver_queue in storagedriver_queues.iteritems(
        ):
            thread = Thread(target=storagedriver_worker,
                            args=(
                                storagedriver_queue,
                                errors,
                            ))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()

        if len(errors) > 0:
            raise RuntimeError(
                'Exception occurred while catching up: \n - {0}'.format(
                    '\n - '.join(errors)))
コード例 #23
0
def teardown():
    """
    Teardown for VirtualDisk package, will be executed when all started tests in this package have ended
    Removal actions of possible things left over after the test-run
    :return: None
    """
    name_of_vpool = General.get_config().get("vpool", "name")
    vpool = GeneralVPool.get_vpool_by_name(name_of_vpool)

    # Remove every leftover vDisk before touching the vPool itself
    for leftover_vdisk in VDiskList.get_vdisks():
        GeneralVDisk.delete_volume(leftover_vdisk, vpool, loop_device='loop0')

    if vpool is not None:
        GeneralVPool.remove_vpool(vpool)

    backend_name = General.get_config().get('backend', 'name')
    alba_backend = GeneralAlba.get_by_name(backend_name)
    if alba_backend is not None:
        GeneralAlba.unclaim_disks_and_remove_alba_backend(alba_backend=alba_backend)
コード例 #24
0
ファイル: test_vdisk.py プロジェクト: grimpy/openvstorage
    def test_delete(self):
        """
        Test the delete of a vDisk
            - Create 2 vDisks with identical names on 2 different vPools
            - Delete 1st vDisk and verify other still remains on correct vPool
            - Delete 2nd vDisk and verify no more volumes left
        """
        structure = Helper.build_service_structure(
            {'vpools': [1, 2],
             'domains': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        domains = structure['domains']
        storagedrivers = structure['storagedrivers']

        # Two vDisks sharing the same name, each hosted on its own vPool
        first_disk = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        second_disk = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))

        # Attach a domain to the first vDisk so its delete also covers that relation
        vdisk_domain = VDiskDomain()
        vdisk_domain.domain = domains[1]
        vdisk_domain.vdisk = first_disk
        vdisk_domain.save()

        # Delete vDisk1 and make some assertions
        VDiskController.delete(vdisk_guid=first_disk.guid)
        with self.assertRaises(ObjectNotFoundException):
            VDisk(first_disk.guid)
        self.assertEqual(len(VDiskController.list_volumes()), 1,
                         msg='Expected to find only 1 volume in Storage Driver list_volumes')
        self.assertIn(second_disk, VDiskList.get_vdisks(),
                      msg='vDisk2 should still be modeled')

        # Delete vDisk2 and make some assertions
        VDiskController.delete(vdisk_guid=second_disk.guid)
        with self.assertRaises(ObjectNotFoundException):
            VDisk(second_disk.guid)
        self.assertEqual(len(VDiskController.list_volumes()), 0,
                         msg='Expected to find no more volumes in Storage Driver list_volumes')
コード例 #25
0
ファイル: scheduledtask.py プロジェクト: grimpy/openvstorage
 def snapshot_all_vdisks():
     """
     Snapshots all vDisks
     """
     ScheduledTaskController._logger.info('[SSA] started')
     succeeded = []
     failed = []
     # Take an automatic, inconsistent snapshot of every modeled vDisk;
     # a failure on one vDisk must not stop the others.
     for vdisk in VDiskList.get_vdisks():
         guid = vdisk.guid
         try:
             VDiskController.create_snapshot(vdisk_guid=guid,
                                             metadata={'label': '',
                                                       'is_consistent': False,
                                                       'timestamp': str(int(time.time())),
                                                       'is_automatic': True,
                                                       'is_sticky': False})
         except Exception:
             ScheduledTaskController._logger.exception('Error taking snapshot for vDisk {0}'.format(guid))
             failed.append(guid)
         else:
             succeeded.append(guid)
     ScheduledTaskController._logger.info('[SSA] Snapshot has been taken for {0} vDisks, {1} failed.'.format(len(succeeded), len(failed)))
コード例 #26
0
    def test_list_volumes(self):
        """
        Test the list volumes functionality
            - Create 1 vDisk on vPool1 and create 3 vDisks on vPool2
            - List all volumes
            - List the volumes on vPool1
            - List the volumes on vPool2
        """
        structure = DalHelper.build_dal_structure({
            'vpools': [1, 2],
            'storagerouters': [1],
            'storagedrivers':
            [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
            'mds_services': [(1, 1), (2, 2)]
        }  # (<id>, <storagedriver_id>)
                                                  )
        vpools = structure['vpools']
        storagedrivers = structure['storagedrivers']

        vpool1 = vpools[1]
        vpool2 = vpools[2]
        # 1 volume on vPool1, 3 volumes on vPool2
        for name, driver in [('vdisk_1', storagedrivers[1]),
                             ('vdisk_1', storagedrivers[2]),
                             ('vdisk_2', storagedrivers[2]),
                             ('vdisk_3', storagedrivers[2])]:
            VDiskController.create_new(volume_name=name,
                                       volume_size=1024**4,
                                       storagedriver_guid=driver.guid)
        all_vdisks = VDiskList.get_vdisks()

        # List all volumes
        sd_volume_ids = set(VDiskController.list_volumes())
        model_volume_ids = {vdisk.volume_id for vdisk in all_vdisks}
        self.assertEqual(first=len(sd_volume_ids), second=4,
                         msg='Expected to retrieve all 4 volumes')
        self.assertEqual(first=sd_volume_ids, second=model_volume_ids,
                         msg='Volume IDs from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_volume_ids, model_volume_ids))

        # List all volumes of vpools[1]
        sd_vpool1_volume_ids = set(VDiskController.list_volumes(vpool_guid=vpool1.guid))
        model_vpool1_volume_ids = {vdisk.volume_id for vdisk in all_vdisks if vdisk.vpool == vpool1}
        self.assertEqual(first=len(sd_vpool1_volume_ids), second=1,
                         msg='Expected to retrieve 1 volume')
        self.assertEqual(first=sd_vpool1_volume_ids, second=model_vpool1_volume_ids,
                         msg='Volume IDs for vPool1 from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_vpool1_volume_ids, model_vpool1_volume_ids))

        # List all volumes of vpools[2]
        sd_vpool2_volume_ids = set(VDiskController.list_volumes(vpool_guid=vpool2.guid))
        model_vpool2_volume_ids = {vdisk.volume_id for vdisk in all_vdisks if vdisk.vpool == vpool2}
        self.assertEqual(first=len(sd_vpool2_volume_ids), second=3,
                         msg='Expected to retrieve 3 volumes')
        self.assertEqual(first=sd_vpool2_volume_ids, second=model_vpool2_volume_ids,
                         msg='Volume IDs for vPool2 from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_vpool2_volume_ids, model_vpool2_volume_ids))
コード例 #27
0
    def test_clone(self):
        """
        Test the clone functionality
            - Create a vDisk with name 'clone1'
            - Clone the vDisk and make some assertions
            - Attempt to clone again using same name and same devicename
            - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
            - Attempt to clone on Storage Driver without MDS service
            - Attempt to clone from snapshot which is not yet completely synced to backend
            - Attempt to delete the snapshot from which a clone was made
            - Clone the vDisk on another Storage Router
            - Clone another vDisk with name 'clone1' linked to another vPool
        """
        structure = DalHelper.build_dal_structure({
            'vpools': [1, 2],
            'storagerouters': [1, 2, 3],
            'storagedrivers':
            [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
            'mds_services': [(1, 1), (2, 2)]
        }  # (<id>, <storagedriver_id>)
                                                  )
        vpools = structure['vpools']
        mds_services = structure['mds_services']
        service_type = structure['service_types']['MetadataServer']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        self._roll_out_dtl_services(vpool=vpools[1],
                                    storagerouters=storagerouters)
        self._roll_out_dtl_services(vpool=vpools[2],
                                    storagerouters=storagerouters)

        # Basic clone scenario
        vdisk1 = VDisk(
            VDiskController.create_new(
                volume_name='vdisk_1',
                volume_size=1024**3,
                storagedriver_guid=storagedrivers[1].guid))
        clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid,
                                            name='clone1')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')

        # The clone is linked to the (automatically created) snapshot of vdisk1
        clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshot_ids[0])
        self.assertTrue(expr=len(clones) == 1,
                        msg='Expected to find 1 vDisk with parent snapshot')
        self.assertTrue(expr=len(vdisk1.child_vdisks) == 1,
                        msg='Expected to find 1 child vDisk')

        # The clone info must expose the new vDisk's guid, name and devicename
        for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
            self.assertTrue(
                expr=expected_key in clone1_info,
                msg='Expected to find key "{0}" in clone_info'.format(
                    expected_key))
        self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'],
                        msg='Guids do not match')
        self.assertTrue(expr=clones[0].name == clone1_info['name'],
                        msg='Names do not match')
        self.assertTrue(
            expr=clones[0].devicename == clone1_info['backingdevice'],
            msg='Device names do not match')

        # Attempt to clone again with same name
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(
            expr=len(vdisks) == 2,
            msg='Expected to find 2 vDisks after failed clone attempt 1')

        # Attempt to clone again with a name which will have identical devicename
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone1%')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(
            expr=len(vdisks) == 2,
            msg='Expected to find 2 vDisks after failed clone attempt 2')

        # Attempt to clone on Storage Router on which vPool is not extended
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2',
                                  storagerouter_guid=storagerouters[2].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(
            expr=len(vdisks) == 2,
            msg='Expected to find 2 vDisks after failed clone attempt 3')

        # Attempt to clone on non-existing Storage Driver
        storagedrivers[1].storagedriver_id = 'non-existing'
        storagedrivers[1].save()
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(
            expr=len(vdisks) == 2,
            msg='Expected to find 2 vDisks after failed clone attempt 4')
        # Restore the original storagedriver id for the following scenarios
        storagedrivers[1].storagedriver_id = '1'
        storagedrivers[1].save()

        # Attempt to clone on Storage Driver without MDS service
        mds_services[1].service.storagerouter = storagerouters[3]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid, name='clone2')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(
            expr=len(vdisks) == 2,
            msg='Expected to find 2 vDisks after failed clone attempt 5')
        # Move the MDS service back so subsequent clones can succeed again
        mds_services[1].service.storagerouter = storagerouters[1]
        mds_services[1].service.save()

        # Attempt to clone by providing snapshot_id not synced to backend
        self.assertTrue(expr=len(vdisk1.snapshots) == 1,
                        msg='Expected to find only 1 snapshot before cloning')
        self.assertTrue(
            expr=len(vdisk1.snapshot_ids) == 1,
            msg='Expected to find only 1 snapshot ID before cloning')
        # 'in_backend': False models a snapshot that is not yet synced
        metadata = {
            'label': 'label1',
            'timestamp': int(time.time()),
            'is_sticky': False,
            'in_backend': False,
            'is_automatic': True,
            'is_consistent': True
        }
        snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid,
                                                      metadata=metadata)
        self.assertTrue(expr=len(vdisk1.snapshots) == 2,
                        msg='Expected to find 2 snapshots')
        self.assertTrue(expr=len(vdisk1.snapshot_ids) == 2,
                        msg='Expected to find 2 snapshot IDs')
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2',
                                  snapshot_id=snapshot_id)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(
            expr=len(vdisks) == 2,
            msg='Expected to find 2 vDisks after failed clone attempt 6')

        # Update backend synced flag and retry
        vdisk1.storagedriver_client._set_snapshot_in_backend(
            vdisk1.volume_id, snapshot_id, True)
        vdisk1.invalidate_dynamics(['snapshots', 'snapshot_ids'])
        VDiskController.clone(vdisk_guid=vdisk1.guid,
                              name='clone2',
                              snapshot_id=snapshot_id)
        vdisks = VDiskList.get_vdisks()
        vdisk1.invalidate_dynamics()
        self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
        self.assertTrue(expr=len(vdisk1.child_vdisks) == 2,
                        msg='Expected to find 2 child vDisks')
        self.assertTrue(
            expr=len(vdisk1.snapshots) == 2,
            msg=
            'Expected to find 2 snapshots after cloning from a specified snapshot'
        )
        self.assertTrue(
            expr=len(vdisk1.snapshot_ids) == 2,
            msg=
            'Expected to find 2 snapshot IDs after cloning from a specified snapshot'
        )

        # Attempt to delete the snapshot that has clones
        with self.assertRaises(RuntimeError):
            VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                            snapshot_id=snapshot_id)

        # Clone on specific Storage Router
        # Model an extra StorageDriver + MDS service on storagerouter 2
        # so vpools[1] becomes available there
        storagedriver = StorageDriver()
        storagedriver.vpool = vpools[1]
        storagedriver.storagerouter = storagerouters[2]
        storagedriver.name = '3'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouters[2].ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = '3'
        storagedriver.ports = {
            'management': 1,
            'xmlrpc': 2,
            'dtl': 3,
            'edge': 4
        }
        storagedriver.save()

        s_id = '{0}-1'.format(storagedriver.storagerouter.name)
        service = Service()
        service.name = s_id
        service.storagerouter = storagedriver.storagerouter
        service.ports = [3]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storagedriver.vpool
        mds_service.save()

        clone3 = VDisk(
            VDiskController.clone(
                vdisk_guid=vdisk1.guid,
                name='clone3',
                storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
        self.assertTrue(
            expr=clone3.storagerouter_guid == storagerouters[2].guid,
            msg='Incorrect Storage Router on which the clone is attached')

        # Clone vDisk with existing name on another vPool
        vdisk2 = VDisk(
            VDiskController.create_new(
                volume_name='vdisk_1',
                volume_size=1024**3,
                storagedriver_guid=storagedrivers[2].guid))
        clone_vdisk2 = VDisk(
            VDiskController.clone(vdisk_guid=vdisk2.guid,
                                  name='clone1')['vdisk_guid'])
        self.assertTrue(
            expr=clone_vdisk2.vpool == vpools[2],
            msg='Cloned vDisk with name "clone1" was created on incorrect vPool'
        )
        self.assertTrue(expr=len([
            vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1'
        ]) == 2,
                        msg='Expected to find 2 vDisks with name "clone1"')

        # Attempt to clone without specifying snapshot and snapshot fails to sync to backend
        # StorageRouterClient.synced simulates the backend sync failing
        StorageRouterClient.synced = False
        vdisk2 = VDisk(
            VDiskController.create_new(
                volume_name='vdisk_2',
                volume_size=1024**3,
                storagedriver_guid=storagedrivers[1].guid))
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk2.guid, name='clone4')
        vdisk2.invalidate_dynamics()
        self.assertTrue(expr=len(vdisk2.snapshots) == 0,
                        msg='Expected to find 0 snapshots after clone failure')
        self.assertTrue(
            expr=len(vdisk2.snapshot_ids) == 0,
            msg='Expected to find 0 snapshot IDs after clone failure')
        self.assertTrue(expr=len(vdisk2.child_vdisks) == 0,
                        msg='Expected to find 0 children after clone failure')
        StorageRouterClient.synced = True
コード例 #28
0
    def test_create_new(self):
        """
        Test the create new volume functionality
            - Attempt to create a vDisk larger than 64 TiB
            - Create a vDisk of exactly 64 TiB
            - Attempt to create a vDisk with identical name
            - Attempt to create a vDisk with identical devicename
            - Create a vDisk with identical name on another vPool
        """
        structure = DalHelper.build_dal_structure({
            'vpools': [1, 2],
            'storagerouters': [1, 2],
            'storagedrivers':
            [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
            'mds_services': [(1, 1), (2, 2)]
        }  # (<id>, <storagedriver_id>)
                                                  )
        vpools = structure['vpools']
        mds_services = structure['mds_services']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        # Maximum supported volume size: 64 TiB
        size_64_tib = 64 * 1024**4

        # Verify maximum size of 64TiB
        vdisk_name_1 = 'vdisk_1'
        vdisk_name_2 = 'vdisk_2'
        with self.assertRaises(ValueError):
            VDiskController.create_new(
                volume_name=vdisk_name_1,
                volume_size=size_64_tib + 1,
                storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 0,
                        msg='Expected to find 0 vDisks after failure 1')

        # Create volume of maximum size
        VDiskController.create_new(volume_name=vdisk_name_1,
                                   volume_size=size_64_tib,
                                   storagedriver_guid=storagedrivers[1].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 1, msg='Expected to find 1 vDisk')
        self.assertTrue(
            expr=vdisks[0].storagerouter_guid == storagerouters[1].guid,
            msg='Storage Router does not match expected value')
        self.assertTrue(expr=vdisks[0].size == size_64_tib,
                        msg='Size does not match expected value')
        self.assertTrue(expr=vdisks[0].name == vdisk_name_1,
                        msg='Name does not match expected value')
        self.assertTrue(expr=vdisks[0].vpool == vpools[1],
                        msg='vPool does not match expected value')
        self.assertTrue(expr=vdisks[0].devicename ==
                        VDiskController.clean_devicename(vdisk_name_1),
                        msg='Devicename does not match expected value')

        # Attempt to create same volume on same vPool
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(
                volume_name=vdisk_name_1,
                volume_size=size_64_tib,
                storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 1,
                        msg='Expected to find 1 vDisk after failure 2')

        # Attempt to create volume with identical devicename on same vPool
        # ('%^$' is stripped by clean_devicename, yielding the same devicename)
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(
                volume_name='{0}%^$'.format(vdisk_name_1),
                volume_size=size_64_tib,
                storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 1,
                        msg='Expected to find 1 vDisk after failure 3')

        # Create same volume on another vPool
        vdisk2 = VDisk(
            VDiskController.create_new(
                volume_name=vdisk_name_2,
                volume_size=size_64_tib,
                storagedriver_guid=storagedrivers[2].guid))
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')
        self.assertTrue(
            expr=vdisk2.storagerouter_guid == storagerouters[1].guid,
            msg='Storage Router does not match expected value')
        self.assertTrue(expr=vdisk2.size == size_64_tib,
                        msg='Size does not match expected value')
        self.assertTrue(expr=vdisk2.name == vdisk_name_2,
                        msg='Name does not match expected value')
        self.assertTrue(expr=vdisk2.vpool == vpools[2],
                        msg='vPool does not match expected value')
        self.assertTrue(expr=vdisk2.devicename ==
                        VDiskController.clean_devicename(vdisk_name_2),
                        msg='Devicename does not match expected value')

        # Attempt to create vDisk on Storage Driver without MDS service
        # (moving the MDS service away leaves storagedrivers[1] without one)
        mds_services[1].service.storagerouter = storagerouters[2]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(
                volume_name='vdisk_3',
                volume_size=size_64_tib,
                storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 2,
                        msg='Expected to find 2 vDisks after failure 4')
コード例 #29
0
    def test_create_from_template(self):
        """
        Test the create from template functionality
            - Create a vDisk and convert to vTemplate
            - Attempt to create from template from a vDisk which is not a vTemplate
            - Create from template basic scenario
            - Attempt to create from template using same name
            - Attempt to create from template using same devicename
            - Attempt to create from template using Storage Router on which vPool is not extended
            - Attempt to create from template using non-existing Storage Driver
            - Attempt to create from template using Storage Driver which does not have an MDS service
            - Create from template on another Storage Router
            - Create from template without specifying a Storage Router
        """
        structure = DalHelper.build_dal_structure({
            'vpools': [1],
            'storagerouters': [1, 2, 3],
            'storagedrivers':
            [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
            'mds_services': [(1, 1), (2, 2)]
        }  # (<id>, <storagedriver_id>)
                                                  )
        vpool = structure['vpools'][1]
        mds_services = structure['mds_services']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

        template = VDisk(
            VDiskController.create_new(
                volume_name='vdisk_1',
                volume_size=1024**3,
                storagedriver_guid=storagedrivers[1].guid))
        vdisk_name = 'from_template_1'
        VDiskController.set_as_template(vdisk_guid=template.guid)
        self.assertTrue(expr=template.is_vtemplate,
                        msg='Dynamic property "is_vtemplate" should be True')

        # Create from vDisk which is not a vTemplate
        # (force the mocked client to report object type 'BASE')
        template.storagedriver_client._set_object_type(template.volume_id,
                                                       'BASE')
        template.invalidate_dynamics(['info', 'is_vtemplate'])
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(
                vdisk_guid=template.guid,
                name=vdisk_name,
                storagerouter_guid=storagerouters[1].guid)

        # Create from template
        template.storagedriver_client._set_object_type(template.volume_id,
                                                       'TEMPLATE')
        template.invalidate_dynamics(['info', 'is_vtemplate'])
        info = VDiskController.create_from_template(
            vdisk_guid=template.guid,
            name=vdisk_name,
            storagerouter_guid=storagerouters[1].guid)
        expected_keys = ['vdisk_guid', 'name', 'backingdevice']
        self.assertEqual(
            first=set(info.keys()),
            second=set(expected_keys),
            msg='Create from template returned not the expected keys')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks')
        vdisk = [vdisk for vdisk in vdisks if vdisk.is_vtemplate is False][0]
        self.assertTrue(
            expr=vdisk.name == vdisk_name,
            msg='vDisk name is incorrect. Expected: {0}  -  Actual: {1}'.
            format(vdisk_name, vdisk.name))
        self.assertTrue(expr=vdisk.parent_vdisk == template,
                        msg='The parent of the vDisk is incorrect')

        # Attempt to create from template using same name
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(
                vdisk_guid=template.guid,
                name=vdisk_name,
                storagerouter_guid=storagerouters[1].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2,
                        msg='Expected 2 vDisks after failed attempt 1')

        # Attempt to create from template using same devicename
        # ('^...$*' is stripped by devicename cleaning, causing a collision)
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(
                vdisk_guid=template.guid,
                name='^{0}$*'.format(vdisk_name),
                storagerouter_guid=storagerouters[1].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2,
                        msg='Expected 2 vDisks after failed attempt 2')

        # Attempt to create from template on Storage Router on which vPool is not extended
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(
                vdisk_guid=template.guid,
                name='from_template_2',
                storagerouter_guid=storagerouters[3].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2,
                        msg='Expected 2 vDisks after failed attempt 3')

        # Attempt to create on non-existing Storage Driver
        storagedrivers[1].storagedriver_id = 'non-existing'
        storagedrivers[1].save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid,
                                                 name='from_template_2')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2,
                        msg='Expected 2 vDisks after failed attempt 4')
        # Restore the original storagedriver id for the following scenarios
        storagedrivers[1].storagedriver_id = '1'
        storagedrivers[1].save()

        # Attempt to create on Storage Driver without MDS service
        mds_services[1].service.storagerouter = storagerouters[3]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(
                vdisk_guid=template.guid,
                name='from_template_2',
                storagerouter_guid=storagerouters[1].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2,
                        msg='Expected 2 vDisks after failed attempt 5')
        # Move the MDS service back so the next creations can succeed
        mds_services[1].service.storagerouter = storagerouters[1]
        mds_services[1].service.save()

        # Create from template on another Storage Router
        vdisk2 = VDisk(
            VDiskController.create_from_template(
                vdisk_guid=template.guid,
                name='from_template_2',
                storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
        self.assertTrue(
            expr=vdisk2.storagerouter_guid == storagerouters[2].guid,
            msg='Expected vdisk2 to be hosted by Storage Router 2')

        # Create from template without specifying Storage Router
        # (defaults to the Storage Router hosting the template)
        vdisk3 = VDisk(
            VDiskController.create_from_template(
                vdisk_guid=template.guid,
                name='from_template_3')['vdisk_guid'])
        self.assertTrue(
            expr=vdisk3.storagerouter_guid == template.storagerouter_guid,
            msg='Expected vdisk3 to be hosted by Storage Router 1')
コード例 #30
0
ファイル: test_vdisk.py プロジェクト: grimpy/openvstorage
    def test_create_from_template(self):
        """
        Verify VDiskController.create_from_template:
            - Build a vDisk and promote it to a vTemplate
            - Creating from a vDisk which is not a vTemplate must raise
            - Basic create-from-template scenario
            - Re-using the same name must raise
            - Re-using the same devicename must raise
            - Targeting a Storage Router on which the vPool is not extended must raise
            - Targeting a non-existing Storage Driver must raise
            - Targeting a Storage Driver without an MDS service must raise
            - Create from template on a different Storage Router
            - Create from template without specifying a Storage Router
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1, 2, 3],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        vpool = structure['vpools'][1]
        mds_services = structure['mds_services']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

        # Create the source vDisk and promote it to a vTemplate
        template = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        vdisk_name = 'from_template_1'
        VDiskController.set_as_template(vdisk_guid=template.guid)
        self.assertTrue(template.is_vtemplate, 'Dynamic property "is_vtemplate" should be True')

        # A plain vDisk (object type BASE) cannot be used as a template source
        template.storagedriver_client._set_object_type(template.volume_id, 'BASE')
        template.invalidate_dynamics(['info', 'is_vtemplate'])
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)

        # Happy path: restore TEMPLATE object type and create from it
        template.storagedriver_client._set_object_type(template.volume_id, 'TEMPLATE')
        template.invalidate_dynamics(['info', 'is_vtemplate'])
        result = VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)
        self.assertEqual(first=set(result.keys()),
                         second=set(['vdisk_guid', 'name', 'backingdevice']),
                         msg='Create from template returned not the expected keys')
        disk_list = VDiskList.get_vdisks()
        self.assertTrue(len(disk_list) == 2, 'Expected 2 vDisks')
        new_vdisk = [candidate for candidate in disk_list if candidate.is_vtemplate is False][0]
        self.assertTrue(new_vdisk.name == vdisk_name, 'vDisk name is incorrect. Expected: {0}  -  Actual: {1}'.format(vdisk_name, new_vdisk.name))
        self.assertTrue(new_vdisk.parent_vdisk == template, 'The parent of the vDisk is incorrect')

        # Re-using the exact same name is refused
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)
        disk_list = VDiskList.get_vdisks()
        self.assertTrue(len(disk_list) == 2, 'Expected 2 vDisks after failed attempt 1')

        # A different name that maps onto the same devicename is refused as well
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name='^{0}$*'.format(vdisk_name), storagerouter_guid=storagerouters[1].guid)
        disk_list = VDiskList.get_vdisks()
        self.assertTrue(len(disk_list) == 2, 'Expected 2 vDisks after failed attempt 2')

        # Storage Router 3 does not have the vPool extended onto it
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[3].guid)
        disk_list = VDiskList.get_vdisks()
        self.assertTrue(len(disk_list) == 2, 'Expected 2 vDisks after failed attempt 3')

        # Temporarily break the Storage Driver id to simulate a non-existing driver
        storagedrivers[1].storagedriver_id = 'non-existing'
        storagedrivers[1].save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2')
        disk_list = VDiskList.get_vdisks()
        self.assertTrue(len(disk_list) == 2, 'Expected 2 vDisks after failed attempt 4')
        storagedrivers[1].storagedriver_id = '1'
        storagedrivers[1].save()

        # Move the MDS service away so the target Storage Driver has none, then restore
        mds_services[1].service.storagerouter = storagerouters[3]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[1].guid)
        disk_list = VDiskList.get_vdisks()
        self.assertTrue(len(disk_list) == 2, 'Expected 2 vDisks after failed attempt 5')
        mds_services[1].service.storagerouter = storagerouters[1]
        mds_services[1].service.save()

        # Explicitly targeting Storage Router 2 works
        vdisk2 = VDisk(VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
        self.assertTrue(vdisk2.storagerouter_guid == storagerouters[2].guid, 'Expected vdisk2 to be hosted by Storage Router 2')

        # Without a Storage Router, the template's own Storage Router is used
        vdisk3 = VDisk(VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_3')['vdisk_guid'])
        self.assertTrue(vdisk3.storagerouter_guid == template.storagerouter_guid, 'Expected vdisk3 to be hosted by Storage Router 1')
コード例 #31
0
ファイル: vdisk.py プロジェクト: jamie-liu/openvstorage
    def dtl_checkup(vpool_guid=None, vdisk_guid=None, storagerouters_to_exclude=None):
        """
        Check DTL for all volumes
        :param vpool_guid:                vPool to check the DTL configuration of all its disks
        :type vpool_guid:                 String

        :param vdisk_guid:                Virtual Disk to check its DTL configuration
        :type vdisk_guid:                 String

        :param storagerouters_to_exclude: Storage Routers to exclude from possible targets
        :type storagerouters_to_exclude:  List

        :return:                          None
        """
        # Scoping is exclusive: either one vPool, one vDisk, or (neither given) every vDisk
        if vpool_guid is not None and vdisk_guid is not None:
            raise ValueError('vpool and vdisk are mutually exclusive')
        if storagerouters_to_exclude is None:
            storagerouters_to_exclude = []

        # Local import, presumably to avoid a circular dependency between controllers -- confirm
        from ovs.lib.vpool import VPoolController

        logger.info('DTL checkup started')
        # Every vPool DTL configuration retrieved below is verified against these keys
        required_params = {'dtl_mode': (str, StorageDriverClient.VPOOL_DTL_MODE_MAP.keys()),
                           'dtl_enabled': (bool, None)}
        vdisk = VDisk(vdisk_guid) if vdisk_guid else None
        vpool = VPool(vpool_guid) if vpool_guid else None
        errors_found = False
        root_client_map = {}  # Cache of SSHClient instances, keyed per Storage Router
        vpool_dtl_config_cache = {}  # Cache of DTL configuration, keyed per vPool guid
        # Select the vDisks to verify based on the scoping arguments above
        vdisks = VDiskList.get_vdisks() if vdisk is None and vpool is None else vpool.vdisks if vpool is not None else [vdisk]
        for vdisk in vdisks:
            logger.info('    Verifying vDisk {0} with guid {1}'.format(vdisk.name, vdisk.guid))
            vdisk.invalidate_dynamics(['storagedriver_client', 'storagerouter_guid'])
            # No storagedriver client means the volume is not reachable; skip it
            if vdisk.storagedriver_client is None:
                continue

            vpool = vdisk.vpool
            # DTL settings on a vPool are permanent, so fetch and validate them only once per vPool
            if vpool.guid not in vpool_dtl_config_cache:
                vpool_config = VPoolController.get_configuration(vpool.guid)  # Config on vPool is permanent for DTL settings
                vpool_dtl_config_cache[vpool.guid] = vpool_config
                Toolbox.verify_required_params(required_params, vpool_config)

            volume_id = str(vdisk.volume_id)
            vpool_config = vpool_dtl_config_cache[vpool.guid]
            dtl_vpool_enabled = vpool_config['dtl_enabled']
            try:
                current_dtl_config = vdisk.storagedriver_client.get_dtl_config(volume_id)
                current_dtl_config_mode = vdisk.storagedriver_client.get_dtl_config_mode(volume_id)
            except RuntimeError as rte:
                # Can occur when a volume has not been stolen yet from a dead node
                logger.error('Retrieving DTL configuration from storage driver failed with error: {0}'.format(rte))
                errors_found = True
                continue

            # Globally disabled for the vPool: clear any manual config and move on
            if dtl_vpool_enabled is False and (current_dtl_config is None or current_dtl_config.host == 'null'):
                logger.info('    DTL is globally disabled for vPool {0} with guid {1}'.format(vpool.name, vpool.guid))
                vdisk.storagedriver_client.set_manual_dtl_config(volume_id, None)
                continue
            # Manually disabled for this particular vDisk: respect that and move on
            elif current_dtl_config_mode == DTLConfigMode.MANUAL and (current_dtl_config is None or current_dtl_config.host == 'null'):
                logger.info('    DTL is disabled for virtual disk {0} with guid {1}'.format(vdisk.name, vdisk.guid))
                continue

            storage_router = StorageRouter(vdisk.storagerouter_guid)
            available_storagerouters = []
            # 1. Check available storage routers in the backup failure domain
            if storage_router.secondary_failure_domain is not None:
                for storagerouter in storage_router.secondary_failure_domain.primary_storagerouters:
                    if vpool.guid not in storagerouter.vpools_guids:
                        continue
                    if storagerouter not in root_client_map:
                        try:
                            # NOTE(review): username value looks scrubbed in this snippet ('******');
                            # upstream presumably uses 'root' -- confirm before relying on this code
                            root_client = SSHClient(storagerouter, username='******')
                        except UnableToConnectException:
                            logger.warning('    Storage Router with IP {0} of vDisk {1} is not reachable'.format(storagerouter.ip, vdisk.name))
                            continue
                        root_client_map[storagerouter] = root_client
                    else:
                        root_client = root_client_map[storagerouter]
                    # Only Storage Routers with a running DTL service for this vPool qualify
                    if ServiceManager.get_service_status('dtl_{0}'.format(vpool.name), client=root_client) is True:
                        available_storagerouters.append(storagerouter)
            # 2. Check available storage routers in the same failure domain as current storage router
            if len(available_storagerouters) == 0:
                for storagerouter in storage_router.primary_failure_domain.primary_storagerouters:
                    # The vDisk's own Storage Router is never a valid DTL target
                    if vpool.guid not in storagerouter.vpools_guids or storagerouter == storage_router:
                        continue
                    if storagerouter not in root_client_map:
                        try:
                            # NOTE(review): username scrubbed here as well -- see note above
                            root_client = SSHClient(storagerouter, username='******')
                        except UnableToConnectException:
                            logger.warning('    Storage Router with IP {0} of vDisk {1} is not reachable'.format(storagerouter.ip, vdisk.name))
                            continue
                        root_client_map[storagerouter] = root_client
                    else:
                        root_client = root_client_map[storagerouter]
                    if ServiceManager.get_service_status('dtl_{0}'.format(vpool.name), client=root_client) is True:
                        available_storagerouters.append(storagerouter)

            # Remove storage routers to exclude
            for sr_guid in storagerouters_to_exclude:
                sr_to_exclude = StorageRouter(sr_guid)
                if sr_to_exclude in available_storagerouters:
                    available_storagerouters.remove(sr_to_exclude)

            # No candidate left: clear the manual DTL configuration for this volume
            if len(available_storagerouters) == 0:
                logger.info('    No Storage Routers could be found as valid DTL target')
                vdisk.storagedriver_client.set_manual_dtl_config(volume_id, None)
                continue

            # Check whether reconfiguration is required
            reconfigure_required = False
            if current_dtl_config is None:
                logger.info('        No DTL configuration found, but there are Storage Routers available')
                reconfigure_required = True
            elif current_dtl_config_mode == DTLConfigMode.AUTOMATIC:
                logger.info('        DTL configuration set to AUTOMATIC, switching to manual')
                reconfigure_required = True
            else:
                dtl_host = current_dtl_config.host
                dtl_port = current_dtl_config.port
                storage_drivers = [sd for sd in vpool.storagedrivers if sd.storagerouter.ip == dtl_host]

                logger.info('        DTL host: {0}'.format(dtl_host or '-'))
                logger.info('        DTL port: {0}'.format(dtl_port or '-'))
                if dtl_host not in [sr.ip for sr in available_storagerouters]:
                    logger.info('        Host not in available Storage Routers')
                    reconfigure_required = True
                # ports[2] is treated as the DTL port of a Storage Driver -- TODO confirm ordering
                elif dtl_port != storage_drivers[0].ports[2]:
                    logger.info('        Configured port does not match expected port ({0} vs {1})'.format(dtl_port, storage_drivers[0].ports[2]))
                    reconfigure_required = True

            # Perform the reconfiguration
            if reconfigure_required is True:
                logger.info('        Reconfigure required')
                # Pick a random candidate to spread DTL targets across the available Storage Routers
                index = random.randint(0, len(available_storagerouters) - 1)
                dtl_target = available_storagerouters[index]
                storage_drivers = [sd for sd in vpool.storagedrivers if sd.storagerouter == dtl_target]
                if len(storage_drivers) == 0:
                    raise ValueError('Could not retrieve related storagedriver')

                port = storage_drivers[0].ports[2]
                vpool_dtl_mode = vpool_config.get('dtl_mode', StorageDriverClient.FRAMEWORK_DTL_ASYNC)
                logger.info('        DTL config that will be set -->  Host: {0}, Port: {1}, Mode: {2}'.format(dtl_target.ip, port, vpool_dtl_mode))
                dtl_config = DTLConfig(str(dtl_target.ip), port, StorageDriverClient.VDISK_DTL_MODE_MAP[vpool_dtl_mode])
                vdisk.storagedriver_client.set_manual_dtl_config(volume_id, dtl_config)
        # Fail loudly at the end so one broken volume does not abort the whole checkup
        if errors_found is True:
            logger.error('DTL checkup ended with errors')
            raise Exception('DTL checkup failed with errors. Please check /var/log/ovs/lib.log for more information')
        logger.info('DTL checkup ended')
コード例 #32
0
ファイル: ovsmigrator.py プロジェクト: th3architect/framework
    def migrate(previous_version):
        """
        Migrates from any version to any version, running all migrations required
        If previous_version is for example 0 and this script is at
        version 3 it will execute two steps:
          - 1 > 2
          - 2 > 3
        @param previous_version: The previous version from which to start the migration.
        """

        working_version = previous_version

        # Version 1 introduced:
        # - The datastore is still empty, add defaults
        if working_version < 1:
            from ovs.dal.hybrids.user import User
            from ovs.dal.hybrids.group import Group
            from ovs.dal.hybrids.role import Role
            from ovs.dal.hybrids.client import Client
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.hybrids.j_rolegroup import RoleGroup
            from ovs.dal.hybrids.j_roleclient import RoleClient
            from ovs.dal.hybrids.backendtype import BackendType
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.hybrids.branding import Branding
            from ovs.dal.lists.backendtypelist import BackendTypeList

            # Create groups
            admin_group = Group()
            admin_group.name = 'administrators'
            admin_group.description = 'Administrators'
            admin_group.save()
            viewers_group = Group()
            viewers_group.name = 'viewers'
            viewers_group.description = 'Viewers'
            viewers_group.save()

            # Create users
            admin = User()
            admin.username = '******'
            admin.password = hashlib.sha256('admin').hexdigest()
            admin.is_active = True
            admin.group = admin_group
            admin.save()

            # Create internal OAuth 2 clients
            admin_pw_client = Client()
            admin_pw_client.ovs_type = 'INTERNAL'
            admin_pw_client.grant_type = 'PASSWORD'
            admin_pw_client.user = admin
            admin_pw_client.save()
            admin_cc_client = Client()
            admin_cc_client.ovs_type = 'INTERNAL'
            admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
            admin_cc_client.client_secret = ''.join(
                random.choice(string.ascii_letters + string.digits +
                              '|_=+*#@!/-[]{}<>.?,\'";:~') for _ in range(128))
            admin_cc_client.user = admin
            admin_cc_client.save()

            # Create roles
            read_role = Role()
            read_role.code = 'read'
            read_role.name = 'Read'
            read_role.description = 'Can read objects'
            read_role.save()
            write_role = Role()
            write_role.code = 'write'
            write_role.name = 'Write'
            write_role.description = 'Can write objects'
            write_role.save()
            manage_role = Role()
            manage_role.code = 'manage'
            manage_role.name = 'Manage'
            manage_role.description = 'Can manage the system'
            manage_role.save()

            # Attach groups to roles
            mapping = [(admin_group, [read_role, write_role, manage_role]),
                       (viewers_group, [read_role])]
            for setting in mapping:
                for role in setting[1]:
                    rolegroup = RoleGroup()
                    rolegroup.group = setting[0]
                    rolegroup.role = role
                    rolegroup.save()
                for user in setting[0].users:
                    for role in setting[1]:
                        for client in user.clients:
                            roleclient = RoleClient()
                            roleclient.client = client
                            roleclient.role = role
                            roleclient.save()

            # Add backends
            for backend_type_info in [('Ceph', 'ceph_s3'),
                                      ('Amazon', 'amazon_s3'),
                                      ('Swift', 'swift_s3'),
                                      ('Local', 'local'),
                                      ('Distributed', 'distributed'),
                                      ('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [
                    ServiceType.SERVICE_TYPES.MD_SERVER,
                    ServiceType.SERVICE_TYPES.ALBA_PROXY,
                    ServiceType.SERVICE_TYPES.ARAKOON
            ]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

            # Branding
            branding = Branding()
            branding.name = 'Default'
            branding.description = 'Default bootstrap theme'
            branding.css = 'bootstrap-default.min.css'
            branding.productname = 'Open vStorage'
            branding.is_default = True
            branding.save()
            slate = Branding()
            slate.name = 'Slate'
            slate.description = 'Dark bootstrap theme'
            slate.css = 'bootstrap-slate.min.css'
            slate.productname = 'Open vStorage'
            slate.is_default = False
            slate.save()

            # Failure Domain
            failure_domain = FailureDomain()
            failure_domain.name = 'Default'
            failure_domain.save()

            # We're now at version 1
            working_version = 1

        # Version 2 introduced:
        # - new Descriptor format
        if working_version < 2:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[
                            entry] and 'hybrids' in data[entry]['source']:
                        filename = data[entry]['source']
                        if not filename.startswith('/'):
                            filename = '/opt/OpenvStorage/ovs/dal/{0}'.format(
                                filename)
                        module = imp.load_source(data[entry]['name'], filename)
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            # We're now at version 2
            working_version = 2

        # Version 3 introduced:
        # - new Descriptor format
        if working_version < 3:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry],
                                  dict) and 'source' in data[entry]:
                        module = imp.load_source(data[entry]['name'],
                                                 data[entry]['source'])
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            working_version = 3

        # Version 4 introduced:
        # - Flexible SSD layout
        if working_version < 4:
            import os
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.hybrids.j_storagedriverpartition import StorageDriverPartition
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
            for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.MD_SERVER).services:
                mds_service = service.mds_service
                storagedriver = None
                for current_storagedriver in service.storagerouter.storagedrivers:
                    if current_storagedriver.vpool_guid == mds_service.vpool_guid:
                        storagedriver = current_storagedriver
                        break
                tasks = {}
                if storagedriver._data.get('mountpoint_md'):
                    tasks['{0}/mds_{1}_{2}'.format(
                        storagedriver._data.get('mountpoint_md'),
                        storagedriver.vpool.name, mds_service.number)] = (
                            DiskPartition.ROLES.DB,
                            StorageDriverPartition.SUBROLE.MDS)
                if storagedriver._data.get('mountpoint_temp'):
                    tasks['{0}/mds_{1}_{2}'.format(
                        storagedriver._data.get('mountpoint_temp'),
                        storagedriver.vpool.name, mds_service.number)] = (
                            DiskPartition.ROLES.SCRUB,
                            StorageDriverPartition.SUBROLE.MDS)
                for disk in service.storagerouter.disks:
                    for partition in disk.partitions:
                        for directory, (role, subrole) in tasks.iteritems():
                            with remote(storagedriver.storagerouter.ip, [os],
                                        username='******') as rem:
                                stat_dir = directory
                                while not rem.os.path.exists(
                                        stat_dir) and stat_dir != '/':
                                    stat_dir = stat_dir.rsplit('/', 1)[0]
                                    if not stat_dir:
                                        stat_dir = '/'
                                inode = rem.os.stat(stat_dir).st_dev
                            if partition.inode == inode:
                                if role not in partition.roles:
                                    partition.roles.append(role)
                                    partition.save()
                                number = 0
                                migrated = False
                                for sd_partition in storagedriver.partitions:
                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                        if sd_partition.mds_service == mds_service:
                                            migrated = True
                                            break
                                        if sd_partition.partition_guid == partition.guid:
                                            number = max(
                                                sd_partition.number, number)
                                if migrated is False:
                                    sd_partition = StorageDriverPartition()
                                    sd_partition.role = role
                                    sd_partition.sub_role = subrole
                                    sd_partition.partition = partition
                                    sd_partition.storagedriver = storagedriver
                                    sd_partition.mds_service = mds_service
                                    sd_partition.size = None
                                    sd_partition.number = number + 1
                                    sd_partition.save()
                                    client = SSHClient(
                                        storagedriver.storagerouter,
                                        username='******')
                                    path = sd_partition.path.rsplit('/', 1)[0]
                                    if path:
                                        client.dir_create(path)
                                        client.dir_chown(path, 'ovs', 'ovs')
                                    client.dir_create(directory)
                                    client.dir_chown(directory, 'ovs', 'ovs')
                                    client.symlink(
                                        {sd_partition.path: directory})
            for storagedriver in StorageDriverList.get_storagedrivers():
                migrated_objects = {}
                for disk in storagedriver.storagerouter.disks:
                    for partition in disk.partitions:
                        # Process all mountpoints that are unique and don't have a specified size
                        for key, (role, sr_info) in {
                                'mountpoint_md': (DiskPartition.ROLES.DB, {
                                    'metadata_{0}':
                                    StorageDriverPartition.SUBROLE.MD,
                                    'tlogs_{0}':
                                    StorageDriverPartition.SUBROLE.TLOG
                                }),
                                'mountpoint_fragmentcache':
                            (DiskPartition.ROLES.WRITE, {
                                'fcache_{0}':
                                StorageDriverPartition.SUBROLE.FCACHE
                            }),
                                'mountpoint_foc': (DiskPartition.ROLES.WRITE, {
                                    'fd_{0}':
                                    StorageDriverPartition.SUBROLE.FD,
                                    'dtl_{0}':
                                    StorageDriverPartition.SUBROLE.DTL
                                }),
                                'mountpoint_dtl': (DiskPartition.ROLES.WRITE, {
                                    'fd_{0}':
                                    StorageDriverPartition.SUBROLE.FD,
                                    'dtl_{0}':
                                    StorageDriverPartition.SUBROLE.DTL
                                }),
                                'mountpoint_readcaches':
                            (DiskPartition.ROLES.READ, {
                                '': None
                            }),
                                'mountpoint_writecaches':
                            (DiskPartition.ROLES.WRITE, {
                                'sco_{0}': StorageDriverPartition.SUBROLE.SCO
                            })
                        }.iteritems():
                            if key in storagedriver._data:
                                is_list = isinstance(storagedriver._data[key],
                                                     list)
                                entries = storagedriver._data[
                                    key][:] if is_list is True else [
                                        storagedriver._data[key]
                                    ]
                                for entry in entries:
                                    if not entry:
                                        if is_list:
                                            storagedriver._data[key].remove(
                                                entry)
                                            if len(storagedriver._data[key]
                                                   ) == 0:
                                                del storagedriver._data[key]
                                        else:
                                            del storagedriver._data[key]
                                    else:
                                        with remote(
                                                storagedriver.storagerouter.ip,
                                            [os],
                                                username='******') as rem:
                                            inode = rem.os.stat(entry).st_dev
                                        if partition.inode == inode:
                                            if role not in partition.roles:
                                                partition.roles.append(role)
                                                partition.save()
                                            for folder, subrole in sr_info.iteritems(
                                            ):
                                                number = 0
                                                migrated = False
                                                for sd_partition in storagedriver.partitions:
                                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                                        if sd_partition.partition_guid == partition.guid:
                                                            number = max(
                                                                sd_partition.
                                                                number, number)
                                                if migrated is False:
                                                    sd_partition = StorageDriverPartition(
                                                    )
                                                    sd_partition.role = role
                                                    sd_partition.sub_role = subrole
                                                    sd_partition.partition = partition
                                                    sd_partition.storagedriver = storagedriver
                                                    sd_partition.size = None
                                                    sd_partition.number = number + 1
                                                    sd_partition.save()
                                                    if folder:
                                                        source = '{0}/{1}'.format(
                                                            entry,
                                                            folder.format(
                                                                storagedriver.
                                                                vpool.name))
                                                    else:
                                                        source = entry
                                                    client = SSHClient(
                                                        storagedriver.
                                                        storagerouter,
                                                        username='******')
                                                    path = sd_partition.path.rsplit(
                                                        '/', 1)[0]
                                                    if path:
                                                        client.dir_create(path)
                                                        client.dir_chown(
                                                            path, 'ovs', 'ovs')
                                                    client.symlink({
                                                        sd_partition.path:
                                                        source
                                                    })
                                                    migrated_objects[
                                                        source] = sd_partition
                                            if is_list:
                                                storagedriver._data[
                                                    key].remove(entry)
                                                if len(storagedriver._data[key]
                                                       ) == 0:
                                                    del storagedriver._data[
                                                        key]
                                            else:
                                                del storagedriver._data[key]
                                            storagedriver.save()
                if 'mountpoint_bfs' in storagedriver._data:
                    storagedriver.mountpoint_dfs = storagedriver._data[
                        'mountpoint_bfs']
                    if not storagedriver.mountpoint_dfs:
                        storagedriver.mountpoint_dfs = None
                    del storagedriver._data['mountpoint_bfs']
                    storagedriver.save()
                if 'mountpoint_temp' in storagedriver._data:
                    del storagedriver._data['mountpoint_temp']
                    storagedriver.save()
                if migrated_objects:
                    print 'Loading sizes'
                    config = StorageDriverConfiguration(
                        'storagedriver', storagedriver.vpool_guid,
                        storagedriver.storagedriver_id)
                    config.load()
                    for readcache in config.configuration.get(
                            'content_addressed_cache',
                        {}).get('clustercache_mount_points', []):
                        path = readcache.get('path', '').rsplit('/', 1)[0]
                        size = int(readcache['size'].strip(
                            'KiB')) * 1024 if 'size' in readcache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size)
                            migrated_objects[path].save()
                    for writecache in config.configuration.get(
                            'scocache', {}).get('scocache_mount_points', []):
                        path = writecache.get('path', '')
                        size = int(writecache['size'].strip(
                            'KiB')) * 1024 if 'size' in writecache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size)
                            migrated_objects[path].save()

            working_version = 4

        # Version 5 introduced:
        # - Failure Domains
        if working_version < 5:
            import os
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.lists.failuredomainlist import FailureDomainList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            failure_domains = FailureDomainList.get_failure_domains()
            if len(failure_domains) > 0:
                failure_domain = failure_domains[0]
            else:
                failure_domain = FailureDomain()
                failure_domain.name = 'Default'
                failure_domain.save()
            for storagerouter in StorageRouterList.get_storagerouters():
                change = False
                if storagerouter.primary_failure_domain is None:
                    storagerouter.primary_failure_domain = failure_domain
                    change = True
                if storagerouter.rdma_capable is None:
                    client = SSHClient(storagerouter, username='******')
                    rdma_capable = False
                    with remote(client.ip, [os], username='******') as rem:
                        for root, dirs, files in rem.os.walk(
                                '/sys/class/infiniband'):
                            for directory in dirs:
                                ports_dir = '/'.join(
                                    [root, directory, 'ports'])
                                if not rem.os.path.exists(ports_dir):
                                    continue
                                for sub_root, sub_dirs, _ in rem.os.walk(
                                        ports_dir):
                                    if sub_root != ports_dir:
                                        continue
                                    for sub_directory in sub_dirs:
                                        state_file = '/'.join(
                                            [sub_root, sub_directory, 'state'])
                                        if rem.os.path.exists(state_file):
                                            if 'ACTIVE' in client.run(
                                                    'cat {0}'.format(
                                                        state_file)):
                                                rdma_capable = True
                    storagerouter.rdma_capable = rdma_capable
                    change = True
                if change is True:
                    storagerouter.save()

            working_version = 5

        # Version 6 introduced:
        # - Distributed scrubbing
        if working_version < 6:
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.sshclient import SSHClient
            for storage_driver in StorageDriverList.get_storagedrivers():
                root_client = SSHClient(storage_driver.storagerouter,
                                        username='******')
                for partition in storage_driver.partitions:
                    if partition.role == DiskPartition.ROLES.SCRUB:
                        old_path = partition.path
                        partition.sub_role = None
                        partition.save()
                        partition.invalidate_dynamics(['folder', 'path'])
                        if root_client.dir_exists(partition.path):
                            continue  # New directory already exists
                        if '_mds_' in old_path:
                            if root_client.dir_exists(old_path):
                                root_client.symlink({partition.path: old_path})
                        if not root_client.dir_exists(partition.path):
                            root_client.dir_create(partition.path)
                        root_client.dir_chmod(partition.path, 0777)

            working_version = 6

        # Version 7 introduced:
        # - vPool status
        if working_version < 7:
            from ovs.dal.hybrids import vpool
            reload(vpool)
            from ovs.dal.hybrids.vpool import VPool
            from ovs.dal.lists.vpoollist import VPoolList
            for _vpool in VPoolList.get_vpools():
                vpool = VPool(_vpool.guid)
                if hasattr(vpool, 'status') and vpool.status is None:
                    vpool.status = VPool.STATUSES.RUNNING
                    vpool.save()

            working_version = 7

        # Version 10 introduced:
        # - Reverse indexes are stored in persistent store
        # - Store more non-changing metadata on disk iso using a dynamic property
        if working_version < 10:
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.datalist import DataList
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            from ovs.extensions.storage.volatilefactory import VolatileFactory
            persistent = PersistentFactory.get_client()
            for prefix in ['ovs_listcache', 'ovs_reverseindex']:
                for key in persistent.prefix(prefix):
                    persistent.delete(key)
            for key in persistent.prefix('ovs_data_'):
                persistent.set(key, persistent.get(key))
            base_reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|{3}'
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                all_objects = DataList(cls, {
                    'type': DataList.where_operator.AND,
                    'items': []
                })
                for item in all_objects:
                    guid = item.guid
                    for relation in item._relations:
                        if relation.foreign_type is None:
                            rcls = cls
                            rclsname = rcls.__name__.lower()
                        else:
                            rcls = relation.foreign_type
                            rclsname = rcls.__name__.lower()
                        key = relation.name
                        rguid = item._data[key]['guid']
                        if rguid is not None:
                            reverse_key = base_reverse_key.format(
                                rclsname, rguid, relation.foreign_key, guid)
                            persistent.set(reverse_key, 0)
            volatile = VolatileFactory.get_client()
            try:
                volatile._client.flush_all()
            except:
                pass
            from ovs.dal.lists.vdisklist import VDiskList
            for vdisk in VDiskList.get_vdisks():
                try:
                    vdisk.metadata = {
                        'lba_size': vdisk.info['lba_size'],
                        'cluster_multiplier': vdisk.info['cluster_multiplier']
                    }
                    vdisk.save()
                except:
                    pass

            working_version = 10

        # Version 11 introduced:
        # - ALBA accelerated ALBA, meaning different vpool.metadata information
        if working_version < 11:
            from ovs.dal.lists.vpoollist import VPoolList

            for vpool in VPoolList.get_vpools():
                vpool.metadata = {'backend': vpool.metadata}
                if 'metadata' in vpool.metadata['backend']:
                    vpool.metadata['backend'][
                        'arakoon_config'] = vpool.metadata['backend'].pop(
                            'metadata')
                if 'backend_info' in vpool.metadata['backend']:
                    vpool.metadata['backend']['backend_info'][
                        'fragment_cache_on_read'] = True
                    vpool.metadata['backend']['backend_info'][
                        'fragment_cache_on_write'] = False
                vpool.save()
            working_version = 11

        return working_version
コード例 #33
0
ファイル: test_vdisk.py プロジェクト: grimpy/openvstorage
    def test_create_new(self):
        """
        Validate VDiskController.create_new behaviour
            - Reject a vDisk exceeding the 64 TiB maximum
            - Create a vDisk of exactly 64 TiB
            - Reject a duplicate name on the same vPool
            - Reject a name that cleans to a duplicate devicename on the same vPool
            - Allow the same name on a different vPool
            - Reject creation on a Storage Driver without an MDS service
        """
        # Structure keys: storagedrivers = (<id>, <vpool_id>, <storagerouter_id>),
        # mds_services = (<id>, <storagedriver_id>)
        structure = Helper.build_service_structure(
            {'vpools': [1, 2],
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],
             'mds_services': [(1, 1), (2, 2)]}
        )
        vpools = structure['vpools']
        mds_services = structure['mds_services']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        max_size = 64 * 1024 ** 4  # 64 TiB upper bound enforced by create_new

        name_1 = 'vdisk_1'
        name_2 = 'vdisk_2'

        # One byte over the maximum must be refused
        with self.assertRaises(ValueError):
            VDiskController.create_new(volume_name=name_1, volume_size=max_size + 1, storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 0, msg='Expected to find 0 vDisks after failure 1')

        # A volume of exactly the maximum size is allowed
        VDiskController.create_new(volume_name=name_1, volume_size=max_size, storagedriver_guid=storagedrivers[1].guid)
        all_vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(all_vdisks) == 1, msg='Expected to find 1 vDisk')
        created = all_vdisks[0]
        self.assertTrue(expr=created.storagerouter_guid == storagerouters[1].guid, msg='Storage Router does not match expected value')
        self.assertTrue(expr=created.size == max_size, msg='Size does not match expected value')
        self.assertTrue(expr=created.name == name_1, msg='Name does not match expected value')
        self.assertTrue(expr=created.vpool == vpools[1], msg='vPool does not match expected value')
        self.assertTrue(expr=created.devicename == VDiskController.clean_devicename(name_1), msg='Devicename does not match expected value')

        # Re-using the exact same name on the same vPool must fail
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(volume_name=name_1, volume_size=max_size, storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 1, msg='Expected to find 1 vDisk after failure 2')

        # A different name that cleans to the same devicename must also fail
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(volume_name='{0}%^$'.format(name_1), volume_size=max_size, storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 1, msg='Expected to find 1 vDisk after failure 3')

        # The same configuration is fine on another vPool
        second_vdisk = VDisk(VDiskController.create_new(volume_name=name_2, volume_size=max_size, storagedriver_guid=storagedrivers[2].guid))
        all_vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(all_vdisks) == 2, msg='Expected to find 2 vDisks')
        self.assertTrue(expr=second_vdisk.storagerouter_guid == storagerouters[1].guid, msg='Storage Router does not match expected value')
        self.assertTrue(expr=second_vdisk.size == max_size, msg='Size does not match expected value')
        self.assertTrue(expr=second_vdisk.name == name_2, msg='Name does not match expected value')
        self.assertTrue(expr=second_vdisk.vpool == vpools[2], msg='vPool does not match expected value')
        self.assertTrue(expr=second_vdisk.devicename == VDiskController.clean_devicename(name_2), msg='Devicename does not match expected value')

        # Move the MDS service away so Storage Driver 1 no longer has one
        mds_services[1].service.storagerouter = storagerouters[2]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(volume_name='vdisk_3', volume_size=max_size, storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 2, msg='Expected to find 2 vDisks after failure 4')
コード例 #34
0
 def get_vdisks():
     """
     Retrieve the full list of Virtual Disks known to the model.
     :return: Virtual Disk data-object list
     """
     disk_list = VDiskList.get_vdisks()
     return disk_list
コード例 #35
0
    def test_event_resize_from_volumedriver(self):
        """
        Exercise the volumedriver 'resize' event handler
            - A resize event for an unknown volume creates the vDisk in the model
            - A second resize event updates the size of the existing vDisk
        """
        # Structure keys: storagedrivers = (<id>, <vpool_id>, <storagerouter_id>),
        # mds_services = (<id>, <storagedriver_id>)
        structure = DalHelper.build_dal_structure({'vpools': [1],
                                                  'storagerouters': [1],
                                                  'storagedrivers': [(1, 1, 1)],
                                                  'mds_services': [(1, 1)]})
        vpools = structure['vpools']
        storagedrivers = structure['storagedrivers']
        mds_service = structure['mds_services'][1]

        one_tib = 1024 ** 4
        device_name = '/vdisk.raw'

        # Create the volume through the StorageRouterClient, i.e. outside the model
        srclient = StorageRouterClient(vpools[1].guid, None)
        mds_backend_config = DalHelper.generate_mds_metadata_backend_config([mds_service])
        volume_id = srclient.create_volume(device_name,
                                           mds_backend_config,
                                           one_tib,
                                           str(storagedrivers[1].storagedriver_id))

        # First resize event: the vDisk must appear in the model
        VDiskController.resize_from_voldrv(volume_id=volume_id,
                                           volume_size=one_tib,
                                           volume_path=device_name,
                                           storagedriver_id=storagedrivers[1].storagedriver_id)
        model_vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(model_vdisks) == 1,
                        msg='Expected to find 1 vDisk in model')
        self.assertEqual(first=model_vdisks[0].name,
                         second='vdisk',
                         msg='Volume name should be vdisk')
        self.assertEqual(first=model_vdisks[0].volume_id,
                         second=volume_id,
                         msg='Volume ID should be {0}'.format(volume_id))
        self.assertEqual(first=model_vdisks[0].devicename,
                         second=device_name,
                         msg='Device name should be {0}'.format(device_name))
        self.assertEqual(first=model_vdisks[0].size,
                         second=one_tib,
                         msg='Size should be 1 TiB')

        # Second resize event: the existing vDisk must grow to 2 TiB
        VDiskController.resize_from_voldrv(volume_id=volume_id,
                                           volume_size=2 * one_tib,
                                           volume_path=device_name,
                                           storagedriver_id=storagedrivers[1].storagedriver_id)
        model_vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(model_vdisks) == 1,
                        msg='Expected to find 1 vDisk in model')
        self.assertEqual(first=model_vdisks[0].name,
                         second='vdisk',
                         msg='Volume name should be vdisk')
        self.assertEqual(first=model_vdisks[0].size,
                         second=2 * one_tib,
                         msg='Size should be 2 TiB')
コード例 #36
0
ファイル: ovsmigrator.py プロジェクト: DarumasLegs/framework
    def migrate(previous_version):
        """
        Migrates from any version to any version, running all migrations required
        If previous_version is for example 0 and this script is at
        version 3 it will execute two steps:
          - 1 > 2
          - 2 > 3
        @param previous_version: The previous version from which to start the migration.
        """

        working_version = previous_version

        # Version 1 introduced:
        # - The datastore is still empty, add defaults
        if working_version < 1:
            from ovs.dal.hybrids.user import User
            from ovs.dal.hybrids.group import Group
            from ovs.dal.hybrids.role import Role
            from ovs.dal.hybrids.client import Client
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.hybrids.j_rolegroup import RoleGroup
            from ovs.dal.hybrids.j_roleclient import RoleClient
            from ovs.dal.hybrids.backendtype import BackendType
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.hybrids.branding import Branding
            from ovs.dal.lists.backendtypelist import BackendTypeList

            # Create groups
            admin_group = Group()
            admin_group.name = 'administrators'
            admin_group.description = 'Administrators'
            admin_group.save()
            viewers_group = Group()
            viewers_group.name = 'viewers'
            viewers_group.description = 'Viewers'
            viewers_group.save()

            # Create users
            admin = User()
            admin.username = '******'
            admin.password = hashlib.sha256('admin').hexdigest()
            admin.is_active = True
            admin.group = admin_group
            admin.save()

            # Create internal OAuth 2 clients
            admin_pw_client = Client()
            admin_pw_client.ovs_type = 'INTERNAL'
            admin_pw_client.grant_type = 'PASSWORD'
            admin_pw_client.user = admin
            admin_pw_client.save()
            admin_cc_client = Client()
            admin_cc_client.ovs_type = 'INTERNAL'
            admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
            admin_cc_client.client_secret = ''.join(random.choice(string.ascii_letters +
                                                                  string.digits +
                                                                  '|_=+*#@!/-[]{}<>.?,\'";:~')
                                                    for _ in range(128))
            admin_cc_client.user = admin
            admin_cc_client.save()

            # Create roles
            read_role = Role()
            read_role.code = 'read'
            read_role.name = 'Read'
            read_role.description = 'Can read objects'
            read_role.save()
            write_role = Role()
            write_role.code = 'write'
            write_role.name = 'Write'
            write_role.description = 'Can write objects'
            write_role.save()
            manage_role = Role()
            manage_role.code = 'manage'
            manage_role.name = 'Manage'
            manage_role.description = 'Can manage the system'
            manage_role.save()

            # Attach groups to roles
            mapping = [
                (admin_group, [read_role, write_role, manage_role]),
                (viewers_group, [read_role])
            ]
            for setting in mapping:
                for role in setting[1]:
                    rolegroup = RoleGroup()
                    rolegroup.group = setting[0]
                    rolegroup.role = role
                    rolegroup.save()
                for user in setting[0].users:
                    for role in setting[1]:
                        for client in user.clients:
                            roleclient = RoleClient()
                            roleclient.client = client
                            roleclient.role = role
                            roleclient.save()

            # Add backends
            for backend_type_info in [('Ceph', 'ceph_s3'), ('Amazon', 'amazon_s3'), ('Swift', 'swift_s3'),
                                      ('Local', 'local'), ('Distributed', 'distributed'), ('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [ServiceType.SERVICE_TYPES.MD_SERVER, ServiceType.SERVICE_TYPES.ALBA_PROXY, ServiceType.SERVICE_TYPES.ARAKOON]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

            # Branding
            branding = Branding()
            branding.name = 'Default'
            branding.description = 'Default bootstrap theme'
            branding.css = 'bootstrap-default.min.css'
            branding.productname = 'Open vStorage'
            branding.is_default = True
            branding.save()
            slate = Branding()
            slate.name = 'Slate'
            slate.description = 'Dark bootstrap theme'
            slate.css = 'bootstrap-slate.min.css'
            slate.productname = 'Open vStorage'
            slate.is_default = False
            slate.save()

            # Failure Domain
            failure_domain = FailureDomain()
            failure_domain.name = 'Default'
            failure_domain.save()

            # We're now at version 1
            working_version = 1

        # Version 2 introduced:
        # - new Descriptor format
        if working_version < 2:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry] and 'hybrids' in data[entry]['source']:
                        filename = data[entry]['source']
                        if not filename.startswith('/'):
                            filename = '/opt/OpenvStorage/ovs/dal/{0}'.format(filename)
                        module = imp.load_source(data[entry]['name'], filename)
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            # We're now at version 2
            working_version = 2

        # Version 3 introduced:
        # - new Descriptor format
        if working_version < 3:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry]:
                        module = imp.load_source(data[entry]['name'], data[entry]['source'])
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            working_version = 3

        # Version 4 introduced:
        # - Flexible SSD layout
        if working_version < 4:
            import os
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.hybrids.j_storagedriverpartition import StorageDriverPartition
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
            for service in ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.MD_SERVER).services:
                mds_service = service.mds_service
                storagedriver = None
                for current_storagedriver in service.storagerouter.storagedrivers:
                    if current_storagedriver.vpool_guid == mds_service.vpool_guid:
                        storagedriver = current_storagedriver
                        break
                tasks = {}
                if storagedriver._data.get('mountpoint_md'):
                    tasks['{0}/mds_{1}_{2}'.format(storagedriver._data.get('mountpoint_md'),
                                                   storagedriver.vpool.name,
                                                   mds_service.number)] = (DiskPartition.ROLES.DB, StorageDriverPartition.SUBROLE.MDS)
                if storagedriver._data.get('mountpoint_temp'):
                    tasks['{0}/mds_{1}_{2}'.format(storagedriver._data.get('mountpoint_temp'),
                                                   storagedriver.vpool.name,
                                                   mds_service.number)] = (DiskPartition.ROLES.SCRUB, StorageDriverPartition.SUBROLE.MDS)
                for disk in service.storagerouter.disks:
                    for partition in disk.partitions:
                        for directory, (role, subrole) in tasks.iteritems():
                            with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                stat_dir = directory
                                while not rem.os.path.exists(stat_dir) and stat_dir != '/':
                                    stat_dir = stat_dir.rsplit('/', 1)[0]
                                    if not stat_dir:
                                        stat_dir = '/'
                                inode = rem.os.stat(stat_dir).st_dev
                            if partition.inode == inode:
                                if role not in partition.roles:
                                    partition.roles.append(role)
                                    partition.save()
                                number = 0
                                migrated = False
                                for sd_partition in storagedriver.partitions:
                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                        if sd_partition.mds_service == mds_service:
                                            migrated = True
                                            break
                                        if sd_partition.partition_guid == partition.guid:
                                            number = max(sd_partition.number, number)
                                if migrated is False:
                                    sd_partition = StorageDriverPartition()
                                    sd_partition.role = role
                                    sd_partition.sub_role = subrole
                                    sd_partition.partition = partition
                                    sd_partition.storagedriver = storagedriver
                                    sd_partition.mds_service = mds_service
                                    sd_partition.size = None
                                    sd_partition.number = number + 1
                                    sd_partition.save()
                                    client = SSHClient(storagedriver.storagerouter, username='******')
                                    path = sd_partition.path.rsplit('/', 1)[0]
                                    if path:
                                        client.dir_create(path)
                                        client.dir_chown(path, 'ovs', 'ovs')
                                    client.dir_create(directory)
                                    client.dir_chown(directory, 'ovs', 'ovs')
                                    client.symlink({sd_partition.path: directory})
            for storagedriver in StorageDriverList.get_storagedrivers():
                migrated_objects = {}
                for disk in storagedriver.storagerouter.disks:
                    for partition in disk.partitions:
                        # Process all mountpoints that are unique and don't have a specified size
                        for key, (role, sr_info) in {'mountpoint_md': (DiskPartition.ROLES.DB, {'metadata_{0}': StorageDriverPartition.SUBROLE.MD,
                                                                                                'tlogs_{0}': StorageDriverPartition.SUBROLE.TLOG}),
                                                     'mountpoint_fragmentcache': (DiskPartition.ROLES.WRITE, {'fcache_{0}': StorageDriverPartition.SUBROLE.FCACHE}),
                                                     'mountpoint_foc': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                     'mountpoint_dtl': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                     'mountpoint_readcaches': (DiskPartition.ROLES.READ, {'': None}),
                                                     'mountpoint_writecaches': (DiskPartition.ROLES.WRITE, {'sco_{0}': StorageDriverPartition.SUBROLE.SCO})}.iteritems():
                            if key in storagedriver._data:
                                is_list = isinstance(storagedriver._data[key], list)
                                entries = storagedriver._data[key][:] if is_list is True else [storagedriver._data[key]]
                                for entry in entries:
                                    if not entry:
                                        if is_list:
                                            storagedriver._data[key].remove(entry)
                                            if len(storagedriver._data[key]) == 0:
                                                del storagedriver._data[key]
                                        else:
                                            del storagedriver._data[key]
                                    else:
                                        with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                            inode = rem.os.stat(entry).st_dev
                                        if partition.inode == inode:
                                            if role not in partition.roles:
                                                partition.roles.append(role)
                                                partition.save()
                                            for folder, subrole in sr_info.iteritems():
                                                number = 0
                                                migrated = False
                                                for sd_partition in storagedriver.partitions:
                                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                                        if sd_partition.partition_guid == partition.guid:
                                                            number = max(sd_partition.number, number)
                                                if migrated is False:
                                                    sd_partition = StorageDriverPartition()
                                                    sd_partition.role = role
                                                    sd_partition.sub_role = subrole
                                                    sd_partition.partition = partition
                                                    sd_partition.storagedriver = storagedriver
                                                    sd_partition.size = None
                                                    sd_partition.number = number + 1
                                                    sd_partition.save()
                                                    if folder:
                                                        source = '{0}/{1}'.format(entry, folder.format(storagedriver.vpool.name))
                                                    else:
                                                        source = entry
                                                    client = SSHClient(storagedriver.storagerouter, username='******')
                                                    path = sd_partition.path.rsplit('/', 1)[0]
                                                    if path:
                                                        client.dir_create(path)
                                                        client.dir_chown(path, 'ovs', 'ovs')
                                                    client.symlink({sd_partition.path: source})
                                                    migrated_objects[source] = sd_partition
                                            if is_list:
                                                storagedriver._data[key].remove(entry)
                                                if len(storagedriver._data[key]) == 0:
                                                    del storagedriver._data[key]
                                            else:
                                                del storagedriver._data[key]
                                            storagedriver.save()
                if 'mountpoint_bfs' in storagedriver._data:
                    storagedriver.mountpoint_dfs = storagedriver._data['mountpoint_bfs']
                    if not storagedriver.mountpoint_dfs:
                        storagedriver.mountpoint_dfs = None
                    del storagedriver._data['mountpoint_bfs']
                    storagedriver.save()
                if 'mountpoint_temp' in storagedriver._data:
                    del storagedriver._data['mountpoint_temp']
                    storagedriver.save()
                if migrated_objects:
                    print 'Loading sizes'
                    config = StorageDriverConfiguration('storagedriver', storagedriver.vpool_guid, storagedriver.storagedriver_id)
                    config.load()
                    for readcache in config.configuration.get('content_addressed_cache', {}).get('clustercache_mount_points', []):
                        path = readcache.get('path', '').rsplit('/', 1)[0]
                        size = int(readcache['size'].strip('KiB')) * 1024 if 'size' in readcache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size)
                            migrated_objects[path].save()
                    for writecache in config.configuration.get('scocache', {}).get('scocache_mount_points', []):
                        path = writecache.get('path', '')
                        size = int(writecache['size'].strip('KiB')) * 1024 if 'size' in writecache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size)
                            migrated_objects[path].save()

            working_version = 4

        # Version 5 introduced:
        # - Failure Domains
        if working_version < 5:
            # Every StorageRouter must now belong to a primary failure domain;
            # reuse the first existing domain or create a 'Default' one.
            import os
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.lists.failuredomainlist import FailureDomainList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            failure_domains = FailureDomainList.get_failure_domains()
            if len(failure_domains) > 0:
                failure_domain = failure_domains[0]
            else:
                failure_domain = FailureDomain()
                failure_domain.name = 'Default'
                failure_domain.save()
            for storagerouter in StorageRouterList.get_storagerouters():
                change = False
                if storagerouter.primary_failure_domain is None:
                    storagerouter.primary_failure_domain = failure_domain
                    change = True
                if storagerouter.rdma_capable is None:
                    # Detect RDMA capability by walking the InfiniBand sysfs
                    # tree on the remote node, looking for any port whose
                    # 'state' file reports ACTIVE.
                    # NOTE(review): username '******' looks redacted in this
                    # copy of the source - verify the intended account.
                    client = SSHClient(storagerouter, username='******')
                    rdma_capable = False
                    with remote(client.ip, [os], username='******') as rem:
                        for root, dirs, files in rem.os.walk('/sys/class/infiniband'):
                            for directory in dirs:
                                ports_dir = '/'.join([root, directory, 'ports'])
                                if not rem.os.path.exists(ports_dir):
                                    continue
                                # Only inspect direct children of 'ports'
                                # (one sub directory per IB port).
                                for sub_root, sub_dirs, _ in rem.os.walk(ports_dir):
                                    if sub_root != ports_dir:
                                        continue
                                    for sub_directory in sub_dirs:
                                        state_file = '/'.join([sub_root, sub_directory, 'state'])
                                        if rem.os.path.exists(state_file):
                                            if 'ACTIVE' in client.run('cat {0}'.format(state_file)):
                                                rdma_capable = True
                    storagerouter.rdma_capable = rdma_capable
                    change = True
                if change is True:
                    storagerouter.save()

            working_version = 5

        # Version 6 introduced:
        # - Distributed scrubbing
        if working_version < 6:
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.sshclient import SSHClient
            # NOTE(review): username '******' looks redacted in this copy of
            # the source - verify the intended account.
            for storage_driver in StorageDriverList.get_storagedrivers():
                root_client = SSHClient(storage_driver.storagerouter, username='******')
                for partition in storage_driver.partitions:
                    if partition.role == DiskPartition.ROLES.SCRUB:
                        # Scrub partitions lose their sub role. Clearing it
                        # changes the dynamic 'folder'/'path' properties, so
                        # they must be invalidated before touching the
                        # filesystem below.
                        old_path = partition.path
                        partition.sub_role = None
                        partition.save()
                        partition.invalidate_dynamics(['folder', 'path'])
                        if root_client.dir_exists(partition.path):
                            continue  # New directory already exists
                        if '_mds_' in old_path:
                            # Keep the old MDS-specific location reachable
                            # through a symlink at the new path.
                            if root_client.dir_exists(old_path):
                                root_client.symlink({partition.path: old_path})
                        if not root_client.dir_exists(partition.path):
                            root_client.dir_create(partition.path)
                        root_client.dir_chmod(partition.path, 0777)

            working_version = 6

        # Version 7 introduced:
        # - vPool status
        if working_version < 7:
            # Reload the hybrid module so the freshly introduced 'status'
            # property is present before VPool objects are instantiated.
            from ovs.dal.hybrids import vpool
            reload(vpool)
            from ovs.dal.hybrids.vpool import VPool
            from ovs.dal.lists.vpoollist import VPoolList
            for _vpool in VPoolList.get_vpools():
                # Re-instantiate through the reloaded class. NOTE: this
                # rebinds the name 'vpool' from the module to the instance.
                vpool = VPool(_vpool.guid)
                if hasattr(vpool, 'status') and vpool.status is None:
                    vpool.status = VPool.STATUSES.RUNNING
                    vpool.save()

            working_version = 7

        # Version 10 introduced:
        # - Reverse indexes are stored in persistent store
        # - Store more non-changing metadata on disk iso using a dynamic property
        if working_version < 10:
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.datalist import DataList
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            from ovs.extensions.storage.volatilefactory import VolatileFactory
            persistent = PersistentFactory.get_client()
            # Drop all cached list results and old reverse-index entries; the
            # reverse indexes are rebuilt in the persistent store below.
            for prefix in ['ovs_listcache', 'ovs_reverseindex']:
                for key in persistent.prefix(prefix):
                    persistent.delete(key)
            # Re-save every data key so it is re-serialized by the new code.
            for key in persistent.prefix('ovs_data_'):
                persistent.set(key, persistent.get(key))
            base_reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|{3}'
            # Rebuild a reverse-index entry for every relation of every hybrid
            # object currently present in the model.
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                all_objects = DataList(cls, {'type': DataList.where_operator.AND,
                                             'items': []})
                for item in all_objects:
                    guid = item.guid
                    for relation in item._relations:
                        # A relation without a foreign type points to its own class.
                        rcls = cls if relation.foreign_type is None else relation.foreign_type
                        rclsname = rcls.__name__.lower()
                        key = relation.name
                        rguid = item._data[key]['guid']
                        if rguid is not None:
                            reverse_key = base_reverse_key.format(rclsname, rguid, relation.foreign_key, guid)
                            persistent.set(reverse_key, 0)
            # Best effort: flush the volatile (cache) store so stale cached
            # objects are not served after the migration.
            volatile = VolatileFactory.get_client()
            try:
                volatile._client.flush_all()
            except Exception:  # Was a bare 'except:'; don't swallow SystemExit/KeyboardInterrupt
                pass
            # Persist the static part of each vDisk's metadata on disk instead
            # of re-querying it as a dynamic property. Best effort per vDisk:
            # the volume may be unreachable at migration time.
            from ovs.dal.lists.vdisklist import VDiskList
            for vdisk in VDiskList.get_vdisks():
                try:
                    vdisk.metadata = {'lba_size': vdisk.info['lba_size'],
                                      'cluster_multiplier': vdisk.info['cluster_multiplier']}
                    vdisk.save()
                except Exception:  # Was a bare 'except:'; volume info may be unavailable
                    pass

            working_version = 10

        # Version 11 introduced:
        # - ALBA accelerated ALBA, meaning different vpool.metadata information
        if working_version < 11:
            from ovs.dal.lists.vpoollist import VPoolList

            for vpool in VPoolList.get_vpools():
                # Nest the old flat metadata structure under a 'backend' key.
                vpool.metadata = {'backend': vpool.metadata}
                # The old 'metadata' entry is renamed to 'arakoon_config'.
                if 'metadata' in vpool.metadata['backend']:
                    vpool.metadata['backend']['arakoon_config'] = vpool.metadata['backend'].pop('metadata')
                if 'backend_info' in vpool.metadata['backend']:
                    # New fragment cache flags: default to read on, write off.
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_read'] = True
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_write'] = False
                vpool.save()
            working_version = 11

        return working_version
コード例 #37
0
    def _bootstrap_dal_models(self):
        """
        Load/hook dal models as snmp oids
        """
        _guids = set()

        enabled_key = "{0}_config_dal_enabled".format(STORAGE_PREFIX)
        self.instance_oid = 0
        try:
            enabled = self.persistent.get(enabled_key)
        except KeyNotFoundException:
            enabled = True  # Enabled by default, can be disabled by setting the key
        if enabled:
            from ovs.dal.lists.vdisklist import VDiskList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.dal.lists.pmachinelist import PMachineList
            from ovs.dal.lists.vmachinelist import VMachineList
            from ovs.dal.lists.vpoollist import VPoolList
            from ovs.dal.lists.storagedriverlist import StorageDriverList

            for storagerouter in StorageRouterList.get_storagerouters():
                _guids.add(storagerouter.guid)
                if not self._check_added(storagerouter):
                    self._register_dal_model(10, storagerouter, 'guid', "0")
                    self._register_dal_model(10, storagerouter, 'name', "1")
                    self._register_dal_model(10, storagerouter, 'pmachine', "3", key = 'host_status')
                    self._register_dal_model(10, storagerouter, 'description', "4")
                    self._register_dal_model(10, storagerouter, 'devicename', "5")
                    self._register_dal_model(10, storagerouter, 'dtl_mode', "6")
                    self._register_dal_model(10, storagerouter, 'ip', "8")
                    self._register_dal_model(10, storagerouter, 'machineid', "9")
                    self._register_dal_model(10, storagerouter, 'status', "10")
                    self._register_dal_model(10, storagerouter, '#vdisks', "11",
                                             func = lambda storagerouter: len([vdisk for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id]),
                                             atype = int)
                    self._register_dal_model(10, storagerouter, '#vmachines', "12",
                                             func = lambda storagerouter: len(set([vdisk.vmachine.guid for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id])),
                                             atype = int)
                    self._register_dal_model(10, storagerouter, '#stored_data', "13",
                                             func = lambda storagerouter: sum([vdisk.vmachine.stored_data for vpool_vdisks in [storagedriver.vpool.vdisks for storagedriver in storagerouter.storagedrivers] for vdisk in vpool_vdisks if vdisk.storagedriver_id == storagedriver.storagedriver_id]),
                                             atype = int)
                    self.instance_oid += 1

            for vm in VMachineList.get_vmachines():
                _guids.add(vm.guid)
                if not self._check_added(vm):
                    if vm.is_vtemplate:
                        self._register_dal_model(11, vm, 'guid', "0")
                        self._register_dal_model(11, vm, 'name', "1")

                        def _children(vmt):
                            children = 0
                            disks = [vd.guid for vd in vmt.vdisks]
                            for vdisk in [vdisk.parent_vdisk_guid for item in [vm.vdisks for vm in VMachineList.get_vmachines() if not vm.is_vtemplate] for vdisk in item]:
                                for disk in disks:
                                    if vdisk == disk:
                                        children += 1
                            return children
                        self._register_dal_model(11, vm, '#children', 2, func = _children, atype = int)
                        self.instance_oid += 1

            for vm in VMachineList.get_vmachines():
                _guids.add(vm.guid)
                if not self._check_added(vm):
                    if not vm.is_vtemplate:
                        self._register_dal_model(0, vm, 'guid', "0")
                        self._register_dal_model(0, vm, 'name', "1")
                        self._register_dal_model(0, vm, 'statistics', "2.0", key = "operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.1", key = "cluster_cache_misses_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.2", key = "data_read", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.6", key = "write_operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.8", key = "read_operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.10", key = "backend_write_operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.11", key = "backend_data_read", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.12", key = "cache_hits", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.16", key = "backend_data_written", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.17", key = "data_read_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.18", key = "read_operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.20", key = "data_written_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.23", key = "timestamp", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.26", key = "backend_read_operations", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.27", key = "data_written", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.30", key = "operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.33", key = "write_operations_ps", atype = int)
                        self._register_dal_model(0, vm, 'statistics', "2.34", key = "data_transferred", atype = int)
                        self._register_dal_model(0, vm, 'stored_data', "3", atype = int)
                        self._register_dal_model(0, vm, 'description', "4")
                        self._register_dal_model(0, vm, 'devicename', "5")
                        self._register_dal_model(0, vm, 'dtl_mode', "6")
                        self._register_dal_model(0, vm, 'hypervisorid', "7")
                        self._register_dal_model(0, vm, 'ip', "8")
                        self._register_dal_model(0, vm, 'status', "10")
                        self._register_dal_model(0, vm, 'stored_data', "10", atype = int)
                        self._register_dal_model(0, vm, 'snapshots', "11", atype = int)
                        self._register_dal_model(0, vm, 'vdisks', "12", atype = int)
                        self._register_dal_model(0, vm, 'DTL', '13',
                                                 func = lambda vm: 'DEGRADED' if all(item == 'DEGRADED' for item in [vd.info['failover_mode'] for vd in vm.vdisks]) else 'OK')
                    self.instance_oid += 1

            for vd in VDiskList.get_vdisks():
                _guids.add(vd.guid)
                if not self._check_added(vd):
                    self._register_dal_model(1, vd, 'guid', "0")
                    self._register_dal_model(1, vd, 'name', "1")
                    self._register_dal_model(1, vd, 'statistics', "2.0", key = "operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.1", key = "data_written_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.2", key = "data_read", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.6", key = "write_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.8", key = "read_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.10", key = "backend_write_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.11", key = "backend_data_read", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.12", key = "cache_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.16", key = "backend_data_written", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.17", key = "data_read_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.18", key = "read_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.20", key = "cluster_cache_misses_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.23", key = "timestamp", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.26", key = "backend_read_operations", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.27", key = "data_written", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.30", key = "operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.33", key = "write_operations_ps", atype = int)
                    self._register_dal_model(1, vd, 'statistics', "2.34", key = "data_transferred", atype = int)
                    self._register_dal_model(1, vd, 'info', "3", key = 'stored', atype = int)
                    self._register_dal_model(1, vd, 'info', "4", key = 'failover_mode', atype = int)
                    self._register_dal_model(1, vd, 'snapshots', "5", atype = int)
                    self.instance_oid += 1

            for pm in PMachineList.get_pmachines():
                _guids.add(pm.guid)
                if not self._check_added(pm):
                    self._register_dal_model(2, pm, 'guid', "0")
                    self._register_dal_model(2, pm, 'name', "1")
                    self._register_dal_model(2, pm, 'host_status', "2")
                    self.instance_oid += 1

            for vp in VPoolList.get_vpools():
                _guids.add(vp.guid)
                if not self._check_added(vp):
                    self._register_dal_model(3, vp, 'guid', "0")
                    self._register_dal_model(3, vp, 'name', "1")
                    self._register_dal_model(3, vp, 'statistics', "2.0", key = "operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.1", key = "cluster_cache_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.2", key = "data_read", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.3", key = "sco_cache_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.4", key = "sco_cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.5", key = "sco_cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.6", key = "write_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.7", key = "cluster_cache_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.8", key = "read_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.9", key = "sco_cache_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.10", key = "backend_write_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.11", key = "backend_data_read", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.12", key = "cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.13", key = "backend_write_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.14", key = "metadata_store_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.15", key = "metadata_store_misses", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.16", key = "backend_data_written", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.17", key = "data_read_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.18", key = "read_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.19", key = "cluster_cache_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.20", key = "data_written_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.21", key = "cluster_cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.22", key = "cache_hits_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.23", key = "timestamp", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.24", key = "metadata_store_misses_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.25", key = "backend_data_written_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.26", key = "backend_read_operations", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.27", key = "data_written", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.28", key = "metadata_store_hits", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.29", key = "backend_data_read_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.30", key = "operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.31", key = "backend_read_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.32", key = "data_transferred_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.33", key = "write_operations_ps", atype = int)
                    self._register_dal_model(3, vp, 'statistics', "2.34", key = "data_transferred", atype = int)
                    self._register_dal_model(3, vp, 'status', "3")
                    self._register_dal_model(3, vp, 'description', "4")
                    self._register_dal_model(3, vp, 'vdisks', "5", atype = int)
                    self._register_dal_model(3, vp, '#vmachines', "6",
                                             func = lambda vp: len(set([vd.vmachine.guid for vd in vp.vdisks])),
                                             atype = int)
                    self.instance_oid += 1

            for storagedriver in StorageDriverList.get_storagedrivers():
                _guids.add(storagedriver.guid)
                if not self._check_added(storagedriver):
                    self._register_dal_model(4, storagedriver, 'guid', "0")
                    self._register_dal_model(4, storagedriver, 'name', "1")
                    self._register_dal_model(4, storagedriver, 'stored_data', "2", atype = int)
                    self.instance_oid += 1

            try:
                # try to load OVS Backends
                from ovs.dal.lists.albabackendlist import AlbaBackendList
                for backend in AlbaBackendList.get_albabackends():
                    _guids.add(backend.guid)
                    if not self._check_added(backend):
                        self._register_dal_model(5, backend, 'guid', 0)
                        self._register_dal_model(5, backend, 'name', 1)
                        for disk_id in range(len((backend.all_disks))):
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.0'.format(disk_id), key = "name", index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.1'.format(disk_id), key = "usage.size", atype = long, index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.2'.format(disk_id), key = "usage.used", atype = long, index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.3'.format(disk_id), key = "usage.available", atype = long, index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.4'.format(disk_id), key = "state.state", index=disk_id)
                            self._register_dal_model(5, backend, 'all_disks', '2.{0}.5'.format(disk_id), key = "node_id", index=disk_id)

                        self.instance_oid += 1
            except ImportError:
                print('OVS Backend not present')
                pass
            reload = False
            for object_guid in list(self.model_oids):
                if object_guid not in _guids:
                    self.model_oids.remove(object_guid)
                    reload = True
            if reload:
                self._reload_snmp()
コード例 #38
0
    def delete_snapshots(timestamp=None):
        """
        Enforce the snapshot retention policy by removing obsolete snapshots.

        Retention policy applied per vDisk:
        < 1d | 1d bucket | 1 | best of bucket   | 1d
        < 1w | 1d bucket | 6 | oldest of bucket | 7d = 1w
        < 1m | 1w bucket | 3 | oldest of bucket | 4w = 1m
        > 1m | delete

        :param timestamp: Reference time used to build the retention buckets; current time when omitted
        :type timestamp: float
        :return: None
        """
        GenericController._logger.info('Delete snapshots started')

        one_day = timedelta(1)
        one_week = one_day * 7

        if timestamp is None:
            timestamp = time.time()
        base = datetime.fromtimestamp(timestamp).date() - one_day

        def make_timestamp(offset):
            """
            Integer epoch timestamp for `base` shifted back by `offset` days
            :param offset: Offset in days
            :return: Timestamp
            """
            return int(mktime((base - offset).timetuple()))

        # Bucket layout (newest to oldest): seven 1-day buckets, three 1-week buckets
        # and one unbounded 'rest' bucket catching everything older than 4 weeks
        bucket_template = [{'start': make_timestamp(one_day * index),
                            'end': make_timestamp(one_day * (index + 1)),
                            'type': '1d',
                            'snapshots': []} for index in xrange(0, 7)]
        bucket_template += [{'start': make_timestamp(one_week * index),
                             'end': make_timestamp(one_week * (index + 1)),
                             'type': '1w',
                             'snapshots': []} for index in xrange(1, 4)]
        bucket_template.append({'start': make_timestamp(one_week * 4),
                                'end': 0,
                                'type': 'rest',
                                'snapshots': []})

        # Snapshots acting as a parent for a clone may never be deleted
        parent_snapshots = set([vd.parentsnapshot for vd in VDiskList.get_with_parent_snaphots()])

        # Distribute every BASE vDisk's snapshots over its own copy of the bucket layout
        chains = []
        for vdisk in VDiskList.get_vdisks():
            if vdisk.info['object_type'] not in ['BASE']:
                continue
            chain = copy.deepcopy(bucket_template)
            for snapshot in vdisk.snapshots:
                if snapshot.get('is_sticky') is True:
                    continue  # Sticky snapshots are excluded from the retention policy
                snapshot_guid = snapshot['guid']
                if snapshot_guid in parent_snapshots:
                    GenericController._logger.info('Not deleting snapshot {0} because it has clones'.format(snapshot_guid))
                    continue
                snapshot_timestamp = int(snapshot['timestamp'])
                for bucket in chain:
                    # Buckets run backwards in time, so 'start' is the more recent boundary
                    if bucket['start'] >= snapshot_timestamp > bucket['end']:
                        bucket['snapshots'].append({'timestamp': snapshot_timestamp,
                                                    'snapshot_id': snapshot_guid,
                                                    'vdisk_guid': vdisk.guid,
                                                    'is_consistent': snapshot['is_consistent']})
            chains.append(chain)

        # Remove the one snapshot we want to KEEP from every bucket; whatever remains
        # in the buckets afterwards is scheduled for deletion
        for chain in chains:
            for index, bucket in enumerate(chain):
                candidates = bucket['snapshots']
                if index == 0:
                    # Most recent bucket: keep the 'best' snapshot, where consistency
                    # beats inconsistency and a newer timestamp breaks ties
                    keeper = None
                    for candidate in candidates:
                        if keeper is None:
                            keeper = candidate
                        elif candidate['is_consistent'] and not keeper['is_consistent']:
                            keeper = candidate
                        elif candidate['is_consistent'] == keeper['is_consistent'] and \
                                candidate['timestamp'] > keeper['timestamp']:
                            keeper = candidate
                    bucket['snapshots'] = [entry for entry in candidates
                                           if entry['timestamp'] != keeper['timestamp']]
                elif bucket['end'] > 0:
                    # Older bounded buckets: the oldest snapshot survives
                    keeper = None
                    for candidate in candidates:
                        if keeper is None or candidate['timestamp'] < keeper['timestamp']:
                            keeper = candidate
                    bucket['snapshots'] = [entry for entry in candidates
                                           if entry['timestamp'] != keeper['timestamp']]
                # The unbounded 'rest' bucket (end == 0) keeps nothing

        # Everything still left in a bucket is obsolete
        for chain in chains:
            for bucket in chain:
                for snapshot in bucket['snapshots']:
                    VDiskController.delete_snapshot(vdisk_guid=snapshot['vdisk_guid'],
                                                    snapshot_id=snapshot['snapshot_id'])
        GenericController._logger.info('Delete snapshots finished')
コード例 #39
0
    def test_sync(self):
        """
        Validates whether the sync works as expected
        """
        structure = DalHelper.build_dal_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
        vpool = structure['vpools'][1]
        storagedriver = structure['storagedrivers'][1]
        mds_service = structure['mds_services'][1]
        # noinspection PyArgumentList
        backend_config = MDSMetaDataBackendConfig([MDSNodeConfig(address=str(mds_service.service.storagerouter.ip),
                                                                 port=mds_service.service.ports[0])])
        srclient = StorageRouterClient(vpool.guid, None)

        def assert_counts(modelled, actual):
            # Verify both the number of modelled vDisks and the number of volumes the voldrv client reports
            self.assertEqual(len(VDiskList.get_vdisks()), modelled)
            self.assertEqual(len(srclient.list_volumes()), actual)

        VDiskController.create_new('one', 1024 ** 3, storagedriver.guid)
        VDiskController.create_new('two', 1024 ** 3, storagedriver.guid)
        assert_counts(2, 2)

        # Syncing without discrepancies changes nothing
        VDiskController.sync_with_reality()
        assert_counts(2, 2)

        # A volume created behind the model's back is picked up by the sync
        volume_id = srclient.create_volume('/three.raw', backend_config, 1024 ** 3, storagedriver.storagedriver_id)
        assert_counts(2, 3)
        VDiskController.sync_with_reality()
        assert_counts(3, 3)
        self.assertEqual(VDiskList.get_vdisk_by_volume_id(volume_id).devicename, '/three.raw')

        # A modelled vDisk without a backing volume is removed again by the sync
        ghost = VDisk()
        ghost.volume_id = 'foo'
        ghost.name = 'foo'
        ghost.devicename = 'foo.raw'
        ghost.size = 1024 ** 3
        ghost.vpool = vpool
        ghost.save()
        assert_counts(4, 3)
        VDiskController.sync_with_reality()
        assert_counts(3, 3)
        with self.assertRaises(ObjectNotFoundException):
            ghost.save()
コード例 #40
0
ファイル: scheduledtask.py プロジェクト: grimpy/openvstorage
    def delete_snapshots(timestamp=None):
        """
        Apply the snapshot retention policy and delete the snapshots falling outside of it.

        Implemented delete snapshot policy:
        < 1d | 1d bucket | 1 | best of bucket   | 1d
        < 1w | 1d bucket | 6 | oldest of bucket | 7d = 1w
        < 1m | 1w bucket | 3 | oldest of bucket | 4w = 1m
        > 1m | delete

        :param timestamp: Timestamp to determine whether snapshots should be kept or not, if none provided, current time will be used
        :type timestamp: float
        :return: None
        """
        ScheduledTaskController._logger.info('Delete snapshots started')

        day_span = timedelta(1)
        week_span = day_span * 7

        def make_timestamp(offset):
            """
            Create an integer based timestamp
            :param offset: Offset in days
            :return: Timestamp
            """
            return int(mktime((reference - offset).timetuple()))

        if timestamp is None:
            timestamp = time.time()
        reference = datetime.fromtimestamp(timestamp).date() - day_span

        # Build the bucket layout, newest first: seven 1-day buckets, three 1-week
        # buckets and a final catch-all for everything older than 4 weeks
        layout = []
        for index in xrange(0, 7):
            layout.append({'start': make_timestamp(day_span * index),
                           'end': make_timestamp(day_span * (index + 1)),
                           'type': '1d',
                           'snapshots': []})
        for index in xrange(1, 4):
            layout.append({'start': make_timestamp(week_span * index),
                           'end': make_timestamp(week_span * (index + 1)),
                           'type': '1w',
                           'snapshots': []})
        layout.append({'start': make_timestamp(week_span * 4),
                       'end': 0,
                       'type': 'rest',
                       'snapshots': []})

        # Snapshots used as a parent for a clone must be preserved
        parent_snapshots = set([vd.parentsnapshot for vd in VDiskList.get_with_parent_snaphots()])

        # Sort every BASE vDisk's deletable snapshots into its own copy of the layout
        chains = []
        for vdisk in VDiskList.get_vdisks():
            if vdisk.info['object_type'] not in ['BASE']:
                continue
            chain = copy.deepcopy(layout)
            for snap in vdisk.snapshots:
                if snap.get('is_sticky') is True:
                    continue  # Sticky snapshots are never considered for deletion
                if snap['guid'] in parent_snapshots:
                    ScheduledTaskController._logger.info('Not deleting snapshot {0} because it has clones'.format(snap['guid']))
                    continue
                snap_time = int(snap['timestamp'])
                for bucket in chain:
                    # Bucket boundaries decrease with age: 'start' is the newest edge
                    if bucket['start'] >= snap_time > bucket['end']:
                        bucket['snapshots'].append({'timestamp': snap_time,
                                                    'snapshot_id': snap['guid'],
                                                    'vdisk_guid': vdisk.guid,
                                                    'is_consistent': snap['is_consistent']})
            chains.append(chain)

        # For every bucket, pull out the single snapshot to keep; the leftovers get deleted
        for chain in chains:
            for position, bucket in enumerate(chain):
                if position == 0:
                    # Day bucket closest to now: prefer consistent snapshots, then newer ones
                    chosen = None
                    for snap in bucket['snapshots']:
                        if chosen is None:
                            chosen = snap
                        elif snap['is_consistent'] and not chosen['is_consistent']:
                            chosen = snap
                        elif snap['is_consistent'] == chosen['is_consistent'] and \
                                snap['timestamp'] > chosen['timestamp']:
                            chosen = snap
                    bucket['snapshots'] = [s for s in bucket['snapshots'] if s['timestamp'] != chosen['timestamp']]
                elif bucket['end'] > 0:
                    # Remaining bounded buckets: the oldest snapshot survives
                    chosen = None
                    for snap in bucket['snapshots']:
                        if chosen is None or snap['timestamp'] < chosen['timestamp']:
                            chosen = snap
                    bucket['snapshots'] = [s for s in bucket['snapshots'] if s['timestamp'] != chosen['timestamp']]
                # The 'rest' bucket (end == 0) keeps nothing

        # Whatever is still bucketed is obsolete and gets removed
        for chain in chains:
            for bucket in chain:
                for snap in bucket['snapshots']:
                    VDiskController.delete_snapshot(vdisk_guid=snap['vdisk_guid'],
                                                    snapshot_id=snap['snapshot_id'])
        ScheduledTaskController._logger.info('Delete snapshots finished')
コード例 #41
0
ファイル: test_vdisk.py プロジェクト: grimpy/openvstorage
    def test_clone(self):
        """
        Test the clone functionality
            - Create a vDisk with name 'clone1'
            - Clone the vDisk and make some assertions
            - Attempt to clone again using same name and same devicename
            - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
            - Attempt to clone on Storage Driver without MDS service
            - Attempt to clone from snapshot which is not yet completely synced to backend
            - Attempt to delete the snapshot from which a clone was made
            - Clone the vDisk on another Storage Router
            - Clone another vDisk with name 'clone1' linked to another vPool
        """
        structure = Helper.build_service_structure(
            {'vpools': [1, 2],
             'storagerouters': [1, 2, 3],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        vpools = structure['vpools']
        mds_services = structure['mds_services']
        service_type = structure['service_type']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        # Test helper: deploy DTL services for both vPools on every Storage Router
        self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
        self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

        # Basic clone scenario
        vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid,
                                            name='clone1')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')

        # The clone is linked to the source vDisk through its parent snapshot
        clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshots[0]['guid'])
        self.assertTrue(expr=len(clones) == 1, msg='Expected to find 1 vDisk with parent snapshot')
        self.assertTrue(expr=len(vdisk1.child_vdisks) == 1, msg='Expected to find 1 child vDisk')

        # The clone call returns metadata describing the new vDisk
        for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
            self.assertTrue(expr=expected_key in clone1_info, msg='Expected to find key "{0}" in clone_info'.format(expected_key))
        self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'], msg='Guids do not match')
        self.assertTrue(expr=clones[0].name == clone1_info['name'], msg='Names do not match')
        self.assertTrue(expr=clones[0].devicename == clone1_info['backingdevice'], msg='Device names do not match')

        # Attempt to clone again with same name
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone1')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 1')

        # Attempt to clone again with a name which will have identical devicename
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone1%')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 2')

        # Attempt to clone on Storage Router on which vPool is not extended
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2',
                                  storagerouter_guid=storagerouters[2].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 3')

        # Attempt to clone on non-existing Storage Driver
        # Temporarily break the Storage Driver id, then restore it afterwards
        storagedrivers[1].storagedriver_id = 'non-existing'
        storagedrivers[1].save()
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 4')
        storagedrivers[1].storagedriver_id = '1'
        storagedrivers[1].save()

        # Attempt to clone on Storage Driver without MDS service
        # Temporarily move the MDS service to another Storage Router, restore afterwards
        mds_services[1].service.storagerouter = storagerouters[3]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 5')
        mds_services[1].service.storagerouter = storagerouters[1]
        mds_services[1].service.save()

        # Attempt to clone by providing snapshot_id not synced to backend
        self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find only 1 snapshot before cloning')
        metadata = {'label': 'label1',
                    'timestamp': int(time.time()),
                    'is_sticky': False,
                    'in_backend': False,
                    'is_automatic': True,
                    'is_consistent': True}
        snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata=metadata)
        self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots')
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2',
                                  snapshot_id=snapshot_id)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 6')

        # Update backend synced flag and retry
        # Test-client helper: flag the snapshot as synced to the backend so cloning can succeed
        vdisk1.storagedriver_client._set_snapshot_in_backend(vdisk1.volume_id, snapshot_id, True)
        vdisk1.invalidate_dynamics('snapshots')
        VDiskController.clone(vdisk_guid=vdisk1.guid,
                              name='clone2',
                              snapshot_id=snapshot_id)
        vdisks = VDiskList.get_vdisks()
        vdisk1.invalidate_dynamics()
        self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
        self.assertTrue(expr=len(vdisk1.child_vdisks) == 2, msg='Expected to find 2 child vDisks')
        self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots after cloning from a specified snapshot')

        # Attempt to delete the snapshot that has clones
        with self.assertRaises(RuntimeError):
            VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                            snapshot_id=snapshot_id)

        # Clone on specific Storage Router
        # Model a second Storage Driver for vPool 1 on Storage Router 2, including an MDS service
        storagedriver = StorageDriver()
        storagedriver.vpool = vpools[1]
        storagedriver.storagerouter = storagerouters[2]
        storagedriver.name = '3'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouters[2].ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = '3'
        storagedriver.ports = {'management': 1,
                               'xmlrpc': 2,
                               'dtl': 3,
                               'edge': 4}
        storagedriver.save()

        s_id = '{0}-1'.format(storagedriver.storagerouter.name)
        service = Service()
        service.name = s_id
        service.storagerouter = storagedriver.storagerouter
        service.ports = [3]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storagedriver.vpool
        mds_service.save()

        clone3 = VDisk(VDiskController.clone(vdisk_guid=vdisk1.guid,
                                             name='clone3',
                                             storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
        self.assertTrue(expr=clone3.storagerouter_guid == storagerouters[2].guid, msg='Incorrect Storage Router on which the clone is attached')

        # Clone vDisk with existing name on another vPool
        vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))
        clone_vdisk2 = VDisk(VDiskController.clone(vdisk_guid=vdisk2.guid,
                                                   name='clone1')['vdisk_guid'])
        self.assertTrue(expr=clone_vdisk2.vpool == vpools[2], msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
        self.assertTrue(expr=len([vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1']) == 2, msg='Expected to find 2 vDisks with name "clone1"')

        # Attempt to clone without specifying snapshot and snapshot fails to sync to backend
        # Test-client flag: simulate the automatic snapshot never reaching the backend
        StorageRouterClient.synced = False
        vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_2', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk2.guid,
                                  name='clone4')
        vdisk2.invalidate_dynamics()
        self.assertTrue(expr=len(vdisk2.snapshots) == 0, msg='Expected to find 0 snapshots after clone failure')
        self.assertTrue(expr=len(vdisk2.child_vdisks) == 0, msg='Expected to find 0 children after clone failure')
        StorageRouterClient.synced = True