Example #1
    def test_event_resize_from_volumedriver(self):
        """
        Test resize from volumedriver event
            - Create a vDisk using the resize event
            - Resize the created vDisk using the same resize event
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
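        # build_service_structure is assumed to return, per key, a dict that maps the integer ids
        # above to the created DAL objects (e.g. structure['vpools'][1] is the vPool modeled as id 1).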
        vpools = structure['vpools']
        storagedrivers = structure['storagedrivers']
        mds_service = structure['mds_services'][1]

        # Create volume using resize from voldrv
        device_name = '/vdisk.raw'
        srclient = StorageRouterClient(vpools[1].guid, None)
        mds_backend_config = Helper._generate_mdsmetadatabackendconfig([mds_service])
        volume_id = srclient.create_volume(device_name, mds_backend_config, 1024 ** 4, str(storagedrivers[1].storagedriver_id))
        VDiskController.resize_from_voldrv(volume_id=volume_id,
                                           volume_size=1024 ** 4,
                                           volume_path=device_name,
                                           storagedriver_id=storagedrivers[1].storagedriver_id)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 1,
                        msg='Expected to find 1 vDisk in model')
        self.assertEqual(first=vdisks[0].name,
                         second='vdisk',
                         msg='Volume name should be vdisk')
        self.assertEqual(first=vdisks[0].volume_id,
                         second=volume_id,
                         msg='Volume ID should be {0}'.format(volume_id))
        self.assertEqual(first=vdisks[0].devicename,
                         second=device_name,
                         msg='Device name should be {0}'.format(device_name))
        self.assertEqual(first=vdisks[0].size,
                         second=1024 ** 4,
                         msg='Size should be 1 TiB')

        # Resize volume using resize from voldrv
        VDiskController.resize_from_voldrv(volume_id=volume_id,
                                           volume_size=2 * 1024 ** 4,
                                           volume_path=device_name,
                                           storagedriver_id=storagedrivers[1].storagedriver_id)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 1,
                        msg='Expected to find 1 vDisk in model')
        self.assertEqual(first=vdisks[0].name,
                         second='vdisk',
                         msg='Volume name should be vdisk')
        self.assertEqual(first=vdisks[0].size,
                         second=2 * 1024 ** 4,
                         msg='Size should be 2 TiB')
Example #2
    def test_set_as_template(self):
        """
        Test the set as template functionality
            - Create a vDisk
            - Set it as template and make some assertions
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
        storagedrivers = structure['storagedrivers']

        vdisk = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 4, storagedriver_guid=storagedrivers[1].guid))
        metadata = {'is_consistent': True,
                    'is_automatic': True,
                    'is_sticky': False}
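        # The same metadata dict is mutated and reused on every iteration; create_snapshot is assumed
        # to copy it, so each of the 5 snapshots keeps its own label and timestamp.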
        for x in range(5):
            metadata['label'] = 'label{0}'.format(x)
            metadata['timestamp'] = int(time.time())
            VDiskController.create_snapshot(vdisk_guid=vdisk.guid, metadata=metadata)
        self.assertTrue(expr=len(vdisk.snapshots) == 5, msg='Expected to find 5 snapshots')

        # Set as template and validate the model
        self.assertFalse(expr=vdisk.is_vtemplate, msg='Dynamic property "is_vtemplate" should be False')
        VDiskController.set_as_template(vdisk.guid)
        vdisk.invalidate_dynamics('snapshots')
        self.assertTrue(expr=vdisk.is_vtemplate, msg='Dynamic property "is_vtemplate" should be True')
        self.assertTrue(expr=len(vdisk.snapshots) == 1, msg='Expected to find only 1 snapshot after converting to template')

        # Try again and verify the job succeeds; previously an error was raised when setting a vDisk as template an additional time
        VDiskController.set_as_template(vdisk.guid)
        self.assertTrue(expr=vdisk.is_vtemplate, msg='Dynamic property "is_vtemplate" should still be True')
Example #3
    def test_delete_snapshot(self):
        """
        Test the delete snapshot functionality
            - Create a vDisk and take a snapshot
            - Attempt to delete a non-existing snapshot
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
        storagedrivers = structure['storagedrivers']

        vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata={'timestamp': int(time.time()),
                                                                          'label': 'label1',
                                                                          'is_consistent': True,
                                                                          'is_automatic': True,
                                                                          'is_sticky': False})
        snapshot = vdisk1.snapshots[0]
        self.assertTrue(expr=len(vdisk1.snapshots) == 1,
                        msg='Expected to find 1 snapshot')
        with self.assertRaises(RuntimeError):
            VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                            snapshot_id='non-existing')

        VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                        snapshot_id=snapshot['guid'])
        self.assertTrue(expr=len(vdisk1.snapshots) == 0,
                        msg='Expected to find no more snapshots')
Example #4
    def test_multi_node_with_unused_domains(self):
        """
        Test DTL checkup on a multi node setup and create some Domains, but do not link them to any Storage Router
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        #  |      sr 2      |       |                 |                  |      1      |
        #  |      sr 3      |       |                 |                  |      1      |
        #  |      sr 4      |       |                 |                  |      1      |
        #  |      sr 5      |       |                 |                  |      1      |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'domains': [1, 2, 3],
             'storagerouters': [1, 2, 3, 4, 5],
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
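        # storagerouters is assumed to be ordered by id, so values()[1:] selects sr 2 - sr 5:
        # every Storage Router except the one hosting the vDisk is a valid DTL target here.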
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Example #5
    def test_dtl_no_unnecessary_reconfiguration(self):
        """
        Verify that when more than 3 Storage Routers are available as potential DTL targets, the same target keeps being used
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
             'storagerouter_domains': [(1, 1, 1, True), (2, 2, 1, False), (3, 3, 1, False), (4, 4, 1, False),
                                       (5, 5, 1, False), (6, 6, 1, False), (7, 7, 1, False), (8, 8, 1, False),
                                       (9, 9, 1, False), (10, 10, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5),
                                (6, 1, 6), (7, 1, 7), (8, 1, 8), (9, 1, 9), (10, 1, 10)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        config = self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                                    validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:]]},
                                                                 {'key': 'port', 'value': 3},
                                                                 {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
        # Rerun the DTL checkup 10 times and validate that the target does not change, even though 9 Storage Routers are potential candidates
        for _ in xrange(10):
            self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                               validations=[{'key': 'host', 'value': config.host},
                                                            {'key': 'port', 'value': 3},
                                                            {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Example #6
    def test_incorrect_dtl_fixup(self):
        """
        Validates whether the DTL checkup logic can fix a vDisk whose DTL is configured to an unexpected IP
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Set DTL manually to an unexpected IP
        vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id,
                                                         config=DTLConfig(str(storagerouters[1].ip), 3, DTLMode.SYNCHRONOUS))

        # And after another DTL checkup, it should be restored again
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Example #7
    def test_incorrect_dtl_fixup(self):
        """
        Validates whether the DTL checkup logic can fix a vDisk whose DTL is configured to an unexpected IP
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Set DTL manually to an unexpected IP
        vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id,
                                                         config=DTLConfig(str(storagerouters[1].ip), 3, DTLMode.SYNCHRONOUS))

        # And after another DTL checkup, it should be restored again
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Example #8
    def test_single_node(self):
        """
        Execute some DTL checkups on a single node installation
        """
        # Create 1 vdisk in single node without domains
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouter = structure['storagerouters'][1]
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain ||     DTL Target    ||
        #  |       1        |   1   |                 |                  |                    |
        self._roll_out_dtl_services(vpool=vpool, storagerouters=structure['storagerouters'])
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Create some domains, but do not attach them yet
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain ||     DTL Target    ||
        #  |       1        |   1   |                 |                  |                    |
        domains = {}
        for domain_id in range(1, 3):
            domain = Domain()
            domain.name = 'domain_{0}'.format(domain_id)
            domain.save()
            domains[domain_id] = domain

        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Attach a regular Domain to the single Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain ||     DTL Target    ||
        #  |      sr 1      |   1   |     domain 1    |                  |                    |
        sr_domain = StorageRouterDomain()
        sr_domain.backup = False
        sr_domain.domain = domains[1]
        sr_domain.storagerouter = storagerouter
        sr_domain.save()

        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Attach a recovery Domain to the single Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain ||     DTL Target    ||
        #  |      sr 1      |   1   |                 |     domain 1     |                    |
        for junction in storagerouter.domains:
            junction.delete()
        sr_domain = StorageRouterDomain()
        sr_domain.backup = True
        sr_domain.domain = domains[1]
        sr_domain.storagerouter = storagerouter
        sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])
Example #9
    def test_multi_node_with_used_domains_on_local_sr(self):
        """
        Test DTL checkup on a multi node setup and create some Domains and link them to the Storage Router on which the vDisk lives
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |     domain 1    |                  |             |
        #  |      sr 2      |       |                 |                  |             |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1, 2, 3],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagerouter_domains': [(1, 1, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        domain = structure['domains'][1]
        storagerouter = structure['storagerouters'][1]

        # When domains have been attached to the StorageRouter on which the vDisk resides, but no other Storage Router has the same Domain --> Stand Alone
        self._roll_out_dtl_services(vpool=vpool, storagerouters=structure['storagerouters'])
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Remove the linked Domain and add a recovery Domain instead --> DTL is still disabled at this point --> DTL checkup should not change anything
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |     domain 1     |             |
        #  |      sr 2      |       |                 |                  |             |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        for junction in storagerouter.domains:
            junction.delete()
        sr_domain = StorageRouterDomain()
        sr_domain.backup = True
        sr_domain.domain = domain
        sr_domain.storagerouter = storagerouter
        sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])
Example #10
    def test_list_volumes(self):
        """
        Test the list volumes functionality
            - Create 1 vDisk on vPool1 and create 3 vDisks on vPool2
            - List all volumes
            - List the volumes on vPool1
            - List the volumes on vPool2
        """
        structure = Helper.build_service_structure(
            {'vpools': [1, 2],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        vpools = structure['vpools']
        storagedrivers = structure['storagedrivers']

        vpool1 = vpools[1]
        vpool2 = vpools[2]
        VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 4, storagedriver_guid=storagedrivers[1].guid)
        VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 4, storagedriver_guid=storagedrivers[2].guid)
        VDiskController.create_new(volume_name='vdisk_2', volume_size=1024 ** 4, storagedriver_guid=storagedrivers[2].guid)
        VDiskController.create_new(volume_name='vdisk_3', volume_size=1024 ** 4, storagedriver_guid=storagedrivers[2].guid)
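        # Note that 'vdisk_1' exists on both vPools: volume names only need to be unique within a single vPool.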
        all_vdisks = VDiskList.get_vdisks()

        # List all volumes
        sd_volume_ids = set(VDiskController.list_volumes())
        model_volume_ids = set([vdisk.volume_id for vdisk in all_vdisks])
        self.assertEqual(first=len(sd_volume_ids),
                         second=4,
                         msg='Expected to retrieve all 4 volumes')
        self.assertEqual(first=sd_volume_ids,
                         second=model_volume_ids,
                         msg='Volume IDs from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_volume_ids, model_volume_ids))

        # List all volumes of vpools[1]
        sd_vpool1_volume_ids = set(VDiskController.list_volumes(vpool_guid=vpool1.guid))
        model_vpool1_volume_ids = set([vdisk.volume_id for vdisk in all_vdisks if vdisk.vpool == vpool1])
        self.assertEqual(first=len(sd_vpool1_volume_ids),
                         second=1,
                         msg='Expected to retrieve 1 volume')
        self.assertEqual(first=sd_vpool1_volume_ids,
                         second=model_vpool1_volume_ids,
                         msg='Volume IDs for vPool1 from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_vpool1_volume_ids, model_vpool1_volume_ids))

        # List all volumes of vpools[2]
        sd_vpool2_volume_ids = set(VDiskController.list_volumes(vpool_guid=vpool2.guid))
        model_vpool2_volume_ids = set([vdisk.volume_id for vdisk in all_vdisks if vdisk.vpool == vpool2])
        self.assertEqual(first=len(sd_vpool2_volume_ids),
                         second=3,
                         msg='Expected to retrieve 3 volumes')
        self.assertEqual(first=sd_vpool2_volume_ids,
                         second=model_vpool2_volume_ids,
                         msg='Volume IDs for vPool2 from Storage Driver not identical to volume IDs in model. SD: {0}  -  Model: {1}'.format(sd_vpool2_volume_ids, model_vpool2_volume_ids))
Example #11
    def test_from_single_node_to_multi_node(self):
        """
        Deploy a vDisk on a single node --> This should result in no DTL configured
        Add an additional node and verify DTL will be set
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Add a Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        #  |      sr 2      |       |                 |                  |      1      |
        storagerouter = StorageRouter()
        storagerouter.name = '2'
        storagerouter.ip = '10.0.0.2'
        storagerouter.rdma_capable = False
        storagerouter.save()
        storagerouters[2] = storagerouter
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

        storagedriver = StorageDriver()
        storagedriver.vpool = vpool
        storagedriver.storagerouter = storagerouter
        storagedriver.name = '2'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouter.ip
        storagedriver.storage_ip = '10.0.1.2'
        storagedriver.storagedriver_id = '2'
        storagedriver.ports = {'management': 1,
                               'xmlrpc': 2,
                               'dtl': 3,
                               'edge': 4}
        storagedriver.save()
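        # The 'dtl' entry in the ports dict (3) is the value the checkup validation below compares against.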
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Example #12
    def test_create_snapshot(self):
        """
        Test the create snapshot functionality
            - Create a vDisk
            - Attempt to create a snapshot providing incorrect parameters
            - Create a snapshot and make some assertions
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
        storagedrivers = structure['storagedrivers']

        vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        with self.assertRaises(ValueError):
            # noinspection PyTypeChecker
            VDiskController.create_snapshot(vdisk_guid=vdisk1.guid,
                                            metadata='')

        now = int(time.time())
        snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata={'timestamp': now,
                                                                                        'label': 'label1',
                                                                                        'is_consistent': True,
                                                                                        'is_automatic': True,
                                                                                        'is_sticky': False})
        self.assertTrue(expr=len(vdisk1.snapshots) == 1,
                        msg='Expected to find 1 snapshot')
        snapshot = vdisk1.snapshots[0]
        expected_keys = {'guid', 'timestamp', 'label', 'is_consistent', 'is_automatic', 'is_sticky', 'in_backend', 'stored'}
        self.assertEqual(first=expected_keys,
                         second=set(snapshot.keys()),
                         msg='Set of expected keys differs from reality. Expected: {0}  -  Reality: {1}'.format(expected_keys, set(snapshot.keys())))

        for key, value in {'guid': snapshot_id,
                           'label': 'label1',
                           'stored': 0,
                           'is_sticky': False,
                           'timestamp': now,
                           'in_backend': True,
                           'is_automatic': True,
                           'is_consistent': True}.iteritems():
            self.assertEqual(first=value,
                             second=snapshot[key],
                             msg='Value for key "{0}" does not match reality. Expected: {1}  -  Reality: {2}'.format(key, value, snapshot[key]))
Example #13
    def test_multi_node_with_used_domains_on_local_sr(self):
        """
        Test DTL checkup on a multi node setup and create some Domains and link them to the Storage Router on which the vDisk lives
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |     domain 1    |                  |             |
        #  |      sr 2      |       |                 |                  |             |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1, 2, 3],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagerouter_domains': [(1, 1, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        domain = structure['domains'][1]
        storagerouter = structure['storagerouters'][1]

        # When domains have been attached to the StorageRouter on which the vDisk resides, but no other Storage Router has the same Domain --> Stand Alone
        self._roll_out_dtl_services(vpool=vpool, storagerouters=structure['storagerouters'])
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Remove the linked Domain and add a recovery Domain instead --> DTL is still disabled at this point --> DTL checkup should not change anything
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |     domain 1     |             |
        #  |      sr 2      |       |                 |                  |             |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        for junction in storagerouter.domains:
            junction.delete()
        sr_domain = StorageRouterDomain()
        sr_domain.backup = True
        sr_domain.domain = domain
        sr_domain.storagerouter = storagerouter
        sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])
Example #14
    def test_multi_node_with_unused_domains(self):
        """
        Test DTL checkup on a multi node setup and create some Domains, but do not link them to any Storage Router
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        #  |      sr 2      |       |                 |                  |      1      |
        #  |      sr 3      |       |                 |                  |      1      |
        #  |      sr 4      |       |                 |                  |      1      |
        #  |      sr 5      |       |                 |                  |      1      |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'domains': [1, 2, 3],
             'storagerouters': [1, 2, 3, 4, 5],
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Example #15
    def test_delete(self):
        """
        Test the delete of a vDisk
            - Create 2 vDisks with identical names on 2 different vPools
            - Delete 1st vDisk and verify other still remains on correct vPool
            - Delete 2nd vDisk and verify no more volumes left
        """
        structure = Helper.build_service_structure(
            {'vpools': [1, 2],
             'domains': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        domains = structure['domains']
        storagedrivers = structure['storagedrivers']

        vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))

        vdisk_domain = VDiskDomain()
        vdisk_domain.domain = domains[1]
        vdisk_domain.vdisk = vdisk1
        vdisk_domain.save()

        # Delete vDisk1 and make some assertions
        VDiskController.delete(vdisk_guid=vdisk1.guid)
        with self.assertRaises(ObjectNotFoundException):
            VDisk(vdisk1.guid)
        self.assertEqual(first=len(VDiskController.list_volumes()),
                         second=1,
                         msg='Expected to find only 1 volume in Storage Driver list_volumes')
        self.assertIn(member=vdisk2,
                      container=VDiskList.get_vdisks(),
                      msg='vDisk2 should still be modeled')

        # Delete vDisk2 and make some assertions
        VDiskController.delete(vdisk_guid=vdisk2.guid)
        with self.assertRaises(ObjectNotFoundException):
            VDisk(vdisk2.guid)
        self.assertEqual(first=len(VDiskController.list_volumes()),
                         second=0,
                         msg='Expected to find no more volumes in Storage Driver list_volumes')
Example #16
    def test_event_migrate_from_volumedriver(self):
        """
        Test migrate from volumedriver event
        """
        _ = self
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        vpool = structure['vpools'][1]
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

        vdisk = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 4, storagedriver_guid=storagedrivers[1].guid))
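        # The (mocked) storagedriver client performs the actual move; migrate_from_voldrv then handles
        # the volumedriver event and is expected to update the owning Storage Driver in the model.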
        vdisk.storagedriver_client.migrate(vdisk.volume_id, storagedrivers[2].storagedriver_id, False)
        VDiskController.migrate_from_voldrv(volume_id=vdisk.volume_id, new_owner_id=storagedrivers[2].storagedriver_id)
        self.assertEqual(vdisk.storagedriver_id, storagedrivers[2].storagedriver_id)
Example #17
    def test_multi_node_with_regular_domains(self):
        """
        Test DTL checkup on a multi node setup and create some Domains and link them to several Storage Routers
        """
        # Add a regular domain to the Storage Router serving the vDisk and another Storage Router --> DTL target should be the specific Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |     domain 1    |                  |             |
        #  |      sr 2      |       |     domain 1    |                  |      1      |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1, 2, 3],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagerouter_domains': [(1, 1, 1, False), (2, 2, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        domains = structure['domains']
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add the regular Domain as regular Domain to additional Storage Routers --> DTL target should remain on same Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |     domain 1    |                  |             |
        #  |      sr 2      |       |     domain 1    |                  |      1      |
        #  |      sr 3      |       |     domain 1    |                  |             |
        #  |      sr 4      |       |     domain 1    |                  |             |
        #  |      sr 5      |       |     domain 1    |                  |             |
        for storagerouter in storagerouters.values()[2:]:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = False
            sr_domain.domain = domains[1]
            sr_domain.storagerouter = storagerouter
            sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add a recovery Domain to the Storage Router on which the vDisk lives --> nothing should change for now
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |     domain 1    |     domain 2     |             |
        #  |      sr 2      |       |     domain 1    |                  |      1      |
        #  |      sr 3      |       |     domain 1    |                  |             |
        #  |      sr 4      |       |     domain 1    |                  |             |
        #  |      sr 5      |       |     domain 1    |                  |             |
        sr_domain = StorageRouterDomain()
        sr_domain.backup = True
        sr_domain.domain = domains[2]
        sr_domain.storagerouter = storagerouters[1]
        sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add the recovery Domain as regular Domain to additional Storage Routers --> Recovery Domain should have priority over regular Domain
        # || StorageRouter || vDisk | Regular Domain     || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   | domain 1           |     domain 2     |             |
        #  |      sr 2      |       | domain 1           |                  |             |
        #  |      sr 3      |       | domain 1, domain 2 |                  |      1      |
        #  |      sr 4      |       | domain 1, domain 2 |                  |      1      |
        #  |      sr 5      |       | domain 1, domain 2 |                  |      1      |
        for storagerouter in storagerouters.values()[2:]:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = False
            sr_domain.domain = domains[2]
            sr_domain.storagerouter = storagerouter
            sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[2:]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
Example #18
    def test_clone(self):
        """
        Test the clone functionality
            - Create a vDisk with name 'clone1'
            - Clone the vDisk and make some assertions
            - Attempt to clone again using same name and same devicename
            - Attempt to clone on Storage Router which is not linked to the vPool on which the original vDisk is hosted
            - Attempt to clone on Storage Driver without MDS service
            - Attempt to clone from snapshot which is not yet completely synced to backend
            - Attempt to delete the snapshot from which a clone was made
            - Clone the vDisk on another Storage Router
            - Clone another vDisk with name 'clone1' linked to another vPool
        """
        structure = Helper.build_service_structure(
            {'vpools': [1, 2],
             'storagerouters': [1, 2, 3],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        vpools = structure['vpools']
        mds_services = structure['mds_services']
        service_type = structure['service_type']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        self._roll_out_dtl_services(vpool=vpools[1], storagerouters=storagerouters)
        self._roll_out_dtl_services(vpool=vpools[2], storagerouters=storagerouters)

        # Basic clone scenario
        vdisk1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        clone1_info = VDiskController.clone(vdisk_guid=vdisk1.guid,
                                            name='clone1')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')

        clones = VDiskList.get_by_parentsnapshot(vdisk1.snapshots[0]['guid'])
        self.assertTrue(expr=len(clones) == 1, msg='Expected to find 1 vDisk with parent snapshot')
        self.assertTrue(expr=len(vdisk1.child_vdisks) == 1, msg='Expected to find 1 child vDisk')

        for expected_key in ['vdisk_guid', 'name', 'backingdevice']:
            self.assertTrue(expr=expected_key in clone1_info, msg='Expected to find key "{0}" in clone_info'.format(expected_key))
        self.assertTrue(expr=clones[0].guid == clone1_info['vdisk_guid'], msg='Guids do not match')
        self.assertTrue(expr=clones[0].name == clone1_info['name'], msg='Names do not match')
        self.assertTrue(expr=clones[0].devicename == clone1_info['backingdevice'], msg='Device names do not match')

        # Attempt to clone again with same name
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone1')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 1')

        # Attempt to clone again with a name which will have identical devicename
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone1%')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 2')

        # Attempt to clone on Storage Router on which vPool is not extended
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2',
                                  storagerouter_guid=storagerouters[2].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 3')

        # Attempt to clone on non-existing Storage Driver
        storagedrivers[1].storagedriver_id = 'non-existing'
        storagedrivers[1].save()
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 4')
        storagedrivers[1].storagedriver_id = '1'
        storagedrivers[1].save()

        # Attempt to clone on Storage Driver without MDS service
        mds_services[1].service.storagerouter = storagerouters[3]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 5')
        mds_services[1].service.storagerouter = storagerouters[1]
        mds_services[1].service.save()

        # Attempt to clone by providing snapshot_id not synced to backend
        self.assertTrue(expr=len(vdisk1.snapshots) == 1, msg='Expected to find only 1 snapshot before cloning')
        metadata = {'label': 'label1',
                    'timestamp': int(time.time()),
                    'is_sticky': False,
                    'in_backend': False,
                    'is_automatic': True,
                    'is_consistent': True}
        snapshot_id = VDiskController.create_snapshot(vdisk_guid=vdisk1.guid, metadata=metadata)
        self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots')
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk1.guid,
                                  name='clone2',
                                  snapshot_id=snapshot_id)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks after failed clone attempt 6')

        # Update backend synced flag and retry
        vdisk1.storagedriver_client._set_snapshot_in_backend(vdisk1.volume_id, snapshot_id, True)
        vdisk1.invalidate_dynamics('snapshots')
        VDiskController.clone(vdisk_guid=vdisk1.guid,
                              name='clone2',
                              snapshot_id=snapshot_id)
        vdisks = VDiskList.get_vdisks()
        vdisk1.invalidate_dynamics()
        self.assertTrue(expr=len(vdisks) == 3, msg='Expected to find 3 vDisks')
        self.assertTrue(expr=len(vdisk1.child_vdisks) == 2, msg='Expected to find 2 child vDisks')
        self.assertTrue(expr=len(vdisk1.snapshots) == 2, msg='Expected to find 2 snapshots after cloning from a specified snapshot')

        # Attempt to delete the snapshot that has clones
        with self.assertRaises(RuntimeError):
            VDiskController.delete_snapshot(vdisk_guid=vdisk1.guid,
                                            snapshot_id=snapshot_id)

        # Clone on specific Storage Router
        storagedriver = StorageDriver()
        storagedriver.vpool = vpools[1]
        storagedriver.storagerouter = storagerouters[2]
        storagedriver.name = '3'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouters[2].ip
        storagedriver.storage_ip = '127.0.0.1'
        storagedriver.storagedriver_id = '3'
        storagedriver.ports = {'management': 1,
                               'xmlrpc': 2,
                               'dtl': 3,
                               'edge': 4}
        storagedriver.save()
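        # An MDS service is modeled on the new Storage Router below; without one, cloning there would fail (cf. failed clone attempt 5).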

        s_id = '{0}-1'.format(storagedriver.storagerouter.name)
        service = Service()
        service.name = s_id
        service.storagerouter = storagedriver.storagerouter
        service.ports = [3]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = storagedriver.vpool
        mds_service.save()

        clone3 = VDisk(VDiskController.clone(vdisk_guid=vdisk1.guid,
                                             name='clone3',
                                             storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
        self.assertTrue(expr=clone3.storagerouter_guid == storagerouters[2].guid, msg='Incorrect Storage Router on which the clone is attached')

        # Clone vDisk with existing name on another vPool
        vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[2].guid))
        clone_vdisk2 = VDisk(VDiskController.clone(vdisk_guid=vdisk2.guid,
                                                   name='clone1')['vdisk_guid'])
        self.assertTrue(expr=clone_vdisk2.vpool == vpools[2], msg='Cloned vDisk with name "clone1" was created on incorrect vPool')
        self.assertTrue(expr=len([vdisk for vdisk in VDiskList.get_vdisks() if vdisk.name == 'clone1']) == 2, msg='Expected to find 2 vDisks with name "clone1"')

        # Attempt to clone without specifying snapshot and snapshot fails to sync to backend
        StorageRouterClient.synced = False
        vdisk2 = VDisk(VDiskController.create_new(volume_name='vdisk_2', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        with self.assertRaises(RuntimeError):
            VDiskController.clone(vdisk_guid=vdisk2.guid,
                                  name='clone4')
        vdisk2.invalidate_dynamics()
        self.assertTrue(expr=len(vdisk2.snapshots) == 0, msg='Expected to find 0 snapshots after clone failure')
        self.assertTrue(expr=len(vdisk2.child_vdisks) == 0, msg='Expected to find 0 children after clone failure')
        StorageRouterClient.synced = True
Example #19
    def test_distances(self):
        """
        Validates different node distances generated (to be passed into the StorageDriver)
        """
        # Single node cluster, no domains
        self.persistent.clean()
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
        )
        storagedrivers = structure['storagedrivers']
        expected = {1: {}}  # No distances, since no other nodes exist
        for sd_id, sd in storagedrivers.iteritems():
            self.assertDictEqual(sd._cluster_node_config()['node_distance_map'], expected[sd_id])

        # Two nodes, no domains
        self.persistent.clean()
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)]}  # (<id>, <vpool_id>, <storagerouter_id>)
        )
        storagedrivers = structure['storagedrivers']
        expected = {1: {'2': StorageDriver.DISTANCES.NEAR},  # No domain, so everything is near
                    2: {'1': StorageDriver.DISTANCES.NEAR}}
        for sd_id, sd in storagedrivers.iteritems():
            self.assertDictEqual(sd._cluster_node_config()['node_distance_map'], expected[sd_id])

        # Two nodes, one domain, and only one node is in the domain
        self.persistent.clean()
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'domains': [1],
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'storagerouter_domains': [(1, 1, 1, False)]}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
        )
        storagedrivers = structure['storagedrivers']
        expected = {1: {'2': StorageDriver.DISTANCES.INFINITE},  # The other one is not in the same domain: infinity
                    2: {'1': StorageDriver.DISTANCES.NEAR}}  # No domain, so everything is near
        for sd_id, sd in storagedrivers.iteritems():
            self.assertDictEqual(sd._cluster_node_config()['node_distance_map'], expected[sd_id])

        # Two nodes, one domain, and both are in the domain
        self.persistent.clean()
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'domains': [1],
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'storagerouter_domains': [(1, 1, 1, False), (2, 2, 1, False)]}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
        )
        storagedrivers = structure['storagedrivers']
        expected = {1: {'2': StorageDriver.DISTANCES.NEAR},  # Both are in the same (primary) domain: near
                    2: {'1': StorageDriver.DISTANCES.NEAR}}
        for sd_id, sd in storagedrivers.iteritems():
            self.assertDictEqual(sd._cluster_node_config()['node_distance_map'], expected[sd_id])

        # Some more complex scenarios
        # StorageRouter | Primary | Secondary
        #    1          |    1    |     2
        #    2          |    1    |     3
        #    3          |    2    |     3
        #    4          |    2    |     1
        #    5          |    3    |
        #    6          |    3    |    1,2
        #    7          |         |     1
        #    8          |         |
        #    9          |    4    |
        #   10          |    1    |     5
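        # The expectations below are consistent with the following rule (as
        # derived from this test, not from the StorageDriver source): a shared
        # primary domain means NEAR, a peer whose primary domain is one of our
        # secondary (recovery) domains is FAR, anything else is INFINITE, and
        # a node without primary domains treats every peer as NEAR.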
        self.persistent.clean()
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'domains': [1, 2, 3, 4, 5],
             'storagerouters': range(1, 11),
             'storagedrivers': [(i, 1, i) for i in range(1, 11)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'storagerouter_domains': [(1,  1,  1, False), (2,  1,  2, True),
                                       (3,  2,  1, False), (4,  2,  3, True),
                                       (5,  3,  2, False), (6,  3,  3, True),
                                       (7,  4,  2, False), (8,  4,  1, True),
                                       (9,  5,  3, False),
                                       (10, 6,  3, False), (11, 6,  1, True), (12, 6, 2, True),
                                       (13, 7,  1, True),
                                       (14, 9,  4, False),
                                       (15, 10, 1, False), (16, 10, 5, True)]}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
        )
        storagedrivers = structure['storagedrivers']
        expected = {1:  {'2':  StorageDriver.DISTANCES.NEAR,
                         '3':  StorageDriver.DISTANCES.FAR,
                         '4':  StorageDriver.DISTANCES.FAR,
                         '5':  StorageDriver.DISTANCES.INFINITE,
                         '6':  StorageDriver.DISTANCES.INFINITE,
                         '7':  StorageDriver.DISTANCES.INFINITE,
                         '8':  StorageDriver.DISTANCES.INFINITE,
                         '9':  StorageDriver.DISTANCES.INFINITE,
                         '10': StorageDriver.DISTANCES.NEAR},
                    2:  {'1':  StorageDriver.DISTANCES.NEAR,
                         '3':  StorageDriver.DISTANCES.INFINITE,
                         '4':  StorageDriver.DISTANCES.INFINITE,
                         '5':  StorageDriver.DISTANCES.FAR,
                         '6':  StorageDriver.DISTANCES.FAR,
                         '7':  StorageDriver.DISTANCES.INFINITE,
                         '8':  StorageDriver.DISTANCES.INFINITE,
                         '9':  StorageDriver.DISTANCES.INFINITE,
                         '10': StorageDriver.DISTANCES.NEAR},
                    3:  {'1':  StorageDriver.DISTANCES.INFINITE,
                         '2':  StorageDriver.DISTANCES.INFINITE,
                         '4':  StorageDriver.DISTANCES.NEAR,
                         '5':  StorageDriver.DISTANCES.FAR,
                         '6':  StorageDriver.DISTANCES.FAR,
                         '7':  StorageDriver.DISTANCES.INFINITE,
                         '8':  StorageDriver.DISTANCES.INFINITE,
                         '9':  StorageDriver.DISTANCES.INFINITE,
                         '10': StorageDriver.DISTANCES.INFINITE},
                    4:  {'1':  StorageDriver.DISTANCES.FAR,
                         '2':  StorageDriver.DISTANCES.FAR,
                         '3':  StorageDriver.DISTANCES.NEAR,
                         '5':  StorageDriver.DISTANCES.INFINITE,
                         '6':  StorageDriver.DISTANCES.INFINITE,
                         '7':  StorageDriver.DISTANCES.INFINITE,
                         '8':  StorageDriver.DISTANCES.INFINITE,
                         '9':  StorageDriver.DISTANCES.INFINITE,
                         '10': StorageDriver.DISTANCES.FAR},
                    5:  {'1':  StorageDriver.DISTANCES.INFINITE,
                         '2':  StorageDriver.DISTANCES.INFINITE,
                         '3':  StorageDriver.DISTANCES.INFINITE,
                         '4':  StorageDriver.DISTANCES.INFINITE,
                         '6':  StorageDriver.DISTANCES.NEAR,
                         '7':  StorageDriver.DISTANCES.INFINITE,
                         '8':  StorageDriver.DISTANCES.INFINITE,
                         '9':  StorageDriver.DISTANCES.INFINITE,
                         '10': StorageDriver.DISTANCES.INFINITE},
                    6:  {'1':  StorageDriver.DISTANCES.FAR,
                         '2':  StorageDriver.DISTANCES.FAR,
                         '3':  StorageDriver.DISTANCES.FAR,
                         '4':  StorageDriver.DISTANCES.FAR,
                         '5':  StorageDriver.DISTANCES.NEAR,
                         '7':  StorageDriver.DISTANCES.INFINITE,
                         '8':  StorageDriver.DISTANCES.INFINITE,
                         '9':  StorageDriver.DISTANCES.INFINITE,
                         '10': StorageDriver.DISTANCES.FAR},
                    7:  {'1':  StorageDriver.DISTANCES.NEAR,
                         '2':  StorageDriver.DISTANCES.NEAR,
                         '3':  StorageDriver.DISTANCES.NEAR,
                         '4':  StorageDriver.DISTANCES.NEAR,
                         '5':  StorageDriver.DISTANCES.NEAR,
                         '6':  StorageDriver.DISTANCES.NEAR,
                         '8':  StorageDriver.DISTANCES.NEAR,
                         '9':  StorageDriver.DISTANCES.NEAR,
                         '10': StorageDriver.DISTANCES.NEAR},
                    8:  {'1':  StorageDriver.DISTANCES.NEAR,
                         '2':  StorageDriver.DISTANCES.NEAR,
                         '3':  StorageDriver.DISTANCES.NEAR,
                         '4':  StorageDriver.DISTANCES.NEAR,
                         '5':  StorageDriver.DISTANCES.NEAR,
                         '6':  StorageDriver.DISTANCES.NEAR,
                         '7':  StorageDriver.DISTANCES.NEAR,
                         '9':  StorageDriver.DISTANCES.NEAR,
                         '10': StorageDriver.DISTANCES.NEAR},
                    9:  {'1':  StorageDriver.DISTANCES.INFINITE,
                         '2':  StorageDriver.DISTANCES.INFINITE,
                         '3':  StorageDriver.DISTANCES.INFINITE,
                         '4':  StorageDriver.DISTANCES.INFINITE,
                         '5':  StorageDriver.DISTANCES.INFINITE,
                         '6':  StorageDriver.DISTANCES.INFINITE,
                         '7':  StorageDriver.DISTANCES.INFINITE,
                         '8':  StorageDriver.DISTANCES.INFINITE,
                         '10': StorageDriver.DISTANCES.INFINITE},
                    10: {'1':  StorageDriver.DISTANCES.NEAR,
                         '2':  StorageDriver.DISTANCES.NEAR,
                         '3':  StorageDriver.DISTANCES.INFINITE,
                         '4':  StorageDriver.DISTANCES.INFINITE,
                         '5':  StorageDriver.DISTANCES.INFINITE,
                         '6':  StorageDriver.DISTANCES.INFINITE,
                         '7':  StorageDriver.DISTANCES.INFINITE,
                         '8':  StorageDriver.DISTANCES.INFINITE,
                         '9':  StorageDriver.DISTANCES.INFINITE}}
        for sd_id, sd in storagedrivers.iteritems():
            try:
                self.assertDictEqual(sd._cluster_node_config()['node_distance_map'], expected[sd_id])
            except Exception:
                print 'Error processing: {0}'.format(sd_id)
                raise
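
# --- Illustrative sketch (not part of the original test) ---------------------
# A minimal, hedged reconstruction of the distance rule that the expectations
# above are consistent with; the function name and distance values are
# assumptions derived from this test, not the actual StorageDriver code.
def sketch_node_distance(own_primary, own_recovery, peer_primary):
    NEAR, FAR, INFINITE = 0, 10000, 20000  # hypothetical distance values
    if not own_primary:
        return NEAR  # a node without primary domains considers every peer near
    if own_primary & peer_primary:
        return NEAR  # shared primary domain
    if own_recovery & peer_primary:
        return FAR  # the peer's primary domain is one of our recovery domains
    return INFINITE

# StorageRouter 1 (primary {1}, recovery {2}) vs StorageRouter 3 (primary {2}):
assert sketch_node_distance({1}, {2}, {2}) == 10000  # FAR, as expected above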

    def test_manually_overruled_dtl(self):
        """
        The DTL target of a vDisk can be manually overruled by the customer
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |      domain 1   |                  |             |
        #  |      sr 3      |       |      domain 1   |                  |             |
        #  |      sr 4      |       |      domain 2   |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1, 2],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagerouter_domains': [(1, 1, 1, True), (2, 2, 1, False), (3, 3, 1, False), (4, 4, 2, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        domains = structure['domains']
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:3]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Set DTL manually to node 2 and add 2 vdisk domains to the vdisk
        vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id,
                                                         config=DTLConfig(str(storagerouters[2].storagedrivers[0].storage_ip), 3, DTLMode.SYNCHRONOUS))
        vdomain1 = VDiskDomain()
        vdomain2 = VDiskDomain()
        vdomain1.vdisk = vdisk
        vdomain2.vdisk = vdisk
        vdomain1.domain = domains[1]
        vdomain2.domain = domains[2]
        vdomain1.save()
        vdomain2.save()
        vdisk.has_manual_dtl = True
        vdisk.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.SYNCHRONOUS}])
        # Delete the vDiskDomain on which the DTL resides, 1 other vDiskDomain remains
        vdomain1.delete()
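        # Domain 2 is the only vDiskDomain left and Storage Router 4 is the
        # only one registered in it, so the DTL should move to Storage Router 4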
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[4].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.SYNCHRONOUS}])

        # Delete the last vDiskDomain --> DTL is no longer manual
        vdomain2.delete()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:3]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
        vdisk.discard()
        self.assertFalse(expr=vdisk.has_manual_dtl,
                         msg='vDisk "vdisk_1" should have manual_dtl flag set to False')

        # Manually overrule the DTL to None and validate that the DTL checkup leaves it untouched
        vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id, config=None)
        vdisk.has_manual_dtl = True
        vdisk.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])
Example #21
0
    def test_create_from_template(self):
        """
        Test the create from template functionality
            - Create a vDisk and convert to vTemplate
            - Attempt to create from template from a vDisk which is not a vTemplate
            - Create from template basic scenario
            - Attempt to create from template using same name
            - Attempt to create from template using same devicename
            - Attempt to create from template using Storage Router on which vPool is not extended
            - Attempt to create from template using non-existing Storage Driver
            - Attempt to create from template using Storage Driver which does not have an MDS service
            - Create from template on another Storage Router
            - Create from template without specifying a Storage Router
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1, 2, 3],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        vpool = structure['vpools'][1]
        mds_services = structure['mds_services']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

        template = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        vdisk_name = 'from_template_1'
        VDiskController.set_as_template(vdisk_guid=template.guid)
        self.assertTrue(expr=template.is_vtemplate, msg='Dynamic property "is_vtemplate" should be True')

        # Create from vDisk which is not a vTemplate
        template.storagedriver_client._set_object_type(template.volume_id, 'BASE')
        template.invalidate_dynamics(['info', 'is_vtemplate'])
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)

        # Create from template
        template.storagedriver_client._set_object_type(template.volume_id, 'TEMPLATE')
        template.invalidate_dynamics(['info', 'is_vtemplate'])
        info = VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)
        expected_keys = ['vdisk_guid', 'name', 'backingdevice']
        self.assertEqual(first=set(info.keys()),
                         second=set(expected_keys),
                         msg='Create from template did not return the expected keys')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks')
        vdisk = [vdisk for vdisk in vdisks if vdisk.is_vtemplate is False][0]
        self.assertTrue(expr=vdisk.name == vdisk_name, msg='vDisk name is incorrect. Expected: {0}  -  Actual: {1}'.format(vdisk_name, vdisk.name))
        self.assertTrue(expr=vdisk.parent_vdisk == template, msg='The parent of the vDisk is incorrect')

        # Attempt to create from template using same name
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name=vdisk_name, storagerouter_guid=storagerouters[1].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 1')

        # Attempt to create from template using same devicename
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name='^{0}$*'.format(vdisk_name), storagerouter_guid=storagerouters[1].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 2')

        # Attempt to create from template on Storage Router on which vPool is not extended
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[3].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 3')

        # Attempt to create on non-existing Storage Driver
        storagedrivers[1].storagedriver_id = 'non-existing'
        storagedrivers[1].save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2')
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 4')
        storagedrivers[1].storagedriver_id = '1'
        storagedrivers[1].save()

        # Attempt to create on Storage Driver without MDS service
        mds_services[1].service.storagerouter = storagerouters[3]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[1].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected 2 vDisks after failed attempt 5')
        mds_services[1].service.storagerouter = storagerouters[1]
        mds_services[1].service.save()

        # Create from template on another Storage Router
        vdisk2 = VDisk(VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_2', storagerouter_guid=storagerouters[2].guid)['vdisk_guid'])
        self.assertTrue(expr=vdisk2.storagerouter_guid == storagerouters[2].guid, msg='Expected vdisk2 to be hosted by Storage Router 2')

        # Create from template without specifying Storage Router
        vdisk3 = VDisk(VDiskController.create_from_template(vdisk_guid=template.guid, name='from_template_3')['vdisk_guid'])
        self.assertTrue(expr=vdisk3.storagerouter_guid == template.storagerouter_guid, msg='Expected vdisk3 to be hosted by Storage Router 1')

    def test_scrubbing(self):
        """
        Validates the scrubbing workflow
        * Scenario 1: Validate disabled scrub task and single vDisk scrub logic
        * Scenario 2: 1 vPool, 10 vDisks, 1 scrub role
                      Scrubbing fails for 5 vDisks, check if scrubbing completed for all other vDisks
                      Run scrubbing a 2nd time and verify scrubbing now works for failed vDisks
        * Scenario 3: 1 vPool, 10 vDisks, 5 scrub roles
                      Check if vDisks are divided among all threads
        * Scenario 4: 3 vPools, 9 vDisks, 5 scrub roles
                      Validate 6 threads will be spawned and used out of a potential of 15 (5 scrub roles * 3 vPools)
                      The maximum amount of threads spawned per vPool is limited to 2 when 3 to 5 vPools are present
        """
        _ = self
        for i in xrange(1, 6):
            Configuration.set('/ovs/framework/hosts/{0}/ports'.format(i),
                              {'storagedriver': [10000, 10100]})

        ##############
        # Scenario 1 #
        ##############
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
        )
        vdisk = structure['vdisks'][1]
        vpool = structure['vpools'][1]
        storagerouter = structure['storagerouters'][1]
        System._machine_id = {storagerouter.ip: '1'}
        Configuration.set('/ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(vpool.guid),
                          json.dumps({}, indent=4),
                          raw=True)
        LockedClient.scrub_controller = {
            'possible_threads': None,
            'volumes': {},
            'waiter': Waiter(1)
        }
        LockedClient.scrub_controller['volumes'][vdisk.volume_id] = {
            'success': False,
            'scrub_work': [0]
        }
        with self.assertRaises(Exception) as raise_info:
            VDiskController.scrub_single_vdisk(vdisk.guid, storagerouter.guid)
        self.assertIn(vdisk.name, raise_info.exception.message)
        LockedClient.scrub_controller['volumes'][vdisk.volume_id] = {
            'success': True,
            'scrub_work': [0]
        }
        VDiskController.scrub_single_vdisk(vdisk.guid, storagerouter.guid)
        with vdisk.storagedriver_client.make_locked_client(vdisk.volume_id) as locked_client:
            self.assertEqual(first=len(locked_client.get_scrubbing_workunits()),
                             second=0,
                             msg='Scrubbed vDisk {0} does not have the expected amount of scrubbing items: {1}'.format(vdisk.name, 0))

        ##############
        # Scenario 2 #
        ##############
        self.volatile.clean()
        self.persistent.clean()
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1), (2, 1, 1, 1), (3, 1, 1, 1), (4, 1, 1, 1), (5, 1, 1, 1),
                        (6, 1, 1, 1), (7, 1, 1, 1), (8, 1, 1, 1), (9, 1, 1, 1), (10, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
        )
        vpool = structure['vpools'][1]
        vdisks = structure['vdisks']
        storagerouter = structure['storagerouters'][1]
        System._machine_id = {storagerouter.ip: '1'}
        Configuration.set('/ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(vpool.guid),
                          json.dumps({}, indent=4),
                          raw=True)
        LockedClient.scrub_controller = {
            'possible_threads': ['scrub_{0}_{1}'.format(vpool.guid, storagerouter.guid)],
            'volumes': {},
            'waiter': Waiter(1)
        }
        failed_vdisks = []
        successful_vdisks = []
        for vdisk_id in sorted(vdisks):
            vdisk = vdisks[vdisk_id]
            success = vdisk_id % 2 == 0
            LockedClient.scrub_controller['volumes'][vdisk.volume_id] = {
                'success': success,
                'scrub_work': range(vdisk_id)
            }
            if success is True:
                successful_vdisks.append(vdisk)
            else:
                failed_vdisks.append(vdisk)

        # Execute scrubbing a 1st time
        with self.assertRaises(Exception) as raise_info:
            ScheduledTaskController.execute_scrub()
        for vdisk in failed_vdisks:
            self.assertIn(vdisk.name, raise_info.exception.message)

        # Validate expected successful vDisks
        for vdisk in successful_vdisks:
            with vdisk.storagedriver_client.make_locked_client(vdisk.volume_id) as locked_client:
                self.assertEqual(first=len(locked_client.get_scrubbing_workunits()),
                                 second=0,
                                 msg='Scrubbed vDisk {0} does still have scrubbing work left'.format(vdisk.name))
        # Validate expected failed vDisks
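        # Since scrub_work equals range(vdisk_id) and every vDisk is named
        # after its id, each failed vDisk should still have int(vdisk.name)
        # work items left, which the loop below asserts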
        for vdisk in failed_vdisks:
            with vdisk.storagedriver_client.make_locked_client(vdisk.volume_id) as locked_client:
                self.assertEqual(first=len(locked_client.get_scrubbing_workunits()),
                                 second=int(vdisk.name),
                                 msg='Scrubbed vDisk {0} does not have the expected amount of scrubbing items: {1}'.format(vdisk.name, int(vdisk.name)))

        # Execute scrubbing again
        for vdisk_id in sorted(vdisks):
            vdisk = vdisks[vdisk_id]
            LockedClient.scrub_controller['volumes'][vdisk.volume_id]['success'] = True
        ScheduledTaskController.execute_scrub()
        for vdisk in vdisks.values():
            with vdisk.storagedriver_client.make_locked_client(vdisk.volume_id) as locked_client:
                self.assertEqual(first=len(locked_client.get_scrubbing_workunits()),
                                 second=0,
                                 msg='Scrubbed vDisk {0} does still have scrubbing work left after scrubbing a 2nd time'.format(vdisk.name))

        ##############
        # Scenario 3 #
        ##############
        self.volatile.clean()
        self.persistent.clean()
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1), (2, 1, 1, 1), (3, 1, 1, 1), (4, 1, 1, 1), (5, 1, 1, 1),
                        (6, 1, 1, 1), (7, 1, 1, 1), (8, 1, 1, 1), (9, 1, 1, 1), (10, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
        )
        vpool = structure['vpools'][1]
        vdisks = structure['vdisks']
        storagerouters = structure['storagerouters']
        System._machine_id = dict((sr.ip, sr.machine_id) for sr in storagerouters.values())
        Configuration.set('/ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(vpool.guid),
                          json.dumps({}, indent=4),
                          raw=True)

        thread_names = [
            'scrub_{0}_{1}'.format(vpool.guid, storagerouter.guid)
            for storagerouter in storagerouters.values()
        ]
        LockedClient.scrub_controller = {
            'possible_threads': thread_names,
            'volumes': {},
            'waiter': Waiter(len(thread_names))
        }
        LockedClient.thread_names = thread_names[:]
        for vdisk_id in sorted(vdisks):
            vdisk = vdisks[vdisk_id]
            LockedClient.scrub_controller['volumes'][vdisk.volume_id] = {
                'success': True,
                'scrub_work': range(vdisk_id)
            }
        ScheduledTaskController.execute_scrub()
        self.assertEqual(first=len(LockedClient.thread_names),
                         second=0,
                         msg='Not all threads have been used in the process')

        ##############
        # Scenario 4 #
        ##############
        self.volatile.clean()
        self.persistent.clean()
        structure = Helper.build_service_structure(
            {'vpools': [1, 2, 3],
             'vdisks': [(1, 1, 1, 1), (2, 1, 1, 1), (3, 1, 1, 1), (4, 2, 2, 2), (5, 2, 2, 2),
                        (6, 2, 2, 2), (7, 3, 3, 3), (8, 3, 3, 3), (9, 3, 3, 3)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1), (2, 2), (3, 3)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagedrivers': [(1, 1, 1), (2, 2, 1), (3, 3, 1)]}  # (<id>, <vpool_id>, <storagerouter_id>)
        )
        vpools = structure['vpools']
        vdisks = structure['vdisks']
        storagerouters = structure['storagerouters']

        thread_names = []
        for vpool in vpools.values():
            Configuration.set('/ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(vpool.guid),
                              json.dumps({}, indent=4),
                              raw=True)
            for storagerouter in storagerouters.values():
                thread_names.append('scrub_{0}_{1}'.format(vpool.guid, storagerouter.guid))
        LockedClient.scrub_controller = {
            'possible_threads': thread_names,
            'volumes': {},
            'waiter': Waiter(len(thread_names) - 9)
        }
        LockedClient.thread_names = thread_names[:]
        for vdisk_id in sorted(vdisks):
            vdisk = vdisks[vdisk_id]
            LockedClient.scrub_controller['volumes'][vdisk.volume_id] = {
                'success': True,
                'scrub_work': range(vdisk_id)
            }
        ScheduledTaskController.execute_scrub()
        self.assertEqual(first=len(LockedClient.thread_names),
                         second=9,  # 5 SRs * 3 vPools = 15 potential threads, but only 2 are spawned per vPool --> 15 - 6 = 9 left
                         msg='Unexpected amount of threads left unused in the process')

        # 3 vPools cause the scrubber to launch only 2 threads per vPool --> 3 of the 5 possible threads should be unused per vPool
        for vpool in vpools.values():
            threads_left = [thread_name for thread_name in LockedClient.thread_names if vpool.guid in thread_name]
            self.assertEqual(first=len(threads_left),
                             second=3,
                             msg='Unexpected amount of threads left for vPool {0}'.format(vpool.name))
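
# --- Illustrative sketch (not part of the original test) ---------------------
# A hedged reconstruction of the per-vPool thread cap described in the
# docstring above; only the 1 to 5 vPool range is exercised by this test, so
# the function below is an assumption, not the scheduler's actual code.
def sketch_threads_per_vpool(amount_of_vpools, scrub_roles):
    if amount_of_vpools <= 2:
        return scrub_roles  # few vPools: every scrub role gets a thread
    return min(2, scrub_roles)  # 3 to 5 vPools: at most 2 threads per vPool

# Scenario 4: 3 vPools * 5 scrub roles = 15 potential threads, 6 used, 9 unused
assert sketch_threads_per_vpool(3, 5) * 3 == 6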

    def test_cluster_maintenance(self):
        """
        Validates whether a cluster can be correctly created
        """
        Configuration.set('/ovs/framework/hosts/1/ports',
                          {'arakoon': [10000, 10100]})
        Configuration.set('/ovs/framework/hosts/2/ports',
                          {'arakoon': [20000, 20100]})

        structure = Helper.build_service_structure({'storagerouters': [1, 2]})
        storagerouters = structure['storagerouters']
        System._machine_id = {
            storagerouters[1].ip: '1',
            storagerouters[2].ip: '2'
        }

        # Create new cluster
        mountpoint = storagerouters[1].disks[0].partitions[0].mountpoint
        if os.path.exists(mountpoint) and mountpoint != '/':
            shutil.rmtree(mountpoint)
        base_dir = mountpoint + '/test_create_cluster'
        info = ArakoonInstaller.create_cluster(
            'test', ServiceType.ARAKOON_CLUSTER_TYPES.FWK,
            storagerouters[1].ip, base_dir)

        reality = Helper.extract_dir_structure(base_dir)
        expected = {
            'dirs': {
                'arakoon': {
                    'dirs': {
                        'test': {
                            'dirs': {
                                'tlogs': {
                                    'dirs': {},
                                    'files': []
                                },
                                'db': {
                                    'dirs': {},
                                    'files': []
                                }
                            },
                            'files': []
                        }
                    },
                    'files': []
                }
            },
            'files': []
        }
        self.assertDictEqual(reality, expected)
        expected = '{0}\n\n{1}\n\n'.format(
            ArakoonInstallerTester.EXPECTED_CLUSTER_CONFIG.format(
                '1', 'test', ''),
            ArakoonInstallerTester.EXPECTED_NODE_CONFIG.format(
                '1', storagerouters[1].ip, 10000, base_dir, '1', 10001))
        self.assertEqual(
            Configuration.get(ArakoonInstaller.CONFIG_KEY.format('test'),
                              raw=True), expected)
        # @TODO: assert service availability here. It should be stopped

        ArakoonInstaller.start_cluster('test',
                                       storagerouters[1].ip,
                                       filesystem=False)
        # @TODO: assert the service is running

        config = ArakoonClusterConfig('test', filesystem=False)
        config.load_config(storagerouters[1].ip)
        client = ArakoonInstaller.build_client(config)
        reality = client.get(ArakoonInstaller.INTERNAL_CONFIG_KEY)
        self.assertEqual(reality, expected)
        self.assertFalse(client.exists(ArakoonInstaller.METADATA_KEY))
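        # The metadata key only appears once the cluster is claimed below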

        ArakoonInstaller.claim_cluster('test',
                                       storagerouters[1].ip,
                                       filesystem=False,
                                       metadata=info['metadata'])

        reality = json.loads(client.get(ArakoonInstaller.METADATA_KEY))
        expected = {
            'cluster_name': 'test',
            'cluster_type': 'FWK',
            'in_use': True,
            'internal': True
        }
        self.assertDictEqual(reality, expected)

        # Extending cluster
        mountpoint = storagerouters[2].disks[0].partitions[0].mountpoint
        if os.path.exists(mountpoint) and mountpoint != '/':
            shutil.rmtree(mountpoint)
        base_dir2 = mountpoint + '/test_extend_cluster'
        ArakoonInstaller.extend_cluster(storagerouters[1].ip,
                                        storagerouters[2].ip, 'test',
                                        base_dir2)
        reality = Helper.extract_dir_structure(base_dir)
        expected = {
            'dirs': {
                'arakoon': {
                    'dirs': {
                        'test': {
                            'dirs': {
                                'tlogs': {
                                    'dirs': {},
                                    'files': []
                                },
                                'db': {
                                    'dirs': {},
                                    'files': []
                                }
                            },
                            'files': []
                        }
                    },
                    'files': []
                }
            },
            'files': []
        }
        self.assertDictEqual(reality, expected)
        expected = '{0}\n\n{1}\n\n{2}\n\n'.format(
            ArakoonInstallerTester.EXPECTED_CLUSTER_CONFIG.format(
                '1,2', 'test', ''),
            ArakoonInstallerTester.EXPECTED_NODE_CONFIG.format(
                '1', storagerouters[1].ip, 10000, base_dir, '1', 10001),
            ArakoonInstallerTester.EXPECTED_NODE_CONFIG.format(
                '2', storagerouters[2].ip, 20000, base_dir2, '2', 20001))
        self.assertEqual(
            Configuration.get(ArakoonInstaller.CONFIG_KEY.format('test'),
                              raw=True), expected)
        # @TODO: assert service availability here. It should be stopped

        catchup_command = 'arakoon --node 2 -config file://opt/OpenvStorage/config/framework.json?key=/ovs/arakoon/test/config -catchup-only'
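        # Pre-seed the mocked SSHClient so the catchup call succeeds and is
        # recorded, allowing the assertIn below to verify it was issued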
        SSHClient._run_returns[catchup_command] = None
        SSHClient._run_recordings = []
        ArakoonInstaller.restart_cluster_add('test', [storagerouters[1].ip],
                                             storagerouters[2].ip,
                                             filesystem=False)
        self.assertIn(catchup_command, SSHClient._run_recordings)
        # @TODO: assert the service is running

        config = ArakoonClusterConfig('test', filesystem=False)
        config.load_config(storagerouters[2].ip)
        client = ArakoonInstaller.build_client(config)
        reality = client.get(ArakoonInstaller.INTERNAL_CONFIG_KEY)
        self.assertEqual(reality, expected)

        reality = json.loads(client.get(ArakoonInstaller.METADATA_KEY))
        expected = {
            'cluster_name': 'test',
            'cluster_type': 'FWK',
            'in_use': True,
            'internal': True
        }
        self.assertDictEqual(reality, expected)

        # Shrinking cluster
        ArakoonInstaller.shrink_cluster(storagerouters[1].ip,
                                        storagerouters[2].ip, 'test')
        reality = Helper.extract_dir_structure(base_dir)
        expected = {
            'dirs': {
                'arakoon': {
                    'dirs': {
                        'test': {
                            'dirs': {},
                            'files': []
                        }
                    },
                    'files': []
                }
            },
            'files': []
        }
        self.assertDictEqual(reality, expected)
        expected = '{0}\n\n{1}\n\n'.format(
            ArakoonInstallerTester.EXPECTED_CLUSTER_CONFIG.format(
                '2', 'test', ''),
            ArakoonInstallerTester.EXPECTED_NODE_CONFIG.format(
                '2', storagerouters[2].ip, 20000, base_dir2, '2', 20001))
        self.assertEqual(
            Configuration.get(ArakoonInstaller.CONFIG_KEY.format('test'),
                              raw=True), expected)
        # @TODO: assert service availability here. It should be stopped

        ArakoonInstaller.restart_cluster_remove('test', [storagerouters[2].ip],
                                                filesystem=False)
        # @TODO: assert the service is running

        config = ArakoonClusterConfig('test', filesystem=False)
        config.load_config(storagerouters[2].ip)
        client = ArakoonInstaller.build_client(config)
        reality = client.get(ArakoonInstaller.INTERNAL_CONFIG_KEY)
        self.assertEqual(reality, expected)

        reality = json.loads(client.get(ArakoonInstaller.METADATA_KEY))
        expected = {
            'cluster_name': 'test',
            'cluster_type': 'FWK',
            'in_use': True,
            'internal': True
        }
        self.assertDictEqual(reality, expected)
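
# --- Illustrative sketch (not part of the original test) ---------------------
# A hedged guess at the shape Helper.extract_dir_structure produces, based
# purely on the expected dicts above: every directory becomes a node with
# 'dirs' and 'files' keys. The function name and implementation below are
# assumptions, not the real Helper code.
import os

def sketch_extract_dir_structure(path):
    structure = {'dirs': {}, 'files': []}
    for entry in sorted(os.listdir(path)):
        full_path = os.path.join(path, entry)
        if os.path.isdir(full_path):
            structure['dirs'][entry] = sketch_extract_dir_structure(full_path)
        else:
            structure['files'].append(entry)
    return structure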
Exemple #25
0
    def test_single_node(self):
        """
        Execute some DTL checkups on a single node installation
        """
        # Create 1 vdisk in single node without domains
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouter = structure['storagerouters'][1]
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain ||     DTL Target    ||
        #  |       1        |   1   |                 |                  |                    |
        self._roll_out_dtl_services(vpool=vpool, storagerouters=structure['storagerouters'])
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Create some domains, but do not attach them yet
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain ||     DTL Target    ||
        #  |       1        |   1   |                 |                  |                    |
        domains = {}
        for domain_id in range(1, 3):
            domain = Domain()
            domain.name = 'domain_{0}'.format(domain_id)
            domain.save()
            domains[domain_id] = domain

        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Attach a regular Domain to the single Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain ||     DTL Target    ||
        #  |      sr 1      |   1   |     domain 1    |                  |                    |
        sr_domain = StorageRouterDomain()
        sr_domain.backup = False
        sr_domain.domain = domains[1]
        sr_domain.storagerouter = storagerouter
        sr_domain.save()

        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Attach a recovery Domain to the single Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain ||     DTL Target    ||
        #  |      sr 1      |   1   |                 |     domain 1     |                    |
        for junction in storagerouter.domains:
            junction.delete()
        sr_domain = StorageRouterDomain()
        sr_domain.backup = True
        sr_domain.domain = domains[1]
        sr_domain.storagerouter = storagerouter
        sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

    def test_multi_node_with_recovery_domains(self):
        """
        Test DTL checkup on a multi node setup and create some Domains and link them to the several Storage Routers
        """
        # Add a recovery Domain to the Storage Router serving the vDisk --> no other Storage Router is in that Domain yet, so no DTL target can be configured
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |                 |                  |             |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1, 2, 3],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagerouter_domains': [(1, 1, 1, True)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        domains = structure['domains']
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Add the recovery Domain of Storage Router 1 as regular Domain to Storage Router 2 --> the DTL should be configured on Storage Router 2
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |    domain 1     |                  |      1      |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        sr_domain = StorageRouterDomain()
        sr_domain.backup = False
        sr_domain.domain = domains[1]
        sr_domain.storagerouter = storagerouters[2]
        sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add the recovery domain as regular Domain to the other Storage Routers --> nothing should change
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |    domain 1     |                  |      1      |
        #  |      sr 3      |       |    domain 1     |                  |             |
        #  |      sr 4      |       |    domain 1     |                  |             |
        #  |      sr 5      |       |    domain 1     |                  |             |
        for storagerouter in storagerouters.values()[2:]:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = False
            sr_domain.domain = domains[1]
            sr_domain.storagerouter = storagerouter
            sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Remove the domain from the Storage Router which is used as DTL target
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |                 |                  |             |
        #  |      sr 3      |       |    domain 1     |                  |      1      |
        #  |      sr 4      |       |    domain 1     |                  |      1      |
        #  |      sr 5      |       |    domain 1     |                  |      1      |
        for junction in storagerouters[2].domains:
            junction.delete()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[2:]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add regular domain to the Storage Router serving the vDisk and some other Storage Routers --> recovery Domain should still get priority
        # || StorageRouter || vDisk | Regular Domain    || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   | domain 2           |      domain 1    |             |
        #  |      sr 2      |       | domain 2           |                  |             |
        #  |      sr 3      |       | domain 2           |                  |             |
        #  |      sr 4      |       | domain 1, domain 2 |                  |      1      |
        #  |      sr 5      |       | domain 2           |                  |             |
        for junction in storagerouters[3].domains:
            junction.delete()
        for junction in storagerouters[5].domains:
            junction.delete()
        for storagerouter in storagerouters.values():
            sr_domain = StorageRouterDomain()
            sr_domain.backup = False
            sr_domain.domain = domains[2]
            sr_domain.storagerouter = storagerouter
            sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[4].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
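
Note: `_run_and_validate_dtl_checkup` is defined outside this excerpt. A minimal sketch of what it presumably does, assuming the mocked storagedriver client exposes a `get_dtl_config` call and that the checkup is triggered through VDiskController (helper and attribute names are assumptions):

    def _run_and_validate_dtl_checkup(self, vdisk, validations):
        # Run the DTL checkup for one vDisk, then assert the resulting DTL config
        VDiskController.dtl_checkup(vdisk_guid=vdisk.guid)
        config = vdisk.storagedriver_client.get_dtl_config(vdisk.volume_id)
        for validation in validations:
            key, value = validation['key'], validation['value']
            if key == 'config':
                self.assertIsNone(config, msg='DTL config should be None')
            elif isinstance(value, list):
                # Any of several Storage Routers is an acceptable target
                self.assertIn(getattr(config, key), value)
            else:
                self.assertEqual(getattr(config, key), value)
        return config  # Re-used by tests that verify target stickiness
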
Exemple #27
    def test_manually_overruled_dtl(self):
        """
        The DTL target of a vDisk can be manually overruled by the customer
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |      domain 1   |                  |             |
        #  |      sr 3      |       |      domain 1   |                  |             |
        #  |      sr 4      |       |      domain 2   |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1, 2],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagerouter_domains': [(1, 1, 1, True), (2, 2, 1, False), (3, 3, 1, False), (4, 4, 2, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        domains = structure['domains']
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:3]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Set the DTL manually to node 2 and add 2 vDisk Domains to the vDisk
        vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id,
                                                         config=DTLConfig(str(storagerouters[2].storagedrivers[0].storage_ip), 3, DTLMode.SYNCHRONOUS))
        vdomain1 = VDiskDomain()
        vdomain2 = VDiskDomain()
        vdomain1.vdisk = vdisk
        vdomain2.vdisk = vdisk
        vdomain1.domain = domains[1]
        vdomain2.domain = domains[2]
        vdomain1.save()
        vdomain2.save()
        vdisk.has_manual_dtl = True
        vdisk.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.SYNCHRONOUS}])
        # Delete the vDiskDomain on which the DTL resides, 1 other vDiskDomain remains
        vdomain1.delete()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[4].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.SYNCHRONOUS}])

        # Delete the last vDiskDomain --> DTL is no longer manual
        vdomain2.delete()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:3]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
        vdisk.discard()
        self.assertFalse(expr=vdisk.has_manual_dtl,
                         msg='vDisk "vdisk_1" should have manual_dtl flag set to False')

        # Overrule the DTL manually to None and validate the DTL checkup leaves it untouched
        vdisk.storagedriver_client.set_manual_dtl_config(volume_id=vdisk.volume_id, config=None)
        vdisk.has_manual_dtl = True
        vdisk.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])
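
The checkup's handling of a manual override is not shown in this excerpt. A plausible guard, assuming the checkup skips vDisks whose `has_manual_dtl` flag is set while their current config still makes sense (sketch only, `possible_target_ips` is hypothetical):

    # Inside the checkup, per vDisk (sketch, not the actual implementation)
    if vdisk.has_manual_dtl is True:
        current_config = vdisk.storagedriver_client.get_dtl_config(vdisk.volume_id)
        if current_config is None or current_config.host in possible_target_ips:
            return  # Manually disabled or manually pinned to a valid target: leave untouched
        # The manual target disappeared (e.g. its vDisk Domain was deleted): fall through and reconfigure
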
Exemple #28
    def test_from_single_node_to_multi_node(self):
        """
        Deploy a vDisk on a single node --> This should result in no DTL configured
        Add an additional node and verify DTL will be set
        """
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Add a Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |                  |             |
        #  |      sr 2      |       |                 |                  |      1      |
        storagerouter = StorageRouter()
        storagerouter.name = '2'
        storagerouter.ip = '10.0.0.2'
        storagerouter.rdma_capable = False
        storagerouter.save()
        storagerouters[2] = storagerouter
        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)

        storagedriver = StorageDriver()
        storagedriver.vpool = vpool
        storagedriver.storagerouter = storagerouter
        storagedriver.name = '2'
        storagedriver.mountpoint = '/'
        storagedriver.cluster_ip = storagerouter.ip
        storagedriver.storage_ip = '10.0.1.2'
        storagedriver.storagedriver_id = '2'
        storagedriver.ports = {'management': 1,
                               'xmlrpc': 2,
                               'dtl': 3,
                               'edge': 4}
        storagedriver.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
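
The port value 3 asserted in every validation is not arbitrary: it is the 'dtl' entry of the ports dict each test StorageDriver is saved with. A tiny helper (hypothetical) would make that coupling explicit:

    def _expected_dtl_port(storagerouter):
        # The DTL target port equals the 'dtl' port of the target's StorageDriver
        return storagerouter.storagedrivers[0].ports['dtl']  # == 3 in these fixtures
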
Exemple #29
    def test_multi_node_with_regular_domains(self):
        """
        Test DTL checkup on a multi node setup and create some Domains and link them to the several Storage Routers
        """
        # Add a regular domain to the Storage Router serving the vDisk and another Storage Router --> DTL target should be the specific Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |     domain 1    |                  |             |
        #  |      sr 2      |       |     domain 1    |                  |      1      |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1, 2, 3],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagerouter_domains': [(1, 1, 1, False), (2, 2, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        domains = structure['domains']
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add the regular Domain to additional Storage Routers --> DTL target should remain on the same Storage Router
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |     domain 1    |                  |             |
        #  |      sr 2      |       |     domain 1    |                  |      1      |
        #  |      sr 3      |       |     domain 1    |                  |             |
        #  |      sr 4      |       |     domain 1    |                  |             |
        #  |      sr 5      |       |     domain 1    |                  |             |
        for storagerouter in storagerouters.values()[2:]:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = False
            sr_domain.domain = domains[1]
            sr_domain.storagerouter = storagerouter
            sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add recovery Domain to the Storage Router on which the vDisk lives --> nothing should change for now
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |     domain 1    |     domain 2     |             |
        #  |      sr 2      |       |     domain 1    |                  |      1      |
        #  |      sr 3      |       |     domain 1    |                  |             |
        #  |      sr 4      |       |     domain 1    |                  |             |
        #  |      sr 5      |       |     domain 1    |                  |             |
        sr_domain = StorageRouterDomain()
        sr_domain.backup = True
        sr_domain.domain = domains[2]
        sr_domain.storagerouter = storagerouters[1]
        sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add the recovery Domain as regular Domain to additional StorageRouters --> Recovery Domain should have priority over regular Domain
        # || StorageRouter || vDisk | Regular Domain    || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   | domain 1           |     domain 2     |             |
        #  |      sr 2      |       | domain 1           |                  |             |
        #  |      sr 3      |       | domain 1, domain 2 |                  |      1      |
        #  |      sr 4      |       | domain 1, domain 2 |                  |      1      |
        #  |      sr 5      |       | domain 1, domain 2 |                  |      1      |
        for storagerouter in storagerouters.values()[2:]:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = False
            sr_domain.domain = domains[2]
            sr_domain.storagerouter = storagerouter
            sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[2:]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
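
`_roll_out_dtl_services` is also defined outside this excerpt. Presumably it models a DTL service on every Storage Router so the checkup has candidates to pick from; a sketch under that assumption (the service type name and DAL calls are assumptions based on the Open vStorage DAL style seen elsewhere):

    def _roll_out_dtl_services(self, vpool, storagerouters):
        # Model a DTL service on each Storage Router for the given vPool
        service_type = ServiceTypeList.get_by_name('DTLServer')
        for sr in storagerouters.values():
            service = Service()
            service.name = 'dtl_{0}_{1}'.format(vpool.name, sr.name)
            service.type = service_type
            service.ports = [3]
            service.storagerouter = sr
            service.save()
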
Exemple #30
    def test_node_config_checkup(self):
        """
        Validates correct working of cluster registry checkup
        """
        base_structure = {'1': {'vrouter_id': '1',
                                'message_host': '10.0.1.1',
                                'message_port': 1,
                                'xmlrpc_host': '10.0.0.1',
                                'xmlrpc_port': 2,
                                'failovercache_host': '10.0.1.1',
                                'failovercache_port': 3,
                                'network_server_uri': 'tcp://10.0.1.1:4',
                                'node_distance_map': None},
                          '2': {'vrouter_id': '2',
                                'message_host': '10.0.1.2',
                                'message_port': 1,
                                'xmlrpc_host': '10.0.0.2',
                                'xmlrpc_port': 2,
                                'failovercache_host': '10.0.1.2',
                                'failovercache_port': 3,
                                'network_server_uri': 'tcp://10.0.1.2:4',
                                'node_distance_map': None}}

        def _validate_node_config(_config, _expected_map):
            expected = copy.deepcopy(base_structure[_config.vrouter_id])
            expected['node_distance_map'] = _expected_map[_config.vrouter_id]
            self.assertDictEqual(expected, {'vrouter_id': _config.vrouter_id,
                                            'message_host': _config.message_host,
                                            'message_port': _config.message_port,
                                            'xmlrpc_host': _config.xmlrpc_host,
                                            'xmlrpc_port': _config.xmlrpc_port,
                                            'failovercache_host': _config.failovercache_host,
                                            'failovercache_port': _config.failovercache_port,
                                            'network_server_uri': _config.network_server_uri,
                                            'node_distance_map': _config.node_distance_map})

        structure = Helper.build_service_structure(
            {'vpools': [1],
             'domains': [1, 2],
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 1, 2)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'storagerouter_domains': [(1, 1, 1, False), (2, 2, 1, False)]}  # (<id>, <storagerouter_id>, <domain_id>, <backup>)
        )
        storagerouters = structure['storagerouters']
        vpool = structure['vpools'][1]
        System._machine_id = {storagerouters[1].ip: '1',
                              storagerouters[2].ip: '2'}
        ArakoonInstaller.create_cluster('voldrv', ServiceType.ARAKOON_CLUSTER_TYPES.SD, storagerouters[1].ip, '/tmp')

        # Initial run, it will now be configured
        StorageRouterClient.node_config_recordings = []
        result = StorageDriverController.cluster_registry_checkup()
        self.assertDictEqual(result, {vpool.guid: {'success': True,
                                                   'changes': True}})
        self.assertListEqual(sorted(StorageRouterClient.node_config_recordings), ['1', '2'])
        expected_map = {'1': {'2': StorageDriver.DISTANCES.NEAR},
                        '2': {'1': StorageDriver.DISTANCES.NEAR}}
        configs = vpool.clusterregistry_client.get_node_configs()
        for config in configs:
            _validate_node_config(config, expected_map)

        # Running it again should not change anything
        StorageRouterClient.node_config_recordings = []
        result = StorageDriverController.cluster_registry_checkup()
        self.assertDictEqual(result, {vpool.guid: {'success': True,
                                                   'changes': False}})
        self.assertListEqual(sorted(StorageRouterClient.node_config_recordings), [])
        expected_map = {'1': {'2': StorageDriver.DISTANCES.NEAR},
                        '2': {'1': StorageDriver.DISTANCES.NEAR}}
        configs = vpool.clusterregistry_client.get_node_configs()
        for config in configs:
            _validate_node_config(config, expected_map)

        # Validate some error paths
        domain = structure['domains'][2]
        junction = structure['storagerouters'][1].domains[0]
        junction.domain = domain
        junction.save()
        vpool_config_path = 'file://opt/OpenvStorage/config/framework.json?key=/ovs/vpools/{0}/hosts/1/config'.format(vpool.guid)
        StorageRouterClient.exceptions['server_revision'] = {vpool_config_path: Exception('ClusterNotReachableException')}
        StorageRouterClient.node_config_recordings = []
        result = StorageDriverController.cluster_registry_checkup()
        self.assertDictEqual(result, {vpool.guid: {'success': True,
                                                   'changes': True}})
        self.assertListEqual(sorted(StorageRouterClient.node_config_recordings), ['2'])
        expected_map = {'1': {'2': StorageDriver.DISTANCES.INFINITE},
                        '2': {'1': StorageDriver.DISTANCES.INFINITE}}
        configs = vpool.clusterregistry_client.get_node_configs()
        for config in configs:
            _validate_node_config(config, expected_map)
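
The node_distance_map expectations encode the distance rule the checkup presumably applies: StorageDrivers whose Storage Routers share a regular Domain are NEAR, while a driver whose host cannot be reached is pushed to INFINITE. A sketch of that rule (the existence of DISTANCES.FAR next to NEAR and INFINITE is an assumption):

    def _distance(storagedriver, other, reachable=True):
        # Distance between two StorageDrivers of the same vPool (sketch)
        if reachable is False:
            return StorageDriver.DISTANCES.INFINITE
        own = set(j.domain_guid for j in storagedriver.storagerouter.domains if not j.backup)
        theirs = set(j.domain_guid for j in other.storagerouter.domains if not j.backup)
        return StorageDriver.DISTANCES.NEAR if own & theirs else StorageDriver.DISTANCES.FAR
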
Exemple #31
    def test_set_and_get_config_params(self):
        """
        Test the set_config_params functionality by validation through the get_config_params functionality
            - Verify default configuration for newly created vDisk
            - Attempt to set disallowed values
            - Attempt to set sync and async mode without specifying DTL target
            - Set SCO size
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
        storagedrivers = structure['storagedrivers']

        # Create vDisk and validate default configuration
        vdisk_1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        configuration = VDiskController.get_config_params(vdisk_guid=vdisk_1.guid)
        expected_keys = {'sco_size', 'dtl_mode', 'write_buffer', 'dtl_target', 'metadata_cache_size'}
        self.assertEqual(first=expected_keys,
                         second=set(configuration.keys()),
                         msg='Keys returned by get_config_params do not match the expected keys')
        tlog_multiplier = vdisk_1.storagedriver_client.get_tlog_multiplier(vdisk_1.volume_id)
        default_sco_size = vdisk_1.storagedriver_client.get_sco_multiplier(vdisk_1.volume_id) / 1024 * 4
        non_disposable_sco_factor = vdisk_1.storagedriver_client.get_sco_cache_max_non_disposable_factor(vdisk_1.volume_id)
        cache_capacity = 4096  # Based on 1GiB size and "metadata_page_capacity" of 64 (6 bits)
        default_values = {'sco_size': default_sco_size,
                          'dtl_mode': StorageDriverClient.FRAMEWORK_DTL_NO_SYNC,
                          'dtl_target': [],
                          'write_buffer': int(tlog_multiplier * default_sco_size * non_disposable_sco_factor),
                          'metadata_cache_size': StorageDriverClient.METADATA_CACHE_PAGE_SIZE * cache_capacity}
        for key, value in default_values.iteritems():
            self.assertEqual(first=configuration[key],
                             second=value,
                             msg='Value for "{0}" does not match expected default value: {1} vs {2}'.format(key, configuration[key], value))

        # Attempt to set incorrect values
        new_config_params = {'dtl_mode': StorageDriverClient.FRAMEWORK_DTL_NO_SYNC,
                             'sco_size': 4,
                             'dtl_target': [],
                             'write_buffer': 128}
        for key, values in {'dtl_mode': ['unknown', StorageDriverClient.VOLDRV_DTL_ASYNC],
                            'sco_size': list(set(range(257)).difference({4, 8, 16, 32, 64, 128})) + [-1],
                            'dtl_target': ['', {}, (), 0],
                            'write_buffer': [-1] + range(128) + range(10241, 10300),
                            'metadata_cache_size': [-1] + range(256 * 24)}.iteritems():
            for value in values:
                config_params = copy.deepcopy(new_config_params)
                config_params[key] = value
                with self.assertRaises(RuntimeError):
                    VDiskController.set_config_params(vdisk_guid=vdisk_1.guid, new_config_params=config_params)

        # Attempt to set DTL mode sync or async without specifying a target
        for dtl_mode in [StorageDriverClient.FRAMEWORK_DTL_SYNC, StorageDriverClient.FRAMEWORK_DTL_ASYNC]:
            config_params = copy.deepcopy(new_config_params)
            config_params['dtl_mode'] = dtl_mode
            with self.assertRaises(ValueError):
                VDiskController.set_config_params(vdisk_guid=vdisk_1.guid, new_config_params=config_params)

        # Set SCO size
        set_config = copy.deepcopy(new_config_params)
        set_config['sco_size'] = 32
        VDiskController.set_config_params(vdisk_guid=vdisk_1.guid, new_config_params=set_config)
        get_config = VDiskController.get_config_params(vdisk_guid=vdisk_1.guid)
        for key in set_config.iterkeys():
            self.assertEqual(first=set_config[key],
                             second=get_config[key],
                             msg='Actual value for key "{0}" differs from expected. Expected: {1}  -  Actual: {2}'.format(key, set_config[key], get_config[key]))
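
The expected write_buffer ties three storagedriver settings together: write_buffer = tlog_multiplier * sco_size * non_disposable_sco_factor. A worked example with values assumed purely for illustration (the mocked defaults are not visible in this excerpt):

    tlog_multiplier = 20             # tlogs per SCO (assumed)
    default_sco_size = 4             # MiB, i.e. sco_multiplier / 1024 * 4 (assumed)
    non_disposable_sco_factor = 1.5  # (assumed)
    write_buffer = int(tlog_multiplier * default_sco_size * non_disposable_sco_factor)  # -> 120 MiB
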
Exemple #32
    def test_multi_node_with_recovery_domains(self):
        """
        Test DTL checkup on a multi node setup and create some Domains and link them to the several Storage Routers
        """
        # Add a recovery Domain to the Storage Router serving the vDisk --> DTL stays unconfigured (no other Storage Router is part of that Domain yet)
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |                 |                  |             |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1, 2, 3],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5],
             'storagerouter_domains': [(1, 1, 1, True)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        domains = structure['domains']
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'config', 'value': None}])

        # Add the recovery domain as regular Domain to another Storage Router --> that Storage Router should become the DTL target
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |    domain 1     |                  |      1      |
        #  |      sr 3      |       |                 |                  |             |
        #  |      sr 4      |       |                 |                  |             |
        #  |      sr 5      |       |                 |                  |             |
        sr_domain = StorageRouterDomain()
        sr_domain.backup = False
        sr_domain.domain = domains[1]
        sr_domain.storagerouter = storagerouters[2]
        sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add the recovery domain as regular Domain to the other Storage Routers --> nothing should change
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |    domain 1     |                  |      1      |
        #  |      sr 3      |       |    domain 1     |                  |             |
        #  |      sr 4      |       |    domain 1     |                  |             |
        #  |      sr 5      |       |    domain 1     |                  |             |
        for storagerouter in storagerouters.values()[2:]:
            sr_domain = StorageRouterDomain()
            sr_domain.backup = False
            sr_domain.domain = domains[1]
            sr_domain.storagerouter = storagerouter
            sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[2].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Remove the domain from the Storage Router which is used as DTL target
        # || StorageRouter || vDisk | Regular Domain || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   |                 |      domain 1    |             |
        #  |      sr 2      |       |                 |                  |             |
        #  |      sr 3      |       |    domain 1     |                  |      1      |
        #  |      sr 4      |       |    domain 1     |                  |      1      |
        #  |      sr 5      |       |    domain 1     |                  |      1      |
        for junction in storagerouters[2].domains:
            junction.delete()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[2:]]},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])

        # Add regular domain to the Storage Router serving the vDisk and some other Storage Routers --> recovery Domain should still get priority
        # || StorageRouter || vDisk | Regular Domain    || Recovery Domain || DTL Target ||
        #  |      sr 1      |   1   | domain 2           |      domain 1    |             |
        #  |      sr 2      |       | domain 2           |                  |             |
        #  |      sr 3      |       | domain 2           |                  |             |
        #  |      sr 4      |       | domain 1, domain 2 |                  |      1      |
        #  |      sr 5      |       | domain 2           |                  |             |
        for junction in storagerouters[3].domains:
            junction.delete()
        for junction in storagerouters[5].domains:
            junction.delete()
        for storagerouter in storagerouters.values():
            sr_domain = StorageRouterDomain()
            sr_domain.backup = False
            sr_domain.domain = domains[2]
            sr_domain.storagerouter = storagerouter
            sr_domain.save()
        self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                           validations=[{'key': 'host', 'value': storagerouters[4].storagedrivers[0].storage_ip},
                                                        {'key': 'port', 'value': 3},
                                                        {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
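
Taken together, these scenarios pin down a priority order for picking a DTL target. A condensed sketch of that ordering (an interpretation of the assertions above, not the actual implementation):

    def _dtl_candidates(source_sr, all_srs):
        # Recovery Domains of the vDisk's Storage Router win over its regular Domains;
        # without any Domains, every other Storage Router is a candidate
        recovery = set(j.domain_guid for j in source_sr.domains if j.backup)
        regular = set(j.domain_guid for j in source_sr.domains if not j.backup)
        others = [sr for sr in all_srs if sr != source_sr]
        for domain_guids in (recovery, regular):
            candidates = [sr for sr in others
                          if domain_guids & set(j.domain_guid for j in sr.domains if not j.backup)]
            if candidates:
                return candidates
        return others if not (recovery or regular) else []  # Domains defined but nobody matches -> no DTL
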
Exemple #33
    def test_set_and_get_config_params(self):
        """
        Test the set_config_params functionality by validation through the get_config_params functionality
            - Verify default configuration for newly created vDisk
            - Attempt to set disallowed values
            - Attempt to set sync and async mode without specifying DTL target
            - Set SCO size
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'storagerouters': [1],
             'storagedrivers': [(1, 1, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1)]}  # (<id>, <storagedriver_id>)
        )
        storagedrivers = structure['storagedrivers']

        # Create vDisk and validate default configuration
        vdisk_1 = VDisk(VDiskController.create_new(volume_name='vdisk_1', volume_size=1024 ** 3, storagedriver_guid=storagedrivers[1].guid))
        configuration = VDiskController.get_config_params(vdisk_guid=vdisk_1.guid)
        expected_keys = {'sco_size', 'dtl_mode', 'write_buffer', 'dtl_target', 'metadata_cache_size'}
        self.assertEqual(first=expected_keys,
                         second=set(configuration.keys()),
                         msg='Keys returned by get_config_params do not match the expected keys')
        tlog_multiplier = vdisk_1.storagedriver_client.get_tlog_multiplier(vdisk_1.volume_id)
        default_sco_size = vdisk_1.storagedriver_client.get_sco_multiplier(vdisk_1.volume_id) / 1024 * 4
        non_disposable_sco_factor = vdisk_1.storagedriver_client.get_sco_cache_max_non_disposable_factor(vdisk_1.volume_id)
        cache_capacity = 4096  # Based on 1GiB size and "metadata_page_capacity" of 64 (6 bits)
        default_values = {'sco_size': default_sco_size,
                          'dtl_mode': StorageDriverClient.FRAMEWORK_DTL_NO_SYNC,
                          'dtl_target': [],
                          'write_buffer': int(tlog_multiplier * default_sco_size * non_disposable_sco_factor),
                          'metadata_cache_size': StorageDriverClient.METADATA_CACHE_PAGE_SIZE * cache_capacity}
        for key, value in default_values.iteritems():
            self.assertEqual(first=configuration[key],
                             second=value,
                             msg='Value for "{0}" does not match expected default value: {1} vs {2}'.format(key, configuration[key], value))

        # Attempt to set incorrect values
        new_config_params = {'dtl_mode': StorageDriverClient.FRAMEWORK_DTL_NO_SYNC,
                             'sco_size': 4,
                             'dtl_target': [],
                             'write_buffer': 128}
        for key, values in {'dtl_mode': ['unknown', StorageDriverClient.VOLDRV_DTL_ASYNC],
                            'sco_size': list(set(range(257)).difference({4, 8, 16, 32, 64, 128})) + [-1],
                            'dtl_target': ['', {}, (), 0],
                            'write_buffer': [-1] + range(128) + range(10241, 10300),
                            'metadata_cache_size': [-1] + range(256 * 24)}.iteritems():
            for value in values:
                config_params = copy.deepcopy(new_config_params)
                config_params[key] = value
                with self.assertRaises(RuntimeError):
                    VDiskController.set_config_params(vdisk_guid=vdisk_1.guid, new_config_params=config_params)

        # Attempt to set DTL mode sync or async without specifying a target
        for dtl_mode in [StorageDriverClient.FRAMEWORK_DTL_SYNC, StorageDriverClient.FRAMEWORK_DTL_ASYNC]:
            config_params = copy.deepcopy(new_config_params)
            config_params['dtl_mode'] = dtl_mode
            with self.assertRaises(ValueError):
                VDiskController.set_config_params(vdisk_guid=vdisk_1.guid, new_config_params=config_params)

        # Set SCO size
        set_config = copy.deepcopy(new_config_params)
        set_config['sco_size'] = 32
        VDiskController.set_config_params(vdisk_guid=vdisk_1.guid, new_config_params=set_config)
        get_config = VDiskController.get_config_params(vdisk_guid=vdisk_1.guid)
        for key in set_config.iterkeys():
            self.assertEqual(first=set_config[key],
                             second=get_config[key],
                             msg='Actual value for key "{0}" differs from expected. Expected: {1}  -  Actual: {2}'.format(key, set_config[key], get_config[key]))
Exemple #34
    def test_dtl_no_unnecessary_reconfiguration(self):
        """
        Verify that when more than 3 Storage Routers are available as possible DTL targets, the same target is used over and over again
        """
        structure = Helper.build_service_structure(
            {'vpools': [1],
             'vdisks': [(1, 1, 1, 1)],  # (<id>, <storagedriver_id>, <vpool_id>, <mds_service_id>)
             'domains': [1],
             'mds_services': [(1, 1)],  # (<id>, <storagedriver_id>)
             'storagerouters': [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
             'storagerouter_domains': [(1, 1, 1, True), (2, 2, 1, False), (3, 3, 1, False), (4, 4, 1, False), (5, 5, 1, False),
                                       (6, 6, 1, False), (7, 7, 1, False), (8, 8, 1, False), (9, 9, 1, False), (10, 10, 1, False)],  # (<sr_domain_id>, <sr_id>, <domain_id>, <backup>)
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 1, 5),
                                (6, 1, 6), (7, 1, 7), (8, 1, 8), (9, 1, 9), (10, 1, 10)]}  # (<id>, <vpool_id>, <sr_id>)
        )
        vpool = structure['vpools'][1]
        vdisk = structure['vdisks'][1]
        storagerouters = structure['storagerouters']

        self._roll_out_dtl_services(vpool=vpool, storagerouters=storagerouters)
        config = self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                                    validations=[{'key': 'host', 'value': [sr.storagedrivers[0].storage_ip for sr in storagerouters.values()[1:]]},
                                                                 {'key': 'port', 'value': 3},
                                                                 {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
        # Rerun DTL checkup 10 times and validate the target does not change even though 9 Storage Routers are potential candidates
        for _ in xrange(10):
            self._run_and_validate_dtl_checkup(vdisk=vdisk,
                                               validations=[{'key': 'host', 'value': config.host},
                                                            {'key': 'port', 'value': 3},
                                                            {'key': 'mode', 'value': DTLMode.ASYNCHRONOUS}])
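
The stickiness verified here presumably comes from an early-out in the checkup: an existing config that still points at a valid candidate is kept rather than re-randomised. A sketch of that check (names assumed):

    # Inside the checkup, per vDisk (sketch)
    current = vdisk.storagedriver_client.get_dtl_config(vdisk.volume_id)
    candidate_ips = [sr.storagedrivers[0].storage_ip for sr in possible_storagerouters]
    if current is not None and current.host in candidate_ips and current.mode == DTLMode.ASYNCHRONOUS:
        return  # Current target is still acceptable, do not reconfigure
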
Exemple #35
    def test_cluster_maintenance(self):
        """
        Validates whether a cluster can be correctly created
        """
        Configuration.set('/ovs/framework/hosts/1/ports', {'arakoon': [10000, 10100]})
        Configuration.set('/ovs/framework/hosts/2/ports', {'arakoon': [20000, 20100]})

        structure = Helper.build_service_structure(
            {'storagerouters': [1, 2]}
        )
        storagerouters = structure['storagerouters']
        System._machine_id = {storagerouters[1].ip: '1',
                              storagerouters[2].ip: '2'}

        # Create new cluster
        mountpoint = storagerouters[1].disks[0].partitions[0].mountpoint
        if os.path.exists(mountpoint) and mountpoint != '/':
            shutil.rmtree(mountpoint)
        base_dir = mountpoint + '/test_create_cluster'
        info = ArakoonInstaller.create_cluster('test', ServiceType.ARAKOON_CLUSTER_TYPES.FWK, storagerouters[1].ip, base_dir)

        reality = Helper.extract_dir_structure(base_dir)
        expected = {'dirs': {'arakoon': {'dirs': {'test': {'dirs': {'tlogs': {'dirs': {},
                                                                              'files': []},
                                                                    'db': {'dirs': {},
                                                                           'files': []}},
                                                           'files': []}},
                                         'files': []}},
                    'files': []}
        self.assertDictEqual(reality, expected)
        expected = '{0}\n\n{1}\n\n'.format(ArakoonInstallerTester.EXPECTED_CLUSTER_CONFIG.format('1', 'test', ''),
                                           ArakoonInstallerTester.EXPECTED_NODE_CONFIG.format(
                                               '1', storagerouters[1].ip, 10000, base_dir, '1', 10001
                                           ))
        self.assertEqual(Configuration.get(ArakoonInstaller.CONFIG_KEY.format('test'), raw=True), expected)
        # @TODO: assert service availability here. It should be stopped

        ArakoonInstaller.start_cluster('test', storagerouters[1].ip, filesystem=False)
        # @TODO: assert the service is running

        config = ArakoonClusterConfig('test', filesystem=False)
        config.load_config(storagerouters[1].ip)
        client = ArakoonInstaller.build_client(config)
        reality = client.get(ArakoonInstaller.INTERNAL_CONFIG_KEY)
        self.assertEqual(reality, expected)
        self.assertFalse(client.exists(ArakoonInstaller.METADATA_KEY))

        ArakoonInstaller.claim_cluster('test', storagerouters[1].ip, filesystem=False, metadata=info['metadata'])

        reality = json.loads(client.get(ArakoonInstaller.METADATA_KEY))
        expected = {'cluster_name': 'test',
                    'cluster_type': 'FWK',
                    'in_use': True,
                    'internal': True}
        self.assertDictEqual(reality, expected)

        # Extending cluster
        mountpoint = storagerouters[2].disks[0].partitions[0].mountpoint
        if os.path.exists(mountpoint) and mountpoint != '/':
            shutil.rmtree(mountpoint)
        base_dir2 = mountpoint + '/test_extend_cluster'
        ArakoonInstaller.extend_cluster(storagerouters[1].ip, storagerouters[2].ip, 'test', base_dir2)
        reality = Helper.extract_dir_structure(base_dir)
        expected = {'dirs': {'arakoon': {'dirs': {'test': {'dirs': {'tlogs': {'dirs': {},
                                                                              'files': []},
                                                                    'db': {'dirs': {},
                                                                           'files': []}},
                                                           'files': []}},
                                         'files': []}},
                    'files': []}
        self.assertDictEqual(reality, expected)
        expected = '{0}\n\n{1}\n\n{2}\n\n'.format(ArakoonInstallerTester.EXPECTED_CLUSTER_CONFIG.format('1,2', 'test', ''),
                                                  ArakoonInstallerTester.EXPECTED_NODE_CONFIG.format(
                                                      '1', storagerouters[1].ip, 10000, base_dir, '1', 10001
                                                  ),
                                                  ArakoonInstallerTester.EXPECTED_NODE_CONFIG.format(
                                                      '2', storagerouters[2].ip, 20000, base_dir2, '2', 20001
                                                  ))
        self.assertEqual(Configuration.get(ArakoonInstaller.CONFIG_KEY.format('test'), raw=True), expected)
        # @TODO: assert service availability here. It should be stopped

        catchup_command = 'arakoon --node 2 -config file://opt/OpenvStorage/config/framework.json?key=/ovs/arakoon/test/config -catchup-only'
        SSHClient._run_returns[catchup_command] = None
        SSHClient._run_recordings = []
        ArakoonInstaller.restart_cluster_add('test', [storagerouters[1].ip], storagerouters[2].ip, filesystem=False)
        self.assertIn(catchup_command, SSHClient._run_recordings)
        # @TODO: assert the service is running

        config = ArakoonClusterConfig('test', filesystem=False)
        config.load_config(storagerouters[2].ip)
        client = ArakoonInstaller.build_client(config)
        reality = client.get(ArakoonInstaller.INTERNAL_CONFIG_KEY)
        self.assertEqual(reality, expected)

        reality = json.loads(client.get(ArakoonInstaller.METADATA_KEY))
        expected = {'cluster_name': 'test',
                    'cluster_type': 'FWK',
                    'in_use': True,
                    'internal': True}
        self.assertDictEqual(reality, expected)

        # Shrinking cluster
        ArakoonInstaller.shrink_cluster(storagerouters[1].ip, storagerouters[2].ip, 'test')
        reality = Helper.extract_dir_structure(base_dir)
        expected = {'dirs': {'arakoon': {'dirs': {'test': {'dirs': {}, 'files': []}},
                                         'files': []}},
                    'files': []}
        self.assertDictEqual(reality, expected)
        expected = '{0}\n\n{1}\n\n'.format(ArakoonInstallerTester.EXPECTED_CLUSTER_CONFIG.format('2', 'test', ''),
                                           ArakoonInstallerTester.EXPECTED_NODE_CONFIG.format(
                                               '2', storagerouters[2].ip, 20000, base_dir2, '2', 20001
                                           ))
        self.assertEqual(Configuration.get(ArakoonInstaller.CONFIG_KEY.format('test'), raw=True), expected)
        # @TODO: assert service availability here. It should be stopped

        ArakoonInstaller.restart_cluster_remove('test', [storagerouters[2].ip], filesystem=False)
        # @TODO: assert the service is running

        config = ArakoonClusterConfig('test', filesystem=False)
        config.load_config(storagerouters[2].ip)
        client = ArakoonInstaller.build_client(config)
        reality = client.get(ArakoonInstaller.INTERNAL_CONFIG_KEY)
        self.assertEqual(reality, expected)

        reality = json.loads(client.get(ArakoonInstaller.METADATA_KEY))
        expected = {'cluster_name': 'test',
                    'cluster_type': 'FWK',
                    'in_use': True,
                    'internal': True}
        self.assertDictEqual(reality, expected)
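
The metadata assertion above recurs three times (create, extend, shrink); a small helper would make the invariant explicit. This is a hypothetical refactor, not part of the original test:

    def _assert_cluster_metadata(self, client):
        # The claimed cluster's metadata must stay stable across extend/shrink
        reality = json.loads(client.get(ArakoonInstaller.METADATA_KEY))
        self.assertDictEqual(reality, {'cluster_name': 'test',
                                       'cluster_type': 'FWK',
                                       'in_use': True,
                                       'internal': True})
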
Exemple #36
    def test_create_new(self):
        """
        Test the create new volume functionality
            - Attempt to create a vDisk larger than 64 TiB
            - Create a vDisk of exactly 64 TiB
            - Attempt to create a vDisk with identical name
            - Attempt to create a vDisk with identical devicename
            - Create a vDisk with identical name on another vPool
            - Attempt to create a vDisk on a Storage Driver without MDS service
        """
        structure = Helper.build_service_structure(
            {'vpools': [1, 2],
             'storagerouters': [1, 2],
             'storagedrivers': [(1, 1, 1), (2, 2, 1)],  # (<id>, <vpool_id>, <storagerouter_id>)
             'mds_services': [(1, 1), (2, 2)]}  # (<id>, <storagedriver_id>)
        )
        vpools = structure['vpools']
        mds_services = structure['mds_services']
        storagedrivers = structure['storagedrivers']
        storagerouters = structure['storagerouters']
        size_64_tib = 64 * 1024 ** 4

        # Verify maximum size of 64TiB
        vdisk_name_1 = 'vdisk_1'
        vdisk_name_2 = 'vdisk_2'
        with self.assertRaises(ValueError):
            VDiskController.create_new(volume_name=vdisk_name_1, volume_size=size_64_tib + 1, storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 0, msg='Expected to find 0 vDisks after failure 1')

        # Create volume of maximum size
        VDiskController.create_new(volume_name=vdisk_name_1, volume_size=size_64_tib, storagedriver_guid=storagedrivers[1].guid)
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 1, msg='Expected to find 1 vDisk')
        self.assertTrue(expr=vdisks[0].storagerouter_guid == storagerouters[1].guid, msg='Storage Router does not match expected value')
        self.assertTrue(expr=vdisks[0].size == size_64_tib, msg='Size does not match expected value')
        self.assertTrue(expr=vdisks[0].name == vdisk_name_1, msg='Name does not match expected value')
        self.assertTrue(expr=vdisks[0].vpool == vpools[1], msg='vPool does not match expected value')
        self.assertTrue(expr=vdisks[0].devicename == VDiskController.clean_devicename(vdisk_name_1), msg='Devicename does not match expected value')

        # Attempt to create same volume on same vPool
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(volume_name=vdisk_name_1, volume_size=size_64_tib, storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 1, msg='Expected to find 1 vDisk after failure 2')

        # Attempt to create volume with identical devicename on same vPool
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(volume_name='{0}%^$'.format(vdisk_name_1), volume_size=size_64_tib, storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 1, msg='Expected to find 1 vDisk after failure 3')

        # Create same volume on another vPool
        vdisk2 = VDisk(VDiskController.create_new(volume_name=vdisk_name_2, volume_size=size_64_tib, storagedriver_guid=storagedrivers[2].guid))
        vdisks = VDiskList.get_vdisks()
        self.assertTrue(expr=len(vdisks) == 2, msg='Expected to find 2 vDisks')
        self.assertTrue(expr=vdisk2.storagerouter_guid == storagerouters[1].guid, msg='Storage Router does not match expected value')
        self.assertTrue(expr=vdisk2.size == size_64_tib, msg='Size does not match expected value')
        self.assertTrue(expr=vdisk2.name == vdisk_name_2, msg='Name does not match expected value')
        self.assertTrue(expr=vdisk2.vpool == vpools[2], msg='vPool does not match expected value')
        self.assertTrue(expr=vdisk2.devicename == VDiskController.clean_devicename(vdisk_name_2), msg='Devicename does not match expected value')

        # Attempt to create vDisk on Storage Driver without MDS service
        mds_services[1].service.storagerouter = storagerouters[2]
        mds_services[1].service.save()
        with self.assertRaises(RuntimeError):
            VDiskController.create_new(volume_name='vdisk_3', volume_size=size_64_tib, storagedriver_guid=storagedrivers[1].guid)
        self.assertTrue(expr=len(VDiskList.get_vdisks()) == 2, msg='Expected to find 2 vDisks after failure 4')
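
The identical-devicename failure works because clean_devicename normalises a volume name before it becomes a path, so 'vdisk_1%^$' collapses onto the same devicename as 'vdisk_1'. A plausible sketch of such a normalisation (the real implementation may differ):

    import re

    def clean_devicename(name):
        # Drop characters that are unsafe in a filesystem path, then build the .raw device name
        cleaned = re.sub('[^a-zA-Z0-9_-]', '', name.strip())
        return '/{0}.raw'.format(cleaned)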