def test_get_volume_stats(self, _mock_get_version):
    """Exercise _update_volume_stats with stubbed brick LVM helpers.

    Verifies the pool capacity values reported by the driver and the
    ``sparse_copy_volume`` flag for both the default case and the
    thin-provisioned case (the latter is set in check_for_setup_error).
    """
    # Fake brick_lvm.LVM accessors returning canned PV/VG/LV data.
    def _fake_get_all_physical_volumes(obj, root_helper, vg_name):
        return [{}]

    @staticmethod
    def _fake_get_all_volume_groups(root_helper, vg_name=None):
        return [{'name': 'cinder-volumes',
                 'size': '5.52',
                 'available': '0.52',
                 'lv_count': '2',
                 'uuid': 'vR1JU3-FAKE-C4A9-PQFh-Mctm-9FwA-Xwzc1m'}]

    def _fake_get_volumes(obj, lv_name=None):
        return [{'vg': 'fake_vg', 'name': 'fake_vol', 'size': '1000'}]

    self.stubs.Set(brick_lvm.LVM,
                   'get_all_volume_groups',
                   _fake_get_all_volume_groups)
    self.stubs.Set(brick_lvm.LVM,
                   'get_all_physical_volumes',
                   _fake_get_all_physical_volumes)
    self.stubs.Set(brick_lvm.LVM,
                   'get_volumes',
                   _fake_get_volumes)

    self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
    self.volume.driver._update_volume_stats()
    stats = self.volume.driver._stats

    # Capacity numbers come straight from the faked VG report above.
    self.assertEqual(
        float('5.52'), stats['pools'][0]['total_capacity_gb'])
    self.assertEqual(
        float('0.52'), stats['pools'][0]['free_capacity_gb'])
    self.assertEqual(
        float('5.0'), stats['pools'][0]['provisioned_capacity_gb'])
    self.assertEqual(
        int('1'), stats['pools'][0]['total_volumes'])
    self.assertFalse(stats['sparse_copy_volume'])

    # Check value of sparse_copy_volume for thin enabled case.
    # This value is set in check_for_setup_error.
    self.configuration = conf.Configuration(None)
    self.configuration.lvm_type = 'thin'
    vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
                                   False,
                                   None,
                                   'default')
    lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                     db=db,
                                     vg_obj=vg_obj)
    lvm_driver.check_for_setup_error()
    lvm_driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
    lvm_driver._update_volume_stats()
    stats = lvm_driver._stats
    self.assertTrue(stats['sparse_copy_volume'])
def test_get_volume_stats(self, _mock_get_version, mock_vgs, mock_pvs,
                          mock_get_volumes):
    """Exercise _update_volume_stats via decorator-mocked brick helpers.

    The VG/PV/LV data comes from the mock decorators (mock_vgs, mock_pvs,
    mock_get_volumes — presumably configured at the decorator site; not
    visible in this chunk).  Also verifies ``sparse_copy_volume`` is
    enabled for a thin-provisioned driver after check_for_setup_error.
    """
    self.volume.driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
    self.volume.driver._update_volume_stats()
    stats = self.volume.driver._stats

    self.assertEqual(float('5.52'),
                     stats['pools'][0]['total_capacity_gb'])
    self.assertEqual(float('0.52'),
                     stats['pools'][0]['free_capacity_gb'])
    self.assertEqual(float('5.0'),
                     stats['pools'][0]['provisioned_capacity_gb'])
    self.assertEqual(int('1'),
                     stats['pools'][0]['total_volumes'])
    self.assertFalse(stats['sparse_copy_volume'])

    # Check value of sparse_copy_volume for thin enabled case.
    # This value is set in check_for_setup_error.
    self.configuration = conf.Configuration(None)
    self.configuration.lvm_type = 'thin'
    self.configuration.target_helper = 'lioadm'
    vg_obj = fake_lvm.FakeBrickLVM('cinder-volumes',
                                   False,
                                   None,
                                   'default')
    lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                     db=db,
                                     vg_obj=vg_obj)
    lvm_driver.check_for_setup_error()
    lvm_driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
    lvm_driver._update_volume_stats()
    stats = lvm_driver._stats
    self.assertTrue(stats['sparse_copy_volume'])
def setUp(self):
    """Prepare the test case with a mocked configuration and a fake VG."""
    self._mox = mox.Mox()
    config = mox.MockObject(conf.Configuration)
    config.volume_group_name = 'fake-volumes'
    self.configuration = config
    super(BrickLvmTestCase, self).setUp()
    # Route every shell-out through the fake executor.
    self.stubs.Set(processutils, 'execute', self.fake_execute)
    self.vg = brick.LVM(config.volume_group_name)
def test_snapshot_over_subscription(self, _mock_get_version, mock_vgs,
                                    mock_pvs, mock_get_volumes):
    """Snapshot creation must fail when a thin pool is over-subscribed.

    With max_over_subscription_ratio = 1 and a VG reporting 8 of 10
    provisioned, creating a 5-unit snapshot must raise
    LVMThinPoolCapacityError.
    """
    self.configuration.lvm_type = 'thin'
    self.configuration.max_over_subscription_ratio = 1
    lvm_driver = lvm.LVMVolumeDriver(configuration=self.configuration,
                                     db=db)
    # Test case for thin LVM
    lvm_driver._sparse_copy_volume = True
    lvm_driver.vg = brick_lvm.LVM('cinder-volumes', 'sudo')
    volume_size = 5
    with mock.patch.object(self.volume.driver, '_create_volume'), \
            mock.patch.object(lvm_driver.vg, 'get_volume',
                              return_value={'name': 'fake_lv',
                                            'size': volume_size}), \
            mock.patch.object(lvm_driver.vg, 'create_lv_snapshot'):
        # Simulate a pool with no headroom for the snapshot.
        lvm_driver.vg.vg_size = 10
        lvm_driver.vg.vg_provisioned_capacity = 8
        fake_volume = tests_utils.create_volume(
            self.context, size=volume_size)
        fake_snapshot = tests_utils.create_snapshot(
            self.context, fake_volume['id'])
        self.assertRaises(exception.LVMThinPoolCapacityError,
                          lvm_driver.create_snapshot, fake_snapshot)
def setUp(self):
    """Build a fake-vg brick.LVM object backed by the stubbed executor."""
    self.configuration = mox.MockObject(conf.Configuration)
    self.configuration.volume_group_name = 'fake-vg'
    super(BrickLvmTestCase, self).setUp()
    # Stub processutils.execute for static methods
    self.stubs.Set(processutils, 'execute', self.fake_execute)
    vg_name = self.configuration.volume_group_name
    self.vg = brick.LVM(vg_name, 'sudo', False, None, 'default',
                        self.fake_execute)
def setUp(self):
    """Build a fake-vg brick.LVM object with execute mocked out."""
    config = mock.Mock(conf.Configuration)
    config.volume_group_name = 'fake-vg'
    self.configuration = config
    super(BrickLvmTestCase, self).setUp()
    # All LVM shell commands go through the fake executor.
    self.mock_object(processutils, 'execute', self.fake_execute)
    self.vg = brick.LVM(config.volume_group_name, 'sudo', False, None,
                        'default', self.fake_execute)
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
    """Optimize the migration if the destination is on the same server.

    If the specified host is another back-end on the same server, and
    the volume is not attached, we can do the migration locally without
    going through iSCSI.

    :param ctxt: request context
    :param volume: volume to migrate (must be 'available')
    :param host: destination host dict; its capabilities must carry a
                 'location_info' of the form
                 'LVMVolumeDriver:hostname:vg:lvm_type:lvm_mirrors'
    :returns: (True, model_update) on success, (False, None) when the
              optimized path does not apply
    """
    false_ret = (False, None)
    if volume['status'] != 'available':
        return false_ret
    if 'location_info' not in host['capabilities']:
        return false_ret
    info = host['capabilities']['location_info']
    try:
        (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
            info.split(':')
        lvm_mirrors = int(lvm_mirrors)
    except ValueError:
        # Malformed location_info: fall back to generic migration.
        return false_ret
    if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
        return false_ret

    if dest_vg != self.vg.vg_name:
        vg_list = volutils.get_all_volume_groups()
        try:
            # BUGFIX: use the next() builtin; generator objects have no
            # .next() method on Python 3 (it was renamed __next__).
            next(vg for vg in vg_list if vg['name'] == dest_vg)
        except StopIteration:
            message = (_("Destination Volume Group %s does not exist") %
                       dest_vg)
            LOG.error(message)
            return false_ret

        helper = utils.get_root_helper()
        dest_vg_ref = lvm.LVM(dest_vg, helper,
                              lvm_type=lvm_type,
                              executor=self._execute)
        self.remove_export(ctxt, volume)
        self._create_volume(volume['name'],
                            self._sizestr(volume['size']),
                            lvm_type,
                            lvm_mirrors,
                            dest_vg_ref)

        volutils.copy_volume(self.local_path(volume),
                             self.local_path(volume, vg=dest_vg),
                             volume['size'],
                             self.configuration.volume_dd_blocksize,
                             execute=self._execute)
        self._delete_volume(volume)
        model_update = self._create_export(ctxt, volume, vg=dest_vg)

        return (True, model_update)
    # NOTE(review): when dest_vg == self.vg.vg_name this falls through
    # and implicitly returns None (not false_ret) — confirm callers
    # tolerate that; later revisions handle this case explicitly.
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
    """Optimize the migration if the destination is on the same server.

    If the specified host is another back-end on the same server, and
    the volume is not attached, we can do the migration locally without
    going through iSCSI.
    """
    false_ret = (False, None)
    if volume['status'] != 'available':
        return false_ret
    if 'location_info' not in host['capabilities']:
        return false_ret
    info = host['capabilities']['location_info']
    try:
        # location_info format:
        # 'LVMVolumeDriver:hostname:vg:lvm_type:lvm_mirrors'
        (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
            info.split(':')
        lvm_mirrors = int(lvm_mirrors)
    except ValueError:
        return false_ret
    if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
        return false_ret

    if dest_vg != self.vg.vg_name:
        vg_list = volutils.get_all_volume_groups()
        try:
            # Verify the destination VG exists on this host.
            next(vg for vg in vg_list if vg['name'] == dest_vg)
        except StopIteration:
            LOG.error(_LE("Destination Volume Group %s does not exist"),
                      dest_vg)
            return false_ret

        helper = utils.get_root_helper()

        lvm_conf_file = self.configuration.lvm_conf_file
        if lvm_conf_file.lower() == 'none':
            lvm_conf_file = None

        dest_vg_ref = lvm.LVM(dest_vg, helper,
                              lvm_type=lvm_type,
                              executor=self._execute,
                              lvm_conf=lvm_conf_file)

        self._create_volume(volume['name'],
                            self._sizestr(volume['size']),
                            lvm_type,
                            lvm_mirrors,
                            dest_vg_ref)
        # copy_volume expects sizes in MiB, we store integer GiB
        # be sure to convert before passing in
        size_in_mb = int(volume['size']) * units.Ki
        # NOTE(review): this revision of the function appears truncated
        # here — the copy/cleanup/return tail present in sibling
        # revisions is missing; confirm against the upstream source.
def check_for_setup_error(self):
    """Verify that requirements are in place to use LVM driver.

    Lazily constructs ``self.vg`` when absent, verifies the configured
    volume group exists, and — for thin provisioning — verifies LVM
    support and creates the backing thin pool if needed.

    :raises exception.VolumeBackendAPIException: if the VG is missing,
        thin provisioning is unsupported, or pool creation fails.
    """
    if self.vg is None:
        root_helper = utils.get_root_helper()

        lvm_conf_file = self.configuration.lvm_conf_file
        if lvm_conf_file.lower() == 'none':
            lvm_conf_file = None

        try:
            self.vg = lvm.LVM(self.configuration.volume_group,
                              root_helper,
                              lvm_type=self.configuration.lvm_type,
                              executor=self._execute,
                              lvm_conf=lvm_conf_file)
        except exception.VolumeGroupNotFound:
            message = (_("Volume Group %s does not exist") %
                       self.configuration.volume_group)
            raise exception.VolumeBackendAPIException(data=message)

    vg_list = volutils.get_all_volume_groups(
        self.configuration.volume_group)
    # BUGFIX: supply a default to next(); the bare generator form raised
    # StopIteration when the VG was missing, making the is-None branch
    # below unreachable dead code.
    vg_dict = next(
        (vg for vg in vg_list if vg['name'] == self.vg.vg_name), None)
    if vg_dict is None:
        message = (_("Volume Group %s does not exist") %
                   self.configuration.volume_group)
        raise exception.VolumeBackendAPIException(data=message)

    if self.configuration.lvm_type == 'thin':
        # Specific checks for using Thin provisioned LV's
        if not volutils.supports_thin_provisioning():
            message = _("Thin provisioning not supported "
                        "on this version of LVM.")
            raise exception.VolumeBackendAPIException(data=message)

        pool_name = "%s-pool" % self.configuration.volume_group
        if self.vg.get_volume(pool_name) is None:
            try:
                self.vg.create_thin_pool(pool_name)
            except processutils.ProcessExecutionError as exc:
                exception_message = (_("Failed to create thin pool, "
                                       "error message was: %s")
                                     % six.text_type(exc.stderr))
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

        # Enable sparse copy since lvm_type is 'thin'
        self.sparse_copy_volume = True
def setUp(self):
    """Build the fake VG, honoring lvm_suppress_fd_warnings.

    Subclasses may pre-set self.configuration; only build a mock one
    when it is absent.
    """
    if not hasattr(self, 'configuration'):
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.lvm_suppress_fd_warnings = False
    cfg = self.configuration
    cfg.volume_group_name = 'fake-vg'
    super(BrickLvmTestCase, self).setUp()
    self.mock_object(processutils, 'execute', self.fake_execute)
    self.vg = brick.LVM(
        cfg.volume_group_name,
        'sudo',
        False, None,
        'default',
        self.fake_execute,
        suppress_fd_warn=cfg.lvm_suppress_fd_warnings)
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
    """Optimize the migration if the destination is on the same server.

    If the specified host is another back-end on the same server, and
    the volume is not attached, we can do the migration locally without
    going through iSCSI.

    :returns: (True, model_update) on success, (False, None) when this
              optimized path does not apply
    """
    false_ret = (False, None)
    if 'location_info' not in host['capabilities']:
        return false_ret
    info = host['capabilities']['location_info']
    try:
        (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
            info.split(':')
    except ValueError:
        return false_ret
    if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
        return false_ret

    if dest_vg != self.vg.vg_name:
        vg_list = volutils.get_all_volume_groups()
        # BUGFIX: look up the *destination* VG (was self.vg.vg_name,
        # i.e. the source VG, so a missing destination went undetected),
        # and give next() a default — the bare .next() call was both
        # Python-2-only and raised StopIteration instead of returning
        # None, leaving the is-None branch below unreachable.
        vg_dict = next(
            (vg for vg in vg_list if vg['name'] == dest_vg), None)
        if vg_dict is None:
            message = ("Destination Volume Group %s does not exist" %
                       dest_vg)
            LOG.error(_('%s'), message)
            return false_ret

        helper = 'sudo cinder-rootwrap %s' % CONF.rootwrap_config
        dest_vg_ref = lvm.LVM(dest_vg, helper, lvm_type, self._execute)
        self.remove_export(ctxt, volume)
        self._create_volume(volume['name'],
                            self._sizestr(volume['size']),
                            lvm_type,
                            lvm_mirrors,
                            dest_vg_ref)

        volutils.copy_volume(self.local_path(volume),
                             self.local_path(volume, vg=dest_vg),
                             volume['size'],
                             execute=self._execute)
        self._delete_volume(volume)
        model_update = self._create_export(ctxt, volume, vg=dest_vg)

        return (True, model_update)
def check_for_setup_error(self):
    """Verify that requirements are in place to use LVM driver.

    Lazily constructs ``self.vg`` when absent, verifies the configured
    volume group exists, and — for thin provisioning — verifies LVM
    support and creates the backing thin pool if needed.

    :raises exception.VolumeBackendAPIException: if the VG is missing,
        thin provisioning is unsupported, or pool creation fails.
    """
    if self.vg is None:
        root_helper = 'sudo cinder-rootwrap %s' % CONF.rootwrap_config
        try:
            self.vg = lvm.LVM(self.configuration.volume_group,
                              root_helper,
                              lvm_type=self.configuration.lvm_type,
                              executor=self._execute)
        except lvm.VolumeGroupNotFound:
            message = ("Volume Group %s does not exist" %
                       self.configuration.volume_group)
            raise exception.VolumeBackendAPIException(data=message)

    vg_list = volutils.get_all_volume_groups(
        self.configuration.volume_group)
    # BUGFIX: use the next() builtin with a default; the generator
    # .next() method is Python-2-only, and a bare next() would raise
    # StopIteration instead of returning None, so the is-None check
    # below was dead code.
    vg_dict = next(
        (vg for vg in vg_list if vg['name'] == self.vg.vg_name), None)
    if vg_dict is None:
        message = ("Volume Group %s does not exist" %
                   self.configuration.volume_group)
        raise exception.VolumeBackendAPIException(data=message)

    if self.configuration.lvm_type == 'thin':
        # Specific checks for using Thin provisioned LV's
        if not volutils.supports_thin_provisioning():
            message = ("Thin provisioning not supported "
                       "on this version of LVM.")
            raise exception.VolumeBackendAPIException(data=message)

        pool_name = "%s-pool" % self.configuration.volume_group
        if self.vg.get_volume(pool_name) is None:
            try:
                self.vg.create_thin_pool(pool_name)
            except exception.ProcessExecutionError as exc:
                exception_message = ("Failed to create thin pool, "
                                     "error message was: %s"
                                     % exc.stderr)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
    """Optimize the migration if the destination is on the same server.

    If the specified host is another back-end on the same server, and
    the volume is not attached, we can do the migration locally without
    going through iSCSI.
    """
    false_ret = (False, None)
    if volume['status'] != 'available':
        return false_ret
    if 'location_info' not in host['capabilities']:
        return false_ret
    info = host['capabilities']['location_info']
    try:
        # location_info format:
        # 'LVMVolumeDriver:hostname:vg:lvm_type:lvm_mirrors'
        (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
            info.split(':')
        lvm_mirrors = int(lvm_mirrors)
    except ValueError:
        return false_ret
    if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
        return false_ret

    # Migrating within the same VG would clobber the source volume.
    if dest_vg == self.vg.vg_name:
        message = (_("Refusing to migrate volume ID: %(id)s. Please "
                     "check your configuration because source and "
                     "destination are the same Volume Group: %(name)s.") %
                   {'id': volume['id'], 'name': self.vg.vg_name})
        LOG.error(message)
        raise exception.VolumeBackendAPIException(data=message)

    vg_list = volutils.get_all_volume_groups()
    try:
        # Verify the destination VG exists on this host.
        next(vg for vg in vg_list if vg['name'] == dest_vg)
    except StopIteration:
        LOG.error(_LE("Destination Volume Group %s does not exist"),
                  dest_vg)
        return false_ret

    helper = utils.get_root_helper()

    lvm_conf_file = self.configuration.lvm_conf_file
    if lvm_conf_file.lower() == 'none':
        lvm_conf_file = None

    dest_vg_ref = lvm.LVM(dest_vg, helper,
                          lvm_type=lvm_type,
                          executor=self._execute,
                          lvm_conf=lvm_conf_file)

    self._create_volume(volume['name'],
                        self._sizestr(volume['size']),
                        lvm_type,
                        lvm_mirrors,
                        dest_vg_ref)
    # copy_volume expects sizes in MiB, we store integer GiB
    # be sure to convert before passing in
    size_in_mb = int(volume['size']) * units.Ki
    try:
        volutils.copy_volume(self.local_path(volume),
                             self.local_path(volume, vg=dest_vg),
                             size_in_mb,
                             self.configuration.volume_dd_blocksize,
                             execute=self._execute,
                             sparse=self._sparse_copy_volume)
    except Exception as e:
        # On copy failure, clean up the destination LV before
        # re-raising the original exception.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Volume migration failed due to "
                          "exception: %(reason)s."),
                      {'reason': six.text_type(e)}, resource=volume)
            dest_vg_ref.delete(volume)
    self._delete_volume(volume)
    return (True, None)
def check_for_setup_error(self):
    """Verify that requirements are in place to use LVM driver.

    Lazily constructs ``self.vg`` when absent, verifies the configured
    volume group exists, resolves lvm_type 'auto' to 'thin' or
    'default', and — for thin provisioning — verifies LVM support and
    creates the backing thin pool if needed.

    :raises exception.VolumeBackendAPIException: if the VG is missing,
        thin provisioning is unsupported, or pool creation fails.
    """
    if self.vg is None:
        root_helper = utils.get_root_helper()

        lvm_conf_file = self.configuration.lvm_conf_file
        if lvm_conf_file.lower() == 'none':
            lvm_conf_file = None

        try:
            self.vg = lvm.LVM(
                self.configuration.volume_group,
                root_helper,
                lvm_type=self.configuration.lvm_type,
                executor=self._execute,
                lvm_conf=lvm_conf_file,
                suppress_fd_warn=(
                    self.configuration.lvm_suppress_fd_warnings))
        except exception.VolumeGroupNotFound:
            message = (_("Volume Group %s does not exist") %
                       self.configuration.volume_group)
            raise exception.VolumeBackendAPIException(data=message)

    vg_list = volutils.get_all_volume_groups(
        self.configuration.volume_group)
    # BUGFIX: supply a default to next(); the bare generator form raised
    # StopIteration when the VG was missing, making the is-None branch
    # below unreachable dead code.
    vg_dict = next(
        (vg for vg in vg_list if vg['name'] == self.vg.vg_name), None)
    if vg_dict is None:
        message = (_("Volume Group %s does not exist") %
                   self.configuration.volume_group)
        raise exception.VolumeBackendAPIException(data=message)

    pool_name = "%s-pool" % self.configuration.volume_group

    if self.configuration.lvm_type == 'auto':
        # Default to thin provisioning if it is supported and
        # the volume group is empty, or contains a thin pool
        # for us to use.
        self.vg.update_volume_group_info()

        self.configuration.lvm_type = 'default'

        if volutils.supports_thin_provisioning():
            if self.vg.get_volume(pool_name) is not None:
                LOG.info(_LI('Enabling LVM thin provisioning by default '
                             'because a thin pool exists.'))
                self.configuration.lvm_type = 'thin'
            elif len(self.vg.get_volumes()) == 0:
                LOG.info(_LI('Enabling LVM thin provisioning by default '
                             'because no LVs exist.'))
                self.configuration.lvm_type = 'thin'

    if self.configuration.lvm_type == 'thin':
        # Specific checks for using Thin provisioned LV's
        if not volutils.supports_thin_provisioning():
            message = _("Thin provisioning not supported "
                        "on this version of LVM.")
            raise exception.VolumeBackendAPIException(data=message)

        if self.vg.get_volume(pool_name) is None:
            try:
                self.vg.create_thin_pool(pool_name)
            except processutils.ProcessExecutionError as exc:
                exception_message = (_("Failed to create thin pool, "
                                       "error message was: %s")
                                     % six.text_type(exc.stderr))
                raise exception.VolumeBackendAPIException(
                    data=exception_message)

        # Enable sparse copy since lvm_type is 'thin'
        self._sparse_copy_volume = True