def setUp(self):
    super(TestBaseISCSITargetDriver, self).setUp()
    self.target = fake.FakeTarget(root_helper=utils.get_root_helper(),
                                  configuration=self.configuration)
    self.target.db = mock.MagicMock(
        volume_get=mock.MagicMock(return_value={'provider_auth':
                                                'CHAP otzL 234Z'}))
def setUp(self):
    super(TestBaseISCSITargetDriver, self).setUp()
    self.configuration = conf.Configuration(None)
    self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
    self.fake_volume_id = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'
    self.target = fake.FakeTarget(root_helper=utils.get_root_helper(),
                                  configuration=self.configuration)
    self.testvol = {
        'project_id': self.fake_project_id,
        'name': 'testvol',
        'size': 1,
        'id': self.fake_volume_id,
        'volume_type_id': None,
        'provider_location': '10.10.7.1:3260 '
                             'iqn.2010-10.org.openstack:'
                             'volume-%s 0' % self.fake_volume_id,
        'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                         'c76370d66b 2FE0CQ8J196R',
        'provider_geometry': '512 512',
        'created_at': timeutils.utcnow(),
        'host': 'fake_host@lvm#lvm'}

    self.expected_iscsi_properties = {
        'auth_method': 'CHAP',
        'auth_password': '******',
        'auth_username': '******',
        'encrypted': False,
        'logical_block_size': '512',
        'physical_block_size': '512',
        'target_discovered': False,
        'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                      self.fake_volume_id,
        'target_lun': 0,
        'target_portal': '10.10.7.1:3260',
        'volume_id': self.fake_volume_id}
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    if not CONF.enabled_backends:
        LOG.error('Configuration for cinder-volume does not specify '
                  '"enabled_backends". Using DEFAULT section to configure '
                  'drivers is not supported since Ocata.')
        sys.exit(1)

    if os.name == 'nt':
        # We cannot use oslo.service to spawn multiple services on Windows.
        # It relies on forking, which is not available on Windows.
        # Furthermore, service objects are unmarshallable objects that are
        # passed to subprocesses.
        _launch_services_win32()
    else:
        _launch_services_posix()
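For context, `utils.get_root_helper()` returns a single command-line string, which is why `main()` above wraps it in `shlex.split()` before handing it to `priv_context.init()`. A minimal sketch of the helper, assuming the standard cinder-rootwrap setup (the exact body is an assumption, not quoted from this section):

def get_root_helper():
    # Assumed sketch: builds the rootwrap prefix from CONF.rootwrap_config.
    # It is returned as one string, so callers that need argv form must
    # split it themselves (e.g. shlex.split() in main() above).
    return 'sudo cinder-rootwrap %s' % CONF.rootwrap_config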
def __init__(self, execute=putils.execute, *args, **kwargs):
    self._remotefsclient = None
    super(NfsDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(nfs_opts)
    root_helper = utils.get_root_helper()
    # base bound to instance is used in RemoteFsConnector.
    self.base = getattr(self.configuration, 'nfs_mount_point_base')
    self.base = os.path.realpath(self.base)
    opts = getattr(self.configuration, 'nfs_mount_options')

    nas_mount_options = getattr(self.configuration, 'nas_mount_options',
                                None)
    if nas_mount_options is not None:
        LOG.debug('overriding nfs_mount_options with nas_mount_options')
        opts = nas_mount_options

    self._remotefsclient = remotefs_brick.RemoteFsClient(
        'nfs', root_helper, execute=execute,
        nfs_mount_point_base=self.base,
        nfs_mount_options=opts)

    supports_auto_mosr = kwargs.get('supports_auto_mosr', False)
    self._sparse_copy_volume_data = True
    self.reserved_percentage = self.configuration.reserved_percentage
    self.max_over_subscription_ratio = (
        vutils.get_max_over_subscription_ratio(
            self.configuration.max_over_subscription_ratio,
            supports_auto=supports_auto_mosr))
def __init__(self, *args, **kwargs):
    super(TestIetAdmDriver, self).__init__(*args, **kwargs)
    self.configuration = conf.Configuration(None)
    self.configuration.append_config_values = mock.Mock(return_value=0)
    self.configuration.iscsi_ip_address = "10.9.8.7"
    self.fake_project_id = "ed2c1fd4-5fc0-11e4-aa15-123b93f75cba"
    self.fake_volume_id = "83c2e877-feed-46be-8435-77884fe55b45"
    self.target = iet.IetAdm(root_helper=utils.get_root_helper(),
                             configuration=self.configuration)
    self.testvol = {
        "project_id": self.fake_project_id,
        "name": "testvol",
        "size": 1,
        "id": self.fake_volume_id,
        "volume_type_id": None,
        "provider_location": "10.9.8.7:3260 "
                             "iqn.2010-10.org.openstack:"
                             "volume-%s 0" % self.fake_volume_id,
        "provider_auth": "CHAP stack-1-a60e2611875f40199931f2"
                         "c76370d66b 2FE0CQ8J196R",
        "provider_geometry": "512 512",
        "created_at": timeutils.utcnow(),
        "host": "fake_host@lvm#lvm",
    }
    self.expected_iscsi_properties = {
        "auth_method": "CHAP",
        "auth_password": "******",
        "auth_username": "******",
        "encrypted": False,
        "logical_block_size": "512",
        "physical_block_size": "512",
        "target_discovered": False,
        "target_iqn": "iqn.2010-10.org.openstack:volume-%s"
                      % self.fake_volume_id,
        "target_lun": 0,
        # Matches iscsi_ip_address and provider_location above; the
        # original said "10.10.7.1:3260", which contradicts the fixture.
        "target_portal": "10.9.8.7:3260",
        "volume_id": self.fake_volume_id,
    }
def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
    """Optimize the migration if the destination is on the same server.

    If the specified host is another back-end on the same server, and
    the volume is not attached, we can do the migration locally without
    going through iSCSI.
    """
    false_ret = (False, None)
    if volume['status'] != 'available':
        return false_ret
    if 'location_info' not in host['capabilities']:
        return false_ret
    info = host['capabilities']['location_info']
    try:
        (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
            info.split(':')
        lvm_mirrors = int(lvm_mirrors)
    except ValueError:
        return false_ret
    if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
        return false_ret

    if dest_vg != self.vg.vg_name:
        vg_list = volutils.get_all_volume_groups()
        try:
            # next() instead of the Python 2-only .next() method.
            next(vg for vg in vg_list if vg['name'] == dest_vg)
        except StopIteration:
            message = (_("Destination Volume Group %s does not exist") %
                       dest_vg)
            LOG.error(message)
            return false_ret

        helper = utils.get_root_helper()
        dest_vg_ref = lvm.LVM(dest_vg, helper,
                              lvm_type=lvm_type,
                              executor=self._execute)
        self.remove_export(ctxt, volume)
        self._create_volume(volume['name'],
                            self._sizestr(volume['size']),
                            lvm_type,
                            lvm_mirrors,
                            dest_vg_ref)
        volutils.copy_volume(self.local_path(volume),
                             self.local_path(volume, vg=dest_vg),
                             volume['size'],
                             self.configuration.volume_dd_blocksize,
                             execute=self._execute)
        self._delete_volume(volume)
        model_update = self._create_export(ctxt, volume, vg=dest_vg)
        return (True, model_update)
    else:
        message = (_("Refusing to migrate volume ID: %(id)s. Please "
                     "check your configuration because source and "
                     "destination are the same Volume Group: %(name)s.") %
                   {'id': volume['id'], 'name': self.vg.vg_name})
        LOG.exception(message)
        raise exception.VolumeBackendAPIException(data=message)
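The `location_info` capability string unpacked above is colon-separated with five fields. A hypothetical value to make the parse concrete (the host and volume group names are illustrative, not taken from the source):

# Hypothetical location_info for the five-way unpack above.
info = 'LVMVolumeDriver:cinder-host-1:cinder-volumes:thin:0'
dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors = info.split(':')
lvm_mirrors = int(lvm_mirrors)
# -> ('LVMVolumeDriver', 'cinder-host-1', 'cinder-volumes', 'thin', 0)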
def setUp(self):
    super(TestNVMETDriver, self).setUp()

    self.configuration.target_protocol = 'nvmet_rdma'
    self.target = nvmet.NVMET(root_helper=utils.get_root_helper(),
                              configuration=self.configuration)

    self.target_ip = '192.168.0.1'
    self.target_port = '1234'
    self.nvmet_subsystem_name = self.configuration.target_prefix
    self.nvmet_ns_id = self.configuration.nvmet_ns_id
    self.nvmet_port_id = self.configuration.nvmet_port_id
    self.nvme_transport_type = 'rdma'

    self.fake_volume_id = 'c446b9a2-c968-4260-b95f-a18a7b41c004'
    self.testvol_path = (
        '/dev/stack-volumes-lvmdriver-1/volume-%s' % self.fake_volume_id)
    self.fake_project_id = 'ed2c1fd4-5555-1111-aa15-123b93f75cba'
    self.testvol = (
        {'project_id': self.fake_project_id,
         'name': 'testvol',
         'size': 1,
         'id': self.fake_volume_id,
         'volume_type_id': None,
         'provider_location': self.target.get_nvmeof_location(
             "nqn.%s-%s" % (self.nvmet_subsystem_name,
                            self.fake_volume_id),
             self.target_ip, self.target_port, self.nvme_transport_type,
             self.nvmet_ns_id),
         'provider_auth': None,
         'provider_geometry': None,
         'created_at': timeutils.utcnow(),
         'host': 'fake_host@lvm#lvm'})
def _reclaim_unused_storage(self):
    """If an empty array exists, reclaim it for future use."""
    root_helper = utils.get_root_helper()
    arraydevs = lvm.LVM.get_raid_arrays(root_helper)
    jboddevs = lvm.LVM.get_jbods_devs(root_helper,
                                      contutil._get_cont_vg_prefix())
    arraydevs.extend(jboddevs)

    for arrdev in arraydevs:
        vgname = contutil._get_cont_vg_name(arrdev)
        cnt = lvm.LVM.get_lvcnt_by_vgname(root_helper, vgname)
        if cnt == 0:
            if (arrdev in self.raidstat and
                    self.raidstat[arrdev][0] == 0):
                if (time.time() - self.raidstat[arrdev][1] >
                        self.configuration.reclaim_interval):
                    # Reclaim the array once it has been unused for
                    # longer than the configured reclaim_interval.
                    contutil.remove_cont_cinder_volume(root_helper,
                                                       arrdev)
                    lvm.LVM.remove_array(root_helper, arrdev,
                                         vgname=vgname)
                    del self.raidstat[arrdev]
                    LOG.debug('[MRA] array [%(arr)s] has been reclaimed',
                              {'arr': arrdev})
                # Keep the original timestamp while the array is idle so
                # that the reclaim interval can actually elapse.
                continue
        # Update the per-array stats.
        self.raidstat[arrdev] = [cnt, time.time()]
def test_brick_get_connector(self):
    root_helper = utils.get_root_helper()

    self.mox.StubOutClassWithMocks(connector, "ISCSIConnector")
    connector.ISCSIConnector(execute=putils.execute, driver=None,
                             root_helper=root_helper,
                             use_multipath=False,
                             device_scan_attempts=3)

    self.mox.StubOutClassWithMocks(connector, "FibreChannelConnector")
    connector.FibreChannelConnector(execute=putils.execute, driver=None,
                                    root_helper=root_helper,
                                    use_multipath=False,
                                    device_scan_attempts=3)

    self.mox.StubOutClassWithMocks(connector, "AoEConnector")
    connector.AoEConnector(execute=putils.execute, driver=None,
                           root_helper=root_helper,
                           device_scan_attempts=3)

    self.mox.StubOutClassWithMocks(connector, "LocalConnector")
    connector.LocalConnector(execute=putils.execute, driver=None,
                             root_helper=root_helper,
                             device_scan_attempts=3)

    self.mox.ReplayAll()

    utils.brick_get_connector("iscsi")
    utils.brick_get_connector("fibre_channel")
    utils.brick_get_connector("aoe")
    utils.brick_get_connector("local")
    self.mox.VerifyAll()
def test_init_backup_repo_path(self, mock_remotefs_client_class):
    self.override_config('backup_share', FAKE_BACKUP_SHARE)
    self.override_config('backup_mount_point_base',
                         FAKE_BACKUP_MOUNT_POINT_BASE)
    mock_remotefsclient = mock.Mock()
    mock_remotefsclient.get_mount_point = mock.Mock(
        return_value=FAKE_BACKUP_PATH)
    self.mock_object(nfs.NFSBackupDriver, '_check_configuration')
    mock_remotefs_client_class.return_value = mock_remotefsclient
    self.mock_object(utils, 'get_root_helper')
    with mock.patch.object(nfs.NFSBackupDriver, '_init_backup_repo_path'):
        driver = nfs.NFSBackupDriver(self.ctxt)

    path = driver._init_backup_repo_path()

    self.assertEqual(FAKE_BACKUP_PATH, path)
    # assert_called_once_with() actually verifies the call; the original
    # called_once() was a no-op attribute access on the mock.
    utils.get_root_helper.assert_called_once_with()
    mock_remotefs_client_class.assert_called_once_with(
        'nfs',
        utils.get_root_helper(),
        nfs_mount_point_base=FAKE_BACKUP_MOUNT_POINT_BASE,
        nfs_mount_options=None)
    mock_remotefsclient.mount.assert_called_once_with(FAKE_BACKUP_SHARE)
    mock_remotefsclient.get_mount_point.assert_called_once_with(
        FAKE_BACKUP_SHARE)
def __init__(self, execute=putils.execute, *args, **kwargs):
    self._remotefsclient = None
    super(NfsDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(volume_opts)
    root_helper = utils.get_root_helper()
    self._remotefsclient = remotefs.RemoteFsClient('nfs', root_helper,
                                                   execute=execute)
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot."""
    root_helper = utils.get_root_helper()
    # Clone the snapshot into the new zvol, then grow the zvol to the
    # requested size.
    zvol_snapshot = self._zfs_snapshot(snapshot)
    zvol = self._zfs_volume(volume)
    try:
        self._execute('zfs', 'clone', zvol_snapshot, zvol,
                      root_helper=root_helper, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        exception_message = (_("Failed to create volume from snapshot, "
                               "error message was: %s")
                             % six.text_type(exc.stderr))
        raise exception.VolumeBackendAPIException(data=exception_message)
    self.extend_volume(volume, volume['size'])
def setUp(self):
    super(TestBaseISCSITargetDriver, self).setUp()
    self.configuration = conf.Configuration(None)
    self.fake_id_1 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
    self.fake_id_2 = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'
    self.target = FakeDriver(root_helper=utils.get_root_helper(),
                             configuration=self.configuration)
    self.testvol_1 = {
        'project_id': self.fake_id_1,
        'name': 'testvol',
        'size': 1,
        'id': self.fake_id_2,
        'volume_type_id': None,
        'provider_location': '10.10.7.1:3260 '
                             'iqn.2010-10.org.openstack:'
                             'volume-%s 0' % self.fake_id_2,
        'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                         'c76370d66b 2FE0CQ8J196R',
        'provider_geometry': '512 512',
        'created_at': timeutils.utcnow(),
        'host': 'fake_host@lvm#lvm'}

    self.expected_iscsi_properties = {
        'auth_method': 'CHAP',
        'auth_password': '******',
        'auth_username': '******',
        'encrypted': False,
        'logical_block_size': '512',
        'physical_block_size': '512',
        'target_discovered': False,
        'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                      self.fake_id_2,
        'target_lun': 0,
        'target_portal': '10.10.7.1:3260',
        'volume_id': self.fake_id_2}
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.set_defaults(
        default_log_levels=logging.get_default_log_levels() +
        _EXTRA_DEFAULT_LOG_LEVELS)
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    global LOG
    LOG = logging.getLogger(__name__)

    if CONF.backup_workers > 1:
        LOG.info('Backup running with %s processes.', CONF.backup_workers)
        launcher = service.get_launcher()

        for i in range(CONF.backup_workers):
            _launch_backup_process(launcher, i)

        launcher.wait()
    else:
        LOG.info('Backup running in single process mode.')
        server = service.Service.create(binary='cinder-backup',
                                        coordination=True,
                                        process_number=1)
        service.serve(server)
        service.wait()
def check_for_setup_error(self):
    """Verify that requirements are in place to use ZFS driver."""
    root_helper = utils.get_root_helper()
    try:
        self._execute('zfs', 'list', root_helper=root_helper,
                      run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        exception_message = (_("Failed to initialize ZFS driver, "
                               "error message was: %s")
                             % six.text_type(exc.stderr))
        raise exception.VolumeBackendAPIException(data=exception_message)

    try:
        self._execute('zfs', 'list', self.configuration.zfs_zpool,
                      root_helper=root_helper, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        exception_message = (_("Failed to initialize ZFS driver: pool "
                               "%(pool)s is not available, error message "
                               "was: %(err)s")
                             % {'pool': self.configuration.zfs_zpool,
                                'err': six.text_type(exc.stderr)})
        raise exception.VolumeBackendAPIException(data=exception_message)

    self.zpool = self.configuration.zfs_zpool
def __init__(self, execute=putils.execute, *args, **kwargs):
    self._remotefsclient = None
    super(NfsDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(nfs_opts)
    root_helper = utils.get_root_helper()
    # base bound to instance is used in RemoteFsConnector.
    self.base = getattr(self.configuration,
                        'nfs_mount_point_base',
                        CONF.nfs_mount_point_base)
    self.base = os.path.realpath(self.base)
    opts = getattr(self.configuration,
                   'nfs_mount_options',
                   CONF.nfs_mount_options)

    nas_mount_options = getattr(self.configuration,
                                'nas_mount_options',
                                None)
    if nas_mount_options is not None:
        LOG.debug('overriding nfs_mount_options with nas_mount_options')
        opts = nas_mount_options

    self._remotefsclient = remotefs_brick.RemoteFsClient(
        'nfs', root_helper, execute=execute,
        nfs_mount_point_base=self.base,
        nfs_mount_options=opts)
def check_for_setup_error(self):
    """Verify that requirements are in place to use LVM driver."""
    if self.vg is None:
        root_helper = utils.get_root_helper()
        try:
            self.vg = lvm.LVM(
                self.configuration.volume_group,
                root_helper,
                lvm_type=self.configuration.lvm_type,
                executor=self._execute,
            )
        except brick_exception.VolumeGroupNotFound:
            message = (_("Volume Group %s does not exist")
                       % self.configuration.volume_group)
            raise exception.VolumeBackendAPIException(data=message)

    vg_list = volutils.get_all_volume_groups(
        self.configuration.volume_group)
    # next(..., None) instead of the Python 2-only .next(), which would
    # raise StopIteration instead of falling through to the None check.
    vg_dict = next(
        (vg for vg in vg_list if vg["name"] == self.vg.vg_name), None)
    if vg_dict is None:
        message = (_("Volume Group %s does not exist")
                   % self.configuration.volume_group)
        raise exception.VolumeBackendAPIException(data=message)

    if self.configuration.lvm_type == "thin":
        # Specific checks for using Thin provisioned LV's
        if not volutils.supports_thin_provisioning():
            message = _("Thin provisioning not supported "
                        "on this version of LVM.")
            raise exception.VolumeBackendAPIException(data=message)

        pool_name = "%s-pool" % self.configuration.volume_group
        if self.vg.get_volume(pool_name) is None:
            try:
                self.vg.create_thin_pool(pool_name)
            except processutils.ProcessExecutionError as exc:
                exception_message = (_("Failed to create thin pool, "
                                       "error message was: %s")
                                     % exc.stderr)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)
def __init__(self, *args, **kwargs):
    self.db = kwargs.get('db')

    super(ZFSVolumeDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(volume_opts)
    self._execute = utils.execute
    r_helper = utils.get_root_helper()
    self.nova_client_call = client_call.NovaCmdExecute()
    self.iscsiobj = ZFS(*args, root_helper=r_helper, **kwargs)
    self.target_helper = self.iscsiobj.get_target_helper(self.db)
    self.targetbase = TargetIscsiBase(*args, **kwargs)
    self.hostname = socket.gethostname()
    self.backend_name = \
        self.configuration.safe_get('volume_backend_name') \
        or 'ZFS_HALF_iSCSI'
    self.poolname = self.configuration.safe_get('volume_pool_name') or ''
    self.volume_dd_bksize = \
        self.configuration.safe_get('volume_dd_blocksize')
    if not self.volume_dd_bksize.isdigit():
        self.volume_dd_bksize = '1M'
    elif not 1 <= int(self.volume_dd_bksize) <= 1024:
        # Fall back to 1M for out-of-range values. The original condition
        # ("> 1000 or < 1024") was always true, which made the conversion
        # branch below unreachable; the 1..1024 range is the apparent
        # intent.
        self.volume_dd_bksize = '1M'
    else:
        # Interpret the configured value as KiB and convert to bytes.
        self.volume_dd_bksize = int(self.volume_dd_bksize) * 1024
    self.protocol = 'ZFS_iSCSI'
    self.private_keymgr_init()
    self.cmdcls = zfscmd.ZFSVolumeCMD(pool_name=self.poolname)
    self.configuration.zfspool = self.poolname
    self._stats = {}
    LOG.info('zfs driver_init_ok')
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Copy the volume to the specified image."""
    volume['provider_location'] = (
        self.create_export(context, volume, None)['provider_location'])
    connection_data = self.initialize_connection(volume, None)['data']
    target_connector = (
        connector.InitiatorConnector.factory(initiator.NVME,
                                             utils.get_root_helper()))

    try:
        device_info = target_connector.connect_volume(connection_data)
    except Exception:
        LOG.info('Could not connect SPDK target device')
        return

    connection_data['device_path'] = device_info['path']

    try:
        image_utils.upload_volume(context, image_service,
                                  image_meta, device_info['path'])
    finally:
        target_connector.disconnect_volume(connection_data, volume)
def setUp(self):
    super(TestLioAdmDriver, self).setUp()
    self.configuration = conf.Configuration(None)
    self.configuration.append_config_values = mock.Mock(return_value=0)
    self.configuration.safe_get = mock.Mock(side_effect=self.fake_safe_get)
    self.configuration.iscsi_ip_address = '10.9.8.7'
    self.fake_volumes_dir = '/tmp/tmpfile'
    self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
    self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
    self.fake_volume_id = '83c2e877-feed-46be-8435-77884fe55b45'
    with mock.patch.object(lio.LioAdm, '_verify_rtstool'):
        self.target = lio.LioAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)
    self.fake_iscsi_scan = ('iqn.2010-10.org.openstack:'
                            'volume-83c2e877-feed-46be-8435-77884fe55b45')
    self.target.db = mock.MagicMock(
        volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'})

    self.testvol = {
        'project_id': self.fake_project_id,
        'name': 'volume-%s' % self.fake_volume_id,
        'size': 1,
        'id': self.fake_volume_id,
        'volume_type_id': None,
        'provider_location': '10.9.8.7:3260 '
                             'iqn.2010-10.org.openstack:'
                             'volume-%s 0' % self.fake_volume_id,
        'provider_auth': 'CHAP c76370d66b 2FE0CQ8J196R',
        'provider_geometry': '512 512',
        'created_at': timeutils.utcnow(),
        'host': 'fake_host@lvm#lvm'}
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Copy the volume to the specified image."""
    # retrieve store information from extra-specs
    store_id = volume.volume_type.extra_specs.get('image_service:store_id')

    volume['provider_location'] = (
        self.create_export(context, volume, None)['provider_location'])
    connection_data = self.initialize_connection(volume, None)['data']
    target_connector = (
        connector.InitiatorConnector.factory(initiator.NVME,
                                             utils.get_root_helper()))

    try:
        device_info = target_connector.connect_volume(connection_data)
    except Exception:
        LOG.info('Could not connect SPDK target device')
        return

    connection_data['device_path'] = device_info['path']

    try:
        image_utils.upload_volume(context, image_service,
                                  image_meta, device_info['path'],
                                  store_id=store_id)
    finally:
        target_connector.disconnect_volume(connection_data, volume)
def get_target_helper(self, db):
    root_helper = utils.get_root_helper()

    if CONF.iser_helper == 'fake':
        return iscsi.FakeIscsiHelper()
    else:
        return iscsi.ISERTgtAdm(root_helper, CONF.volumes_dir, db=db)
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Fetch the image from image_service and write it to the volume."""
    volume['provider_location'] = (
        self.create_export(context, volume, None)['provider_location'])
    connection_data = self.initialize_connection(volume, None)['data']
    target_connector = (
        connector.InitiatorConnector.factory(initiator.NVME,
                                             utils.get_root_helper()))

    try:
        device_info = target_connector.connect_volume(connection_data)
    except Exception:
        LOG.info('Could not connect SPDK target device')
        return

    connection_data['device_path'] = device_info['path']

    try:
        image_utils.fetch_to_raw(context,
                                 image_service,
                                 image_id,
                                 device_info['path'],
                                 self.configuration.volume_dd_blocksize,
                                 size=volume['size'])
    finally:
        target_connector.disconnect_volume(connection_data, volume)
def get_target_admin(self):
    root_helper = utils.get_root_helper()

    if CONF.iser_helper == "fake":
        return iscsi.FakeIscsiHelper()
    else:
        return iscsi.ISERTgtAdm(root_helper, CONF.volumes_dir)
def __init__(self, execute=putils.execute, *args, **kwargs):
    self._remotefsclient = None
    super(NfsDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(nfs_opts)
    root_helper = utils.get_root_helper()
    # base bound to instance is used in RemoteFsConnector.
    self.base = getattr(self.configuration,
                        'nfs_mount_point_base',
                        CONF.nfs_mount_point_base)
    self.base = os.path.realpath(self.base)
    opts = getattr(self.configuration,
                   'nfs_mount_options',
                   CONF.nfs_mount_options)

    nas_mount_options = getattr(self.configuration,
                                'nas_mount_options',
                                None)
    if nas_mount_options is not None:
        LOG.debug('overriding nfs_mount_options with nas_mount_options')
        opts = nas_mount_options

    self._remotefsclient = remotefs_brick.RemoteFsClient(
        'nfs', root_helper, execute=execute,
        nfs_mount_point_base=self.base,
        nfs_mount_options=opts)

    self._sparse_copy_volume_data = True
    self.reserved_percentage = self.configuration.reserved_percentage
    self.max_over_subscription_ratio = (
        self.configuration.max_over_subscription_ratio)
def test_brick_get_connector(self):
    root_helper = utils.get_root_helper()

    self.mox.StubOutClassWithMocks(connector, 'ISCSIConnector')
    connector.ISCSIConnector(execute=putils.execute, driver=None,
                             root_helper=root_helper,
                             use_multipath=False)

    self.mox.StubOutClassWithMocks(connector, 'FibreChannelConnector')
    connector.FibreChannelConnector(execute=putils.execute, driver=None,
                                    root_helper=root_helper,
                                    use_multipath=False)

    self.mox.StubOutClassWithMocks(connector, 'AoEConnector')
    connector.AoEConnector(execute=putils.execute, driver=None,
                           root_helper=root_helper)

    self.mox.StubOutClassWithMocks(connector, 'LocalConnector')
    connector.LocalConnector(execute=putils.execute, driver=None,
                             root_helper=root_helper)

    self.mox.ReplayAll()

    utils.brick_get_connector('iscsi')
    utils.brick_get_connector('fibre_channel')
    utils.brick_get_connector('aoe')
    utils.brick_get_connector('local')
    self.mox.VerifyAll()
def __init__(self, context):
    self.backup_mount_point_base = CONF.glusterfs_backup_mount_point
    self.backup_share = CONF.glusterfs_backup_share
    self._execute = putils.execute
    self._root_helper = utils.get_root_helper()
    backup_path = self._init_backup_repo_path()
    super().__init__(context, backup_path=backup_path)
def __init__(self, context, db=None):
    self.backup_mount_point_base = CONF.glusterfs_backup_mount_point
    self.backup_share = CONF.glusterfs_backup_share
    self._execute = putils.execute
    self._root_helper = utils.get_root_helper()
    backup_path = self._init_backup_repo_path()
    super(GlusterfsBackupDriver, self).__init__(context,
                                                backup_path=backup_path)
def setUp(self):
    super(TestIserLioAdmDriver, self).setUp()
    self.configuration.iscsi_protocol = 'iser'
    with mock.patch.object(lio.LioAdm, '_verify_rtstool'):
        self.target = lio.LioAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)
    self.target.db = mock.MagicMock(
        volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'})
def get_target_helper(self, db):
    root_helper = utils.get_root_helper()
    # Only the tgtadm helper is supported here (CONF.iscsi_helper is
    # assumed to be 'tgtadm').
    return iscsi.TgtAdm(root_helper,
                        CONF.volumes_dir,
                        CONF.iscsi_target_prefix,
                        db=db)
def _init_backup_repo_path(self):
    remotefsclient = remotefs_brick.RemoteFsClient(
        'nfs',
        utils.get_root_helper(),
        nfs_mount_point_base=self.backup_mount_point_base,
        nfs_mount_options=self.mount_options)
    remotefsclient.mount(self.backup_share)
    return remotefsclient.get_mount_point(self.backup_share)
def setUp(self):
    super(TestLioAdmDriver, self).setUp()
    with mock.patch.object(lio.LioAdm, '_verify_rtstool'):
        self.target = lio.LioAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)
    self.target.db = mock.MagicMock(
        volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'})
def __init__(self, *args, **kwargs):
    super(SRBDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(srb_opts)
    self.urls_setup = False
    self.backend_name = None
    self.base_urls = None
    self.root_helper = utils.get_root_helper()
    self._attached_devices = {}
def get_target_admin(self):
    root_helper = utils.get_root_helper()

    if CONF.iser_helper == 'fake':
        return iser.FakeIserHelper()
    else:
        return iser.TgtAdm(root_helper, CONF.volumes_dir)
def setUp(self):
    super(TestLioAdmDriver, self).setUp()
    with mock.patch.object(lio.LioAdm, '_verify_rtstool'):
        self.target = lio.LioAdm(root_helper=utils.get_root_helper(),
                                 configuration=self.configuration)
    self.fake_iscsi_scan = ('iqn.2010-10.org.openstack:'
                            'volume-83c2e877-feed-46be-8435-77884fe55b45')
    self.target.db = mock.MagicMock(
        volume_get=lambda x, y: {'provider_auth': 'IncomingUser foo bar'})
def _get_manageable_resource_info(self, cinder_resources, resource_type,
                                  marker, limit, offset, sort_keys,
                                  sort_dirs):
    entries = []
    cinder_ids = [resource['id'] for resource in cinder_resources]
    root_helper = utils.get_root_helper()
    try:
        out, err = self._execute('zfs', 'list', '-r', '-H', '-p',
                                 '-t', resource_type, '-oname,volsize',
                                 self.zpool, root_helper=root_helper,
                                 run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        exception_message = (_("Failed to run 'zfs list', "
                               "error message was: %s")
                             % six.text_type(exc.stderr))
        raise exception.VolumeBackendAPIException(data=exception_message)

    for entry in out.splitlines():
        name, size = entry.strip().split('\t')
        if resource_type == 'volume':
            potential_id = volutils.extract_id_from_volume_name(name)
        else:
            potential_id = volutils.extract_id_from_snapshot_name(name)
        info = {'reference': {'source-name': name},
                'size': int(math.ceil(float(size) / units.Gi)),
                'cinder_id': None,
                'extra_info': None}

        if potential_id in cinder_ids:
            info['safe_to_manage'] = False
            info['reason_not_safe'] = 'already managed'
            info['cinder_id'] = potential_id
        else:
            info['safe_to_manage'] = True
            info['reason_not_safe'] = None

        if resource_type == 'snapshot':
            zpool, zvol, snapshot = name.replace('@', '/').split('/')
            info['source_reference'] = {'source-name': zvol}

        entries.append(info)

    return volutils.paginate_entries_list(entries, marker, limit, offset,
                                          sort_keys, sort_dirs)
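The parser above expects `zfs list -H -p` output: one resource per line, with name and volsize separated by a tab and the size given in exact bytes. A hypothetical sample of that output (the pool name 'tank' and the byte counts are illustrative, not from the source):

# Hypothetical output of the `zfs list -r -H -p -t volume -oname,volsize`
# call above, using volume IDs that appear elsewhere in this section.
out = ('tank/volume-83c2e877-feed-46be-8435-77884fe55b45\t1073741824\n'
       'tank/volume-ed2c2222-5fc0-11e4-aa15-123b93f75cba\t2147483648\n')
for entry in out.splitlines():
    name, size = entry.strip().split('\t')  # '-H' output is tab-separated
    print(name, int(size))                  # '-p' gives exact byte counts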
def __init__(self, context, db_driver=None):
    self._check_configuration()
    self.backup_mount_point_base = CONF.glusterfs_backup_mount_point
    self.backup_share = CONF.glusterfs_backup_share
    self._execute = putils.execute
    self._root_helper = utils.get_root_helper()
    backup_path = self._init_backup_repo_path()
    super(GlusterfsBackupDriver, self).__init__(context,
                                                backup_path=backup_path)
def migrate_volume(self, ctxt, volume, host, thin=False):
    """Optimize the migration if the destination is on the same server.

    If the specified host is another back-end on the same server, and
    the volume is not attached, we can do the migration locally without
    going through iSCSI.
    """
    false_ret = (False, None)
    if volume['status'] != 'available':
        return false_ret
    if 'location_info' not in host['capabilities']:
        return false_ret
    info = host['capabilities']['location_info']
    try:
        (dest_type, dest_hostname, dest_zpool, zfs_type) = \
            info.split(':')
    except ValueError:
        return false_ret
    if (dest_type != 'ZFSVolumeDriver' or dest_hostname != self.hostname):
        return false_ret

    if dest_zpool == self.zpool:
        message = (_("Refusing to migrate volume ID: %(id)s. Please "
                     "check your configuration because source and "
                     "destination are the same ZVOL: %(name)s.") %
                   {'id': volume['id'], 'name': self.zpool})
        LOG.error(message)
        raise exception.VolumeBackendAPIException(data=message)

    root_helper = utils.get_root_helper()
    try:
        self._execute('zfs', 'list', dest_zpool,
                      root_helper=root_helper, run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        exception_message = (_("Destination ZFS Pool does not exist, "
                               "error message was: %s")
                             % six.text_type(exc.stderr))
        raise exception.VolumeBackendAPIException(data=exception_message)

    try:
        self._execute('zfs-migrate',
                      self._zfs_volume(volume, zpool=self.zpool),
                      self._zfs_volume(volume, zpool=dest_zpool),
                      root_helper=root_helper, run_as_root=True)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error("Volume migration failed due to "
                      "exception: %(reason)s.",
                      {'reason': six.text_type(e)}, resource=volume)

    self.delete_volume(volume)
    return (True, None)
def __init__(self, context, db=None):
    self.backup_mount_point_base = CONF.backup_mount_point_base
    self.backup_share = CONF.backup_share
    self.mount_options = CONF.backup_mount_options
    self._execute = putils.execute
    self._root_helper = utils.get_root_helper()
    backup_path = self._init_backup_repo_path()
    LOG.debug("Using NFS backup repository: %s", backup_path)
    super(NFSBackupDriver, self).__init__(context,
                                          backup_path=backup_path)
def set_chap_authention(self, port, gid):
    ctl_no = port[0]
    port_no = port[1]
    unit = self.unit_name
    auth_username = self.conf.hitachi_auth_user
    auth_password = self.conf.hitachi_auth_password
    add_chap_user = self.conf.hitachi_add_chap_user
    assign_flag = True
    added_flag = False
    opt = '-unit %s -refer %s %s -user %s' % (unit, ctl_no, port_no,
                                              auth_username)
    ret, stdout, stderr = self.exec_hsnm('auchapuser', opt, noretry=True)

    if ret:
        if not add_chap_user:
            msg = basic_lib.output_err(643, user=auth_username)
            raise exception.HBSDError(message=msg)

        root_helper = utils.get_root_helper()
        cmd = ('%s env %s auchapuser -unit %s -add %s %s '
               '-tno %d -user %s' % (root_helper, SNM2_ENV,
                                     unit, ctl_no, port_no,
                                     gid, auth_username))

        LOG.debug('Add CHAP user')
        loop = loopingcall.FixedIntervalLoopingCall(
            self._wait_for_add_chap_user, cmd,
            auth_username, auth_password, time.time())

        added_flag = loop.start(interval=EXEC_INTERVAL).wait()
    else:
        lines = stdout.splitlines()[4:]
        for line in lines:
            if int(shlex.split(line)[0][0:3]) == gid:
                assign_flag = False
                break

    if assign_flag:
        opt = '-unit %s -assign %s %s -tno %d -user %s' % (
            unit, ctl_no, port_no, gid, auth_username)
        ret, stdout, stderr = self.exec_hsnm('auchapuser', opt)
        if ret:
            if added_flag:
                _ret, _stdout, _stderr = self.delete_chap_user(port)
                if _ret:
                    msg = basic_lib.set_msg(303, user=auth_username)
                    LOG.warning(msg)

            msg = basic_lib.output_err(600, cmd='auchapuser',
                                       ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)

    return added_flag
def check_for_setup_error(self):
    """Verify that requirements are in place to use LVM driver."""
    # [MRA] This function is intentionally disabled; everything below
    # the early return is kept for reference but never runs.
    return

    if self.vg is None:
        root_helper = utils.get_root_helper()
        lvm_conf_file = self.configuration.lvm_conf_file
        if lvm_conf_file.lower() == 'none':
            lvm_conf_file = None

        # [MRA] Automatic redundancy control.
        vgname, phydev = self._create_initial_vg(self.configuration)

        try:
            self.vg = lvm.LVM(self.configuration.volume_group,
                              root_helper,
                              create_vg=True,
                              physical_volumes=phydev,
                              lvm_type=self.configuration.lvm_type,
                              executor=self._execute,
                              lvm_conf=lvm_conf_file)
        except brick_exception.VolumeGroupNotFound:
            message = (_("Volume Group %s does not exist") %
                       self.configuration.volume_group)
            raise exception.VolumeBackendAPIException(data=message)

    vg_list = volutils.get_all_volume_groups(
        self.configuration.volume_group)
    # next(..., None) instead of the Python 2-only .next(), which would
    # raise StopIteration instead of falling through to the None check.
    vg_dict = next(
        (vg for vg in vg_list if vg['name'] == self.vg.vg_name), None)
    if vg_dict is None:
        message = (_("Volume Group %s does not exist") %
                   self.configuration.volume_group)
        raise exception.VolumeBackendAPIException(data=message)

    if self.configuration.lvm_type == 'thin':
        # Specific checks for using Thin provisioned LV's
        if not volutils.supports_thin_provisioning():
            message = _("Thin provisioning not supported "
                        "on this version of LVM.")
            raise exception.VolumeBackendAPIException(data=message)

        pool_name = "%s-pool" % self.configuration.volume_group
        if self.vg.get_volume(pool_name) is None:
            try:
                self.vg.create_thin_pool(pool_name)
            except processutils.ProcessExecutionError as exc:
                exception_message = (_("Failed to create thin pool, "
                                       "error message was: %s")
                                     % exc.stderr)
                raise exception.VolumeBackendAPIException(
                    data=exception_message)
def main():
    objects.register_all()
    gmr_opts.set_defaults(CONF)
    CONF(sys.argv[1:], project='cinder',
         version=version.version_string())
    logging.setup(CONF, "cinder")
    python_logging.captureWarnings(True)
    priv_context.init(root_helper=shlex.split(utils.get_root_helper()))
    utils.monkey_patch()
    gmr.TextGuruMeditation.setup_autorun(version, conf=CONF)
    launcher = service.get_launcher()
    LOG = logging.getLogger(__name__)
    service_started = False

    if CONF.enabled_backends:
        for backend in filter(None, CONF.enabled_backends):
            CONF.register_opt(host_opt, group=backend)
            backend_host = getattr(CONF, backend).backend_host
            host = "%s@%s" % (backend_host or CONF.host, backend)
            # We also want to set cluster to None on empty strings, and we
            # ignore leading and trailing spaces.
            cluster = CONF.cluster and CONF.cluster.strip()
            cluster = (cluster or None) and '%s@%s' % (cluster, backend)
            try:
                server = service.Service.create(host=host,
                                                service_name=backend,
                                                binary='cinder-volume',
                                                coordination=True,
                                                cluster=cluster)
            except Exception:
                msg = _('Volume service %s failed to start.') % host
                LOG.exception(msg)
            else:
                # Dispose of the whole DB connection pool here before
                # starting another process.  Otherwise we run into cases
                # where child processes share DB connections which results
                # in errors.
                session.dispose_engine()
                launcher.launch_service(server)
                service_started = True
    else:
        LOG.warning(_LW('Configuration for cinder-volume does not specify '
                        '"enabled_backends", using DEFAULT as backend. '
                        'Support for DEFAULT section to configure drivers '
                        'will be removed in the next release.'))
        server = service.Service.create(binary='cinder-volume',
                                        coordination=True,
                                        cluster=CONF.cluster)
        launcher.launch_service(server)
        service_started = True

    if not service_started:
        msg = _('No volume service(s) started successfully, terminating.')
        LOG.error(msg)
        sys.exit(1)

    launcher.wait()
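To make the host/cluster derivation above concrete, a worked example under assumed configuration values (all names are illustrative, not from the source):

# Assumed: CONF.host = 'node1', backend = 'lvm', backend_host unset,
# CONF.cluster = ' mycluster '.
host = '%s@%s' % (None or 'node1', 'lvm')          # -> 'node1@lvm'
cluster = ' mycluster ' and ' mycluster '.strip()  # -> 'mycluster'
cluster = (cluster or None) and '%s@%s' % (cluster, 'lvm')
# -> 'mycluster@lvm'; if CONF.cluster were '' or None, cluster stays None.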
def _update_available_physical_devices(self):
    """Filter out already-in-use devices."""
    root_helper = utils.get_root_helper()
    old_devlist = self.ref_physical_devices.split(',')
    new_devlist = lvm.LVM.filter_blkdev_in_use(root_helper, old_devlist)
    LOG.debug('[MRA] orig: %(old)s, filtered: %(new)s',
              {'old': old_devlist, 'new': new_devlist})
    # Put the filtered list back in the original comma-separated format.
    # (Equivalent to, and simpler than, the original enumerate/format
    # round-trip.)
    self.configuration.physical_devices = ','.join(new_devlist)
    return len(new_devlist)
def _get_lvm_vg(self, volume, create_vg=False):
    # NOTE(joachim): One-device volume group to manage thin snapshots
    # Get origin volume name even for snapshots
    volume_name = self._get_volname(volume)
    physical_volumes = [self._device_path(volume)]
    with patched(lvm.LVM, 'activate_lv', self._activate_lv):
        return LVM(volume_name, utils.get_root_helper(),
                   create_vg=create_vg,
                   physical_volumes=physical_volumes,
                   lvm_type='thin', executor=self._execute)
def __init__(self, execute=processutils.execute, *args, **kwargs):
    self._remotefsclient = None
    super(GlusterfsDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(volume_opts)
    root_helper = utils.get_root_helper()
    self.base = getattr(self.configuration,
                        'glusterfs_mount_point_base',
                        CONF.glusterfs_mount_point_base)
    self._remotefsclient = remotefs_brick.RemoteFsClient(
        'glusterfs', root_helper, execute,
        glusterfs_mount_point_base=self.base)