コード例 #1
0
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Export the provider-side volume to a local file and upload it.

        The backing volume is exported into the configured conversion
        directory under the image id, uploaded to the image service, and
        the temporary file is always removed afterwards.
        """
        conversion_dir = self.configuration.provider_image_conversion_dir
        if not os.path.exists(conversion_dir):
            fileutils.ensure_tree(conversion_dir)
        provider_volume_id = self._get_provider_volumeid_from_volume(volume)
        task_ret = self.adpter.export_volume(
            provider_volume_id,
            conversion_dir,
            str(image_meta['id']),
            cgw_host_id=self.configuration.cgw_host_id,
            cgw_host_ip=self.configuration.cgw_host_ip,
            cgw_username=self.configuration.cgw_username,
            cgw_certificate=self.configuration.cgw_certificate,
            transfer_station=self.configuration.storage_tmp_dir)
        if not task_ret:
            raise exception_ex.ProviderExportVolumeError
        upload_image = os.path.join(conversion_dir, str(image_meta['id']))

        try:
            image_utils.upload_volume(context, image_service, image_meta,
                                      upload_image)
        finally:
            # Never leave the exported file behind, even if upload fails.
            fileutils.delete_if_exists(upload_image)
コード例 #2
0
ファイル: scality.py プロジェクト: rahul4-jain/cinder
 def do_setup(self, context):
     """Any initialization the volume driver does while starting."""
     # Fail fast if required binaries/configuration are missing.
     self._check_prerequisites()
     # Mount the Scality SOFS filesystem at the configured mount point.
     self._mount_sofs()
     # Make sure the directory that will hold volume files exists.
     voldir = os.path.join(self.configuration.scality_sofs_mount_point,
                           self.configuration.scality_sofs_volume_dir)
     fileutils.ensure_tree(voldir)
        def inner(*args, **kwargs):
            # Serialize callers of ``f`` on a per-``name`` green-thread
            # semaphore and, when ``external`` is set, additionally on an
            # inter-process file lock.
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(_('Got semaphore "%(lock)s" for method '
                            '"%(method)s"...'), {'lock': name,
                                                 'method': f.__name__})
                if external and not CONF.disable_process_locking:
                    LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
                                'method "%(method)s"...'),
                              {'lock': name, 'method': f.__name__})
                    cleanup_dir = False

                    # We need a copy of lock_path because it is non-local
                    local_lock_path = lock_path
                    if not local_lock_path:
                        local_lock_path = CONF.lock_path

                    if not local_lock_path:
                        # No configured lock directory: fall back to a
                        # throwaway tempdir removed in the finally below.
                        cleanup_dir = True
                        local_lock_path = tempfile.mkdtemp()

                    if not os.path.exists(local_lock_path):
                        cleanup_dir = True
                        fileutils.ensure_tree(local_lock_path)

                    # NOTE(mikal): the lock name cannot contain directory
                    # separators
                    safe_name = name.replace(os.sep, '_')
                    lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                    lock_file_path = os.path.join(local_lock_path,
                                                  lock_file_name)

                    try:
                        lock = InterProcessLock(lock_file_path)
                        with lock:
                            LOG.debug(_('Got file lock "%(lock)s" at %(path)s '
                                        'for method "%(method)s"...'),
                                      {'lock': name,
                                       'path': lock_file_path,
                                       'method': f.__name__})
                            retval = f(*args, **kwargs)
                    finally:
                        # NOTE(vish): This removes the tempdir if we needed
                        #             to create one. This is used to cleanup
                        #             the locks left behind by unit tests.
                        if cleanup_dir:
                            shutil.rmtree(local_lock_path)
                else:
                    retval = f(*args, **kwargs)

            return retval
コード例 #4
0
 def do_setup(self, context):
     """Any initialization the volume driver does while starting."""
     self._check_prerequisites()
     self._mount_sofs()
     # Ensure the volume directory exists under the SOFS mount point.
     mount_point = self.configuration.scality_sofs_mount_point
     volume_dir = self.configuration.scality_sofs_volume_dir
     fileutils.ensure_tree(os.path.join(mount_point, volume_dir))
コード例 #5
0
    def setUp(self):
        """Create a scratch volumes directory and stub out command exec."""
        super(TestIetAdmDriver, self).setUp()
        # Temporary directory standing in for the target volumes dir.
        self.fake_volumes_dir = tempfile.mkdtemp()
        fileutils.ensure_tree(self.fake_volumes_dir)
        self.addCleanup(self._cleanup)

        patcher = mock.patch.object(utils, 'execute')
        self.exec_patcher = patcher
        self.mock_execute = patcher.start()
        self.addCleanup(patcher.stop)
コード例 #6
0
ファイル: file.py プロジェクト: ziziwu/openstack
    def _ensure_ebs_exist(self):
        """Ensure the EBS volume and snapshot directory trees exist.

        Creates 100 numbered subdirectories (0-99) under both the volume
        directory and the snapshot directory of the PoFS mount point.
        """
        mount_point = self.configuration.pofs_mount_point
        # Volume dirs first, then snapshot dirs, matching creation order.
        for subdir in (self.configuration.ebs_volume_directory,
                       self.configuration.ebs_snapshot_directory):
            base = os.path.join(mount_point, subdir)
            for index in range(100):
                fileutils.ensure_tree(os.path.join(base, str(index)))
コード例 #7
0
    def setUp(self):
        # Create a scratch directory to act as the target volumes dir and
        # register cleanup so tests never leak temp files.
        super(TestIetAdmDriver, self).setUp()
        self.fake_volumes_dir = tempfile.mkdtemp()
        fileutils.ensure_tree(self.fake_volumes_dir)
        self.addCleanup(self._cleanup)

        # Patch utils.execute so no real commands run during tests.
        self.exec_patcher = mock.patch.object(utils, 'execute')
        self.mock_execute = self.exec_patcher.start()
        self.addCleanup(self.exec_patcher.stop)
コード例 #8
0
ファイル: file.py プロジェクト: ziziwu/openstack
 def delete_snapshot(self, snapshot_ref):
     """Delete a snapshot by moving its backing file to the recycle dir.

     :param snapshot_ref: snapshot object/mapping; only ``id`` is used
     """
     snapshot = dict(snapshot_ref)
     snapshot_path = self._get_snapshot_path(snapshot['id'])
     if os.path.exists(snapshot_path):
         recycle = self.recycle_path()
         fileutils.ensure_tree(recycle)
         # Move instead of unlink so data is recoverable until recycled.
         cmd = ('mv', snapshot_path, recycle)
         processutils.execute(*cmd, run_as_root=True)
     # Lazy %-args: formatting is skipped when INFO logging is disabled.
     LOG.info("Deleted snapshot %s", snapshot['id'])
コード例 #9
0
ファイル: scality.py プロジェクト: rahul4-jain/cinder
    def _mount_sofs(self):
        """Mount the Scality SOFS filesystem and verify the mount succeeded."""
        config = self.configuration.scality_sofs_config
        mount_path = self.configuration.scality_sofs_mount_point

        fileutils.ensure_tree(mount_path)
        # Mount only if not already mounted (idempotent on restart).
        if not self._sofs_is_mounted():
            self._execute('mount', '-t', 'sofs', config, mount_path,
                          run_as_root=True)
        # Re-check: the mount command may fail silently from our view.
        if not self._sofs_is_mounted():
            msg = _("Cannot mount Scality SOFS, check syslog for errors")
            LOG.warning(msg)
            raise exception.VolumeBackendAPIException(data=msg)
コード例 #10
0
    def _mount_sofs(self):
        """Mount the Scality SOFS filesystem, raising if the mount fails."""
        sofs_config = self.configuration.scality_sofs_config
        mount_point = self.configuration.scality_sofs_mount_point

        fileutils.ensure_tree(mount_point)
        if not self._sofs_is_mounted():
            self._execute('mount', '-t', 'sofs', sofs_config, mount_point,
                          run_as_root=True)
        # Verify after attempting the mount; bail out if still unmounted.
        if not self._sofs_is_mounted():
            msg = _("Cannot mount Scality SOFS, check syslog for errors")
            LOG.warning(msg)
            raise exception.VolumeBackendAPIException(data=msg)
コード例 #11
0
ファイル: test_windows.py プロジェクト: skyniluyy/cinder
    def _test_copy_volume_to_image(self, supported_format):
        # Exercise copy_volume_to_image for a given on-disk format.
        # NOTE(review): mox verifies the exact call sequence recorded below,
        # so the expectation order must mirror the driver implementation.
        drv = self._driver

        vol = db_fakes.get_fake_volume_info()

        image_meta = db_fakes.get_fake_image_meta()

        fake_get_supported_format = lambda x: supported_format

        # Force the "conversion dir missing" branch and stub path/format
        # helpers so no real filesystem or hypervisor calls are made.
        self.stubs.Set(os.path, 'exists', lambda x: False)
        self.stubs.Set(drv, 'local_path', self.fake_local_path)
        self.stubs.Set(windows_utils.WindowsUtils, 'get_supported_format',
                       fake_get_supported_format)

        self.mox.StubOutWithMock(fileutils, 'ensure_tree')
        self.mox.StubOutWithMock(fileutils, 'delete_if_exists')
        self.mox.StubOutWithMock(image_utils, 'upload_volume')
        self.mox.StubOutWithMock(windows_utils.WindowsUtils, 'copy_vhd_disk')
        self.mox.StubOutWithMock(vhdutils.VHDUtils, 'convert_vhd')

        # Expected call sequence, in driver order:
        fileutils.ensure_tree(CONF.image_conversion_dir)
        temp_vhd_path = os.path.join(CONF.image_conversion_dir,
                                     str(image_meta['id']) + "." +
                                     supported_format)
        upload_image = temp_vhd_path

        windows_utils.WindowsUtils.copy_vhd_disk(self.fake_local_path(vol),
                                                 temp_vhd_path)
        if supported_format == 'vhdx':
            # Driver converts vhdx -> vhd before upload; strip trailing 'x'.
            upload_image = upload_image[:-1]
            vhdutils.VHDUtils.convert_vhd(temp_vhd_path, upload_image,
                                          constants.VHD_TYPE_DYNAMIC)

        image_utils.upload_volume(None, None, image_meta, upload_image, 'vhd')

        fileutils.delete_if_exists(temp_vhd_path)
        fileutils.delete_if_exists(upload_image)

        self.mox.ReplayAll()

        drv.copy_volume_to_image(None, vol, None, image_meta)
コード例 #12
0
ファイル: windows.py プロジェクト: shishirng/cinder
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        disk_format = self.utils.get_supported_format()
        conversion_dir = self.configuration.image_conversion_dir
        if not os.path.exists(conversion_dir):
            fileutils.ensure_tree(conversion_dir)

        temp_vhd_path = os.path.join(
            conversion_dir, str(image_meta["id"]) + "." + disk_format)
        upload_image = temp_vhd_path

        try:
            self.utils.copy_vhd_disk(self.local_path(volume), temp_vhd_path)
            # qemu-img does not yet fully support vhdx format, so we'll first
            # convert the image to vhd before attempting upload
            if disk_format == "vhdx":
                # Dropping the trailing 'x' yields the .vhd target path.
                upload_image = upload_image[:-1]
                self.vhdutils.convert_vhd(temp_vhd_path, upload_image,
                                          constants.VHD_TYPE_DYNAMIC)

            image_utils.upload_volume(context, image_service, image_meta,
                                      upload_image, "vhd")
        finally:
            fileutils.delete_if_exists(temp_vhd_path)
            fileutils.delete_if_exists(upload_image)
コード例 #13
0
ファイル: file.py プロジェクト: ziziwu/openstack
    def delete_volume(self, volume):
        """Deletes a logical volume.

        The backing file is moved to the recycle directory rather than
        removed. Missing provider_location or a missing backing file is
        logged and treated as a no-op.
        """
        if not volume['provider_location']:
            LOG.warn(_('Volume %s does not have provider_location specified, '
                     'skipping'), volume['name'])
            return

        mounted_path = self.local_path(volume)

        if not os.path.exists(mounted_path):
            # Explicit mapping instead of the fragile `% locals()` idiom;
            # lazy args avoid formatting when warnings are disabled.
            LOG.warn(_('Trying to delete non-existing volume %(volume)s at '
                     'path %(mounted_path)s'),
                     {'volume': volume['name'],
                      'mounted_path': mounted_path})
            return

        recycle = self.recycle_path()
        fileutils.ensure_tree(recycle)
        # Move instead of unlink so data stays recoverable until recycled.
        cmd = ('mv', mounted_path, recycle)
        processutils.execute(*cmd, run_as_root=True)
コード例 #14
0
ファイル: driver.py プロジェクト: arthur-wangfeng/basket
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        # Export the provider-side volume into the conversion directory,
        # upload the resulting file as the image, then clean up.
        if not os.path.exists(self.configuration.provider_image_conversion_dir):
            fileutils.ensure_tree(self.configuration.provider_image_conversion_dir)
        provider_volume_id = self._get_provider_volumeid_from_volume(volume)
        task_ret = self.adpter.export_volume(provider_volume_id,
                                             self.configuration.provider_image_conversion_dir,
                                             str(image_meta['id']),
                                             cgw_host_id=self.configuration.cgw_host_id,
                                             cgw_host_ip=self.configuration.cgw_host_ip,
                                             cgw_username=self.configuration.cgw_username,
                                             cgw_certificate=self.configuration.cgw_certificate,
                                             transfer_station=self.configuration.storage_tmp_dir)
        if not task_ret:
            raise exception_ex.ProviderExportVolumeError
        # Exported file is named after the image id inside the conversion dir.
        temp_path = os.path.join(self.configuration.provider_image_conversion_dir, str(image_meta['id']))
        upload_image = temp_path

        try:
            image_utils.upload_volume(context, image_service, image_meta,
                                      upload_image)
        finally:
            # Always remove the temporary export, even if the upload fails.
            fileutils.delete_if_exists(upload_image)
コード例 #15
0
ファイル: windows.py プロジェクト: skyniluyy/cinder
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        disk_format = self.utils.get_supported_format()
        if not os.path.exists(self.configuration.image_conversion_dir):
            fileutils.ensure_tree(self.configuration.image_conversion_dir)

        # Temporary file named after the image id plus the native format.
        temp_vhd_path = os.path.join(self.configuration.image_conversion_dir,
                                     str(image_meta['id']) + '.' + disk_format)
        upload_image = temp_vhd_path

        try:
            self.utils.copy_vhd_disk(self.local_path(volume), temp_vhd_path)
            # qemu-img does not yet fully support vhdx format, so we'll first
            # convert the image to vhd before attempting upload
            if disk_format == 'vhdx':
                # Dropping the trailing 'x' yields the .vhd target path.
                upload_image = upload_image[:-1]
                self.vhdutils.convert_vhd(temp_vhd_path, upload_image,
                                          constants.VHD_TYPE_DYNAMIC)

            image_utils.upload_volume(context, image_service, image_meta,
                                      upload_image, 'vhd')
        finally:
            # Remove both the native-format file and the converted one.
            fileutils.delete_if_exists(temp_vhd_path)
            fileutils.delete_if_exists(upload_image)
コード例 #16
0
    def create_iser_target(self, name, tid, lun, path,
                           chap_auth=None, **kwargs):
        """Create an iSER target backed by *path* via tgt-admin.

        :param name: target name of the form '<prefix>:<vol_id>'
        :param tid: unused by TgtAdm, kept for interface compatibility
        :param lun: unused by TgtAdm, kept for interface compatibility
        :param path: backing-store path for the target
        :param chap_auth: optional CHAP auth line embedded in the config
        :returns: the target id assigned by tgtd
        :raises: exception.ISERTargetCreateFailed, exception.NotFound
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        if chap_auth is None:
            volume_conf = """
                <target %s>
                    driver iser
                    backing-store %s
                </target>
            """ % (name, path)
        else:
            volume_conf = """
                <target %s>
                    driver iser
                    backing-store %s
                    %s
                </target>
            """ % (name, path, chap_auth)

        LOG.info(_('Creating iser_target for: %s') % vol_id)
        volume_path = os.path.join(self.volumes_dir, vol_id)

        # Context manager guarantees the persist file is closed even if
        # the write raises (the original leaked the handle on failure).
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(self.volumes_dir, old_name)

        try:
            (out, err) = self._execute('tgt-admin',
                                       '--update',
                                       name,
                                       run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_("Failed to create iser target for volume "
                        "id:%(vol_id)s: %(e)s")
                      % {'vol_id': vol_id, 'e': str(e)})

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISERTargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (self.iser_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iser target for volume "
                        "id:%(vol_id)s. Please ensure your tgtd config file "
                        "contains 'include %(volumes_dir)s/*'") %
                      {'vol_id': vol_id, 'volumes_dir': self.volumes_dir})
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
    def setUp(self):
        # Build a fully-faked target-driver environment: stubbed config,
        # a scratch volumes directory, and canned volume dictionaries.
        super(TargetDriverFixture, self).setUp()
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.safe_get = mock.Mock(side_effect=self.fake_safe_get)
        self.configuration.iscsi_ip_address = '10.9.8.7'
        self.configuration.iscsi_port = 3260

        # Temp dir stands in for the tgt volumes dir; cleaned up below.
        self.fake_volumes_dir = tempfile.mkdtemp()
        fileutils.ensure_tree(self.fake_volumes_dir)

        self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_project_id_2 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_volume_id = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'

        self.addCleanup(self._cleanup)

        # First test volume: provider_location uses a hard-coded portal.
        self.testvol =\
            {'project_id': self.fake_project_id,
             'name': 'testvol',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': '10.10.7.1:3260 '
                                  'iqn.2010-10.org.openstack:'
                                  'volume-%s 0' % self.fake_volume_id,
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
        self.target_string = ('127.0.0.1:3260,1 ' + self.iscsi_target_prefix +
                              'volume-%s' % self.testvol['id'])

        # Second test volume: provider_location derived from the stubbed
        # configuration values above.
        self.testvol_2 =\
            {'project_id': self.fake_project_id_2,
             'name': 'testvol2',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': ('%(ip)s:%(port)d%(iqn)svolume-%(vol)s 2' %
                                   {'ip': self.configuration.iscsi_ip_address,
                                    'port': self.configuration.iscsi_port,
                                    'iqn': self.iscsi_target_prefix,
                                    'vol': self.fake_volume_id}),
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        # Connection properties a driver is expected to report for testvol.
        self.expected_iscsi_properties = \
            {'auth_method': 'CHAP',
             'auth_password': '******',
             'auth_username': '******',
             'encrypted': False,
             'logical_block_size': '512',
             'physical_block_size': '512',
             'target_discovered': False,
             'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                           self.fake_volume_id,
             'target_lun': 0,
             'target_portal': '10.10.7.1:3260',
             'volume_id': self.fake_volume_id}

        self.VOLUME_ID = '83c2e877-feed-46be-8435-77884fe55b45'
        self.VOLUME_NAME = 'volume-' + self.VOLUME_ID
        self.test_vol = (self.iscsi_target_prefix + self.VOLUME_NAME)
コード例 #18
0
ファイル: driver.py プロジェクト: kevin-zhangsen/badam
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy a provider-cloud volume to an image.

        Two paths exist, keyed on the image container_format:

        * vgw formats ('fs_vgw_url', 'vcloud_vgw_url', 'aws_vgw_url'):
          detach the provider volume if attached, attach it to the vgw
          host, copy it to a file over RPyC, push that file to the vgw
          URL, register an empty placeholder image in glance, then
          restore the original attachment.
        * anything else: export the volume into the conversion directory
          and upload the resulting file directly.

        NOTE(review): LOG.error is used throughout for begin/end timing
        traces — these appear to be informational, not errors.
        """
        LOG.error('begin time of copy_volume_to_image is %s' %
                  (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        container_format = image_meta.get('container_format')
        file_name = image_meta.get('id')
        if container_format in ['fs_vgw_url', 'vcloud_vgw_url', 'aws_vgw_url']:
            LOG.debug('get the vgw url')
            vgw_url = CONF.vgw.vgw_url.get(container_format)
            #vgw_url = 'http://162.3.125.52:9999/'
            volume_id = volume['id']

            #1.get the provider_volume at provider cloud
            provider_volume_id = self._get_provider_volumeid_from_volume(
                volume)
            if not provider_volume_id:
                LOG.error('get provider_volume_id of volume %s error' %
                          volume_id)
                raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)
            provider_volume = self._get_provider_volume(provider_volume_id)
            if not provider_volume:
                LOG.error(
                    'get provider_volume of volume %s at provider cloud error'
                    % volume_id)
                raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)

            origin_provider_volume_state = provider_volume.extra.get(
                'attachment_status')
            origin_attach_node_id = None
            origin_device_name = None
            #2.judge if the volume is available
            if origin_provider_volume_state is not None:
                # Volume is attached somewhere: remember where so it can be
                # re-attached at the end, then detach and poll (up to ~50s)
                # until the detach completes.
                origin_attach_node_id = provider_volume.extra['instance_id']
                origin_device_name = provider_volume.extra['device']
                self.adpter.detach_volume(provider_volume)
                time.sleep(1)
                retry_time = 50
                provider_volume = self._get_provider_volume(provider_volume_id)
                while retry_time > 0:
                    if provider_volume and provider_volume.extra.get(
                            'attachment_status') is None:
                        break
                    else:
                        time.sleep(1)
                        provider_volume = self._get_provider_volume(
                            provider_volume_id)
                        retry_time = retry_time - 1
            #3.attach the volume to vgw host
            try:
                #3.1 get the vgw host
                vgw_host = self._get_provider_node(
                    self.configuration.cgw_host_id)
                if not vgw_host:
                    raise exception_ex.VgwHostNotFound(
                        Vgw_id=self.configuration.cgw_host_id)
                device_name = self._get_next_device_name(vgw_host)
                LOG.error('**********************************************')
                LOG.error('the volume status %s' % provider_volume.state)
                self.adpter.attach_volume(vgw_host, provider_volume,
                                          device_name)
                #query volume status
                # Poll (up to ~120s) until the attach completes.
                time.sleep(1)
                retry_time = 120
                provider_volume = self._get_provider_volume(provider_volume_id)
                while retry_time > 0:
                    if provider_volume and provider_volume.extra.get(
                            'attachment_status') == 'attached':
                        break
                    else:
                        time.sleep(1)
                        provider_volume = self._get_provider_volume(
                            provider_volume_id)
                        retry_time = retry_time - 1

            except Exception as e:
                raise e
            time.sleep(5)
            # Copy the attached device to a file on the vgw host via RPyC.
            conn = rpyc.connect(self.configuration.cgw_host_ip,
                                int(CONF.vgw.rpc_service_port))
            LOG.error('begin time of copy_volume_to_file is %s' %
                      (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            full_file_path = conn.root.copy_volume_to_file(
                device_name, file_name, CONF.vgw.store_file_dir)
            LOG.error('end time of copy_volume_to_image is %s' %
                      (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            #todo exception occured clean env
            if not full_file_path:
                self.adpter.detach_volume(provider_volume)
                conn.close()
                raise exception_ex.ProviderExportVolumeError(
                    volume_id=volume_id)
            LOG.error('begin time of push_file_to_vgw is %s' %
                      (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            push_file_result = conn.root.exposed_push_file_to_vgw(
                full_file_path, vgw_url)
            LOG.error('end time of push_file_to_vgw is %s' %
                      (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            if not push_file_result:
                LOG.error('post file file %s  to %s failed' %
                          (push_file_result, vgw_url))
                self.adpter.detach_volume(provider_volume)
                conn.close()
                raise exception_ex.ProviderExportVolumeError(
                    volume_id=volume_id)
            conn.close()
            #create a empty file to glance
            # The real data lives behind the vgw URL; glance only gets a
            # placeholder record.
            with image_utils.temporary_file() as tmp:
                image_utils.upload_volume(context, image_service, image_meta,
                                          tmp)
            fileutils.delete_if_exists(tmp)
            #4.detach form vgw
            self.adpter.detach_volume(provider_volume)
            time.sleep(1)
            retry_time = 120
            provider_volume = self._get_provider_volume(provider_volume_id)
            while retry_time > 0:
                if provider_volume and provider_volume.extra.get(
                        'attachment_status') is None:
                    break
                else:
                    time.sleep(1)
                    provider_volume = self._get_provider_volume(
                        provider_volume_id)
                    retry_time = retry_time - 1
            LOG.error('**********************************************')
            LOG.error('the volume status %s' % provider_volume.state)
            #attach the volume back
            if origin_provider_volume_state is not None:
                origin_attach_node = self._get_provider_node(
                    origin_attach_node_id)

                self.adpter.attach_volume(origin_attach_node, provider_volume,
                                          origin_device_name)

        else:
            # Direct path: export into the conversion dir and upload.
            if not os.path.exists(
                    self.configuration.provider_image_conversion_dir):
                fileutils.ensure_tree(
                    self.configuration.provider_image_conversion_dir)
            provider_volume_id = self._get_provider_volumeid_from_volume(
                volume)
            task_ret = self.adpter.export_volume(
                provider_volume_id,
                self.configuration.provider_image_conversion_dir,
                str(image_meta['id']),
                cgw_host_id=self.configuration.cgw_host_id,
                cgw_host_ip=self.configuration.cgw_host_ip,
                cgw_username=self.configuration.cgw_username,
                cgw_certificate=self.configuration.cgw_certificate,
                transfer_station=self.configuration.storage_tmp_dir)
            if not task_ret:
                raise exception_ex.ProviderExportVolumeError
            temp_path = os.path.join(
                self.configuration.provider_image_conversion_dir,
                str(image_meta['id']))
            upload_image = temp_path

            try:
                image_utils.upload_volume(context, image_service, image_meta,
                                          upload_image)
            finally:
                fileutils.delete_if_exists(upload_image)
        LOG.error('end time of copy_volume_to_image is %s' %
                  (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
コード例 #19
0
ファイル: iscsi.py プロジェクト: sachsgiri/cinder
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a tgtd iSCSI target backed by *path*.

        :param name: target name of the form '<prefix>:<vol_id>'
        :param tid: unused by TgtAdm, kept for interface compatibility
        :param lun: unused by TgtAdm, kept for interface compatibility
        :param path: backing-store path for the target
        :param chap_auth: optional CHAP auth values for the config template
        :returns: the target id assigned by tgtd
        :raises: exception.ISCSITargetCreateFailed, exception.NotFound
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = kwargs.get('write_cache', 'on')
        if chap_auth is None:
            volume_conf = self.VOLUME_CONF % (name, path, write_cache)
        else:
            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name,
                                                             path, chap_auth,
                                                             write_cache)

        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        # Context manager guarantees the persist file is closed even if
        # the write raises (the original leaked the handle on failure).
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)
        LOG.debug('Created volume path %(vp)s,\n'
                  'content: %(vc)s'
                  % {'vp': volume_path, 'vc': volume_conf})

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # with the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = self._execute('tgt-admin', '--update', name,
                                       run_as_root=True)
            LOG.debug("StdOut from tgt-admin --update: %s", out)
            LOG.debug("StdErr from tgt-admin --update: %s", err)

            # Grab targets list for debug
            # Consider adding a check for lun 0 and 1 for tgtadm
            # before considering this as valid
            (out, err) = self._execute('tgtadm',
                                       '--lld',
                                       'iscsi',
                                       '--op',
                                       'show',
                                       '--mode',
                                       'target',
                                       run_as_root=True)
            LOG.debug("Targets after update: %s" % out)
        except putils.ProcessExecutionError as e:
            LOG.warning(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s: %(e)s")
                        % {'vol_id': vol_id, 'e': e})

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s. Please ensure your tgtd config file "
                        "contains 'include %(volumes_dir)s/*'") % {
                      'vol_id': vol_id,
                      'volumes_dir': volumes_dir, })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
コード例 #20
0
ファイル: cxt.py プロジェクト: vefimova/cinder
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a CXT iscsi target via its persistence file.

        :param name: target name, expected form '<prefix>:<volume id>'
        :param tid: unused here; kept for interface compatibility
        :param lun: unused here; kept for interface compatibility
        :param path: backing device/file path written into the target conf
        :param chap_auth: optional (username, password) tuple for CHAP
        :param kwargs: may contain 'old_name' naming a stale persist file
                       to clean up once the new target is live
        :returns: the target id resolved from the created target's IQN
        :raises exception.ISCSITargetCreateFailed: if iscsictl fails
        :raises exception.NotFound: if the target cannot be found afterwards
        """

        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        volumes_dir = self._get_volumes_dir()
        fileutils.ensure_tree(volumes_dir)

        vol_id = name.split(':')[1]

        if netutils.is_valid_ipv4(self.configuration.iscsi_ip_address):
            portal = "%s:%s" % (self.configuration.iscsi_ip_address,
                                self.configuration.iscsi_port)
        else:
            # ipv6 addresses use [ip]:port format, ipv4 use ip:port
            portal = "[%s]:%s" % (self.configuration.iscsi_ip_address,
                                  self.configuration.iscsi_port)

        if chap_auth is None:
            volume_conf = self.TARGET_FMT % (name, path, portal)
        else:
            volume_conf = self.TARGET_FMT_WITH_CHAP % (name,
                                                       path, portal,
                                                       '"%s":"%s"' % chap_auth)
        LOG.debug('Creating iscsi_target for: %s', vol_id)
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(_LW('Persistence file already exists for volume, '
                            'found file at: %s'), volume_path)
        # Use a context manager so the persist file is closed even if
        # write() raises (the bare open/close version leaked the handle).
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)
        LOG.debug('Created volume path %(vp)s,\n'
                  'content: %(vc)s',
                  {'vp': volume_path, 'vc': volume_conf})

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name:
            # Fixed format tokens: '%{vol}s' is not valid %-style mapping
            # syntax and made this debug message unformattable.
            LOG.debug('Detected old persistence file for volume '
                      '%(vol)s at %(old_name)s',
                      {'vol': vol_id, 'old_name': old_name})
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = utils.execute('iscsictl', '-S', 'target=%s' % name,
                                       '-f', volume_path,
                                       '-x', self.config,
                                       run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed to create iscsi target for volume "
                          "id:%(vol_id)s: %(e)s"),
                      {'vol_id': vol_id, 'e': e})

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        finally:
            # NOTE(review): if execute() raised, out/err still hold the
            # output of the earlier 'target=ALL' listing, not of '-S'.
            LOG.debug("StdOut from iscsictl -S: %s", out)
            LOG.debug("StdErr from iscsictl -S: %s", err)

        # Grab targets list for debug
        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            # Fixed: '%(volumes_dir)' was missing the trailing 's'
            # conversion and raised ValueError during formatting.
            LOG.error(_LE("Failed to create iscsi target for volume "
                          "id:%(vol_id)s. Please verify your configuration "
                          "in %(volumes_dir)s"), {
                      'vol_id': vol_id,
                      'volumes_dir': volumes_dir, })
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
コード例 #21
0
ファイル: driver.py プロジェクト: kevin-zhangsen/badam
    def copy_volume_to_image(self, context, volume, image_service, image_meta): 
        """Copy a provider-cloud volume out to an image.

        Two paths, selected by the image's container_format:

        * vgw path ('fs_vgw_url'/'vcloud_vgw_url'/'aws_vgw_url'): detach
          the provider volume if attached, attach it to the vgw host,
          dump it to a file over rpyc, push that file to the vgw URL,
          upload only an EMPTY placeholder file to glance, then detach
          and re-attach the volume to its original node.
        * default path: ask the provider adapter to export the volume to
          a local conversion directory and upload that file to glance.

        NOTE(review): timing traces are logged at ERROR level throughout
        -- presumably for grep visibility; confirm before changing.
        """
        LOG.error('begin time of copy_volume_to_image is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        container_format=image_meta.get('container_format')
        file_name=image_meta.get('id')
        if container_format in ['fs_vgw_url','vcloud_vgw_url','aws_vgw_url']:
            LOG.debug('get the vgw url')
            vgw_url = CONF.vgw.vgw_url.get(container_format)
            #vgw_url = 'http://162.3.125.52:9999/'
            volume_id = volume['id']
 
            #1.get the provider_volume at provider cloud  
            provider_volume_id = self._get_provider_volumeid_from_volume(volume)
            if not provider_volume_id:
                LOG.error('get provider_volume_id of volume %s error' % volume_id) 
                raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)
            provider_volume=self._get_provider_volume(provider_volume_id)
            if not provider_volume:
                LOG.error('get provider_volume of volume %s at provider cloud error' % volume_id) 
                raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)
            
            # Remember where the volume was attached so it can be put back
            # at the end (non-None attachment_status means "attached").
            origin_provider_volume_state= provider_volume.extra.get('attachment_status')
            origin_attach_node_id = None
            origin_device_name=None
            #2.judge if the volume is available
            if origin_provider_volume_state is not None:
                origin_attach_node_id = provider_volume.extra['instance_id']
                origin_device_name = provider_volume.extra['device']
                self.adpter.detach_volume(provider_volume)
                time.sleep(1)
                # Poll up to ~50s for the detach to complete.
                retry_time = 50
                provider_volume=self._get_provider_volume(provider_volume_id)
                while retry_time > 0:
                    if provider_volume and provider_volume.extra.get('attachment_status') is None:
                        break
                    else:
                        time.sleep(1)
                        provider_volume=self._get_provider_volume(provider_volume_id)
                        retry_time = retry_time-1
            #3.attach the volume to vgw host
            try:
                #3.1 get the vgw host
                vgw_host= self._get_provider_node(self.configuration.cgw_host_id)
                if not vgw_host:
                    raise exception_ex.VgwHostNotFound(Vgw_id=self.configuration.cgw_host_id)
                device_name=self._get_next_device_name(vgw_host)
                LOG.error('**********************************************')
                LOG.error('the volume status %s' %provider_volume.state)
                self.adpter.attach_volume(vgw_host, provider_volume,
                                       device_name)
                #query volume status
                time.sleep(1)
                # Poll up to ~120s for the attach to complete.
                retry_time = 120
                provider_volume=self._get_provider_volume(provider_volume_id)
                while retry_time > 0:
                    if provider_volume and provider_volume.extra.get('attachment_status') =='attached':
                        break
                    else:
                        time.sleep(1)
                        provider_volume=self._get_provider_volume(provider_volume_id)
                        retry_time = retry_time-1
                
            except Exception as e:
                raise e
            time.sleep(5)           
            # Drive the copy on the vgw host through an rpyc service.
            conn=rpyc.connect(self.configuration.cgw_host_ip,int(CONF.vgw.rpc_service_port))
            LOG.error('begin time of copy_volume_to_file is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            full_file_path = conn.root.copy_volume_to_file(device_name,file_name,CONF.vgw.store_file_dir)
            LOG.error('end time of copy_volume_to_image is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            #todo exception occured clean env
            if not full_file_path:
                self.adpter.detach_volume(provider_volume)
                conn.close()
                raise exception_ex.ProviderExportVolumeError(volume_id=volume_id)
            LOG.error('begin time of push_file_to_vgw is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            push_file_result =conn.root.exposed_push_file_to_vgw(full_file_path,vgw_url)
            LOG.error('end time of push_file_to_vgw is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            if not push_file_result:
                LOG.error('post file file %s  to %s failed' %(push_file_result,vgw_url))
                self.adpter.detach_volume(provider_volume)
                conn.close()
                raise exception_ex.ProviderExportVolumeError(volume_id=volume_id)
            conn.close()
            #create a empty file to glance
            with image_utils.temporary_file() as tmp:
                image_utils.upload_volume(context,
                                          image_service,
                                          image_meta,
                                          tmp)
            # NOTE(review): temporary_file() likely removes tmp on exit
            # already, making this a no-op belt-and-braces -- confirm.
            fileutils.delete_if_exists(tmp)
            #4.detach form vgw
            self.adpter.detach_volume(provider_volume)
            time.sleep(1)
            retry_time = 120
            provider_volume=self._get_provider_volume(provider_volume_id)
            while retry_time > 0:
                if provider_volume and provider_volume.extra.get('attachment_status') is None:
                    break
                else:
                    time.sleep(1)
                    provider_volume=self._get_provider_volume(provider_volume_id)
                    retry_time = retry_time-1
            LOG.error('**********************************************')
            LOG.error('the volume status %s' %provider_volume.state)       
            #attach the volume back         
            if origin_provider_volume_state is not None:
                origin_attach_node = self._get_provider_node(origin_attach_node_id)
                 
                self.adpter.attach_volume(origin_attach_node, provider_volume,
                                           origin_device_name)
                
        else:
            # Default path: adapter-driven export into a local conversion
            # directory, then upload the resulting file to glance.
            if not os.path.exists(self.configuration.provider_image_conversion_dir):
                fileutils.ensure_tree(self.configuration.provider_image_conversion_dir)
            provider_volume_id = self._get_provider_volumeid_from_volume(volume)
            task_ret = self.adpter.export_volume(provider_volume_id,
                                                 self.configuration.provider_image_conversion_dir,
                                                 str(image_meta['id']),
                                                 cgw_host_id=self.configuration.cgw_host_id,
                                                 cgw_host_ip=self.configuration.cgw_host_ip,
                                                 cgw_username=self.configuration.cgw_username,
                                                 cgw_certificate=self.configuration.cgw_certificate,
                                                 transfer_station=self.configuration.storage_tmp_dir)
            if not task_ret:
                raise exception_ex.ProviderExportVolumeError
            temp_path = os.path.join(self.configuration.provider_image_conversion_dir, str(image_meta['id']))
            upload_image = temp_path
    
            try:
                image_utils.upload_volume(context, image_service, image_meta,
                                          upload_image)
            finally:
                # Always remove the exported file, even if the upload fails.
                fileutils.delete_if_exists(upload_image)
        LOG.error('end time of copy_volume_to_image is %s' %(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
コード例 #22
0
ファイル: iscsi.py プロジェクト: NxtCloud/cinder
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a tgtd iscsi target via its persistence file.

        :param name: target name, expected form '<prefix>:<volume id>'
        :param tid: unused by TgtAdm; kept for interface compatibility
        :param lun: unused by TgtAdm; kept for interface compatibility
        :param path: backing device/file path written into the target conf
        :param chap_auth: optional 'IncomingUser <u> <p>' style string
        :param kwargs: may contain 'old_name' (stale persist file to remove)
                       and 'write_cache' ('on' by default)
        :returns: the target id resolved from the created target's IQN
        :raises exception.ISCSITargetCreateFailed: on tgt-admin failure or
            when the backing lun cannot be (re)created
        :raises exception.NotFound: if the target cannot be found afterwards
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = kwargs.get('write_cache', 'on')
        if chap_auth is None:
            volume_conf = self.VOLUME_CONF % (name, path, write_cache)
        else:
            # tgt config expects lowercase 'incominguser'.
            chap_str = re.sub('^IncomingUser ', 'incominguser ', chap_auth)
            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name,
                                                             path, chap_str,
                                                             write_cache)

        LOG.info(_LI('Creating iscsi_target for: %s'), vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        # Fixed: use a context manager so the persist file is closed even
        # if write() raises (the bare open/close version leaked the handle).
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)
        LOG.debug('Created volume path %(vp)s,\n'
                  'content: %(vc)s',
                  {'vp': volume_path, 'vc': volume_conf})

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # with the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = self._run('tgt-admin', '--update', name,
                                   run_as_root=True)

            # Grab targets list for debug
            # Consider adding a check for lun 0 and 1 for tgtadm
            # before considering this as valid
            (out, err) = self._run('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
            LOG.debug("Targets after update: %s", out)
        except putils.ProcessExecutionError as e:
            LOG.warning(_LW("Failed to create iscsi target for volume "
                            "id:%(vol_id)s: %(e)s"),
                        {'vol_id': vol_id, 'e': e.stderr})
            if "target already exists" in e.stderr:

                LOG.warning(_LW('Create iscsi target failed for '
                                'target already exists'))
                # NOTE(jdg):  We've run into some cases where the cmd being
                # sent was not correct.  May be related to using the
                # executor direclty?
                # Adding the additional Warning message above to provide
                # a very cleary marker for ER, and if the tgt exists let's
                # just try and use it and move along.
                # Ref bug: #1398078
                pass
            else:
                # Don't forget to remove the persistent file we created
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_LE("Failed to create iscsi target for Volume "
                          "ID: %(vol_id)s. Ensure the tgtd config file "
                          "contains 'include %(volumes_dir)s/*'"), {
                      'vol_id': vol_id, 'volumes_dir': volumes_dir, })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
コード例 #23
0
    def copy_volume_to_image(self, context, volume, image_service, image_meta): 
        """Copy a provider-cloud volume out through a vgw host.

        Detaches the provider volume if in use, attaches it to the vgw
        host, dumps it to a file over rpyc, pushes the file to a
        hard-coded vgw URL, uploads a placeholder file to glance, then
        detaches the volume from the vgw host.

        NOTE(review): 'if True:' makes the else (adapter export) branch
        dead code, and both vgw_url and the rpyc port '1111' are
        hard-coded -- this looks like a debugging/WIP variant; confirm
        before relying on it.
        """
        
        container_format=image_meta.get('container_format')
        #if container_format in ['az01_vgw_url','az11_vgw_url','az31_vgw_url']:
        if True:
            #vgw_url = self.configuration.container_format
            vgw_url = 'http://162.3.114.62:9999/'
            volume_id = volume['id']
 
            #1.get the provider_volume at provider cloud  
            provider_volume_id = self._get_provider_volumeid_from_volume(volume)
            if not provider_volume_id:
                LOG.error('get provider_volume_id of volume %s error' % volume_id) 
                raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)
            provider_volume=self._get_provider_volume(provider_volume_id)
            if not provider_volume:
                LOG.error('get provider_volume of volume %s at provider cloud error' % volume_id) 
                raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)
            
            # Remember the original state so it could be restored later
            # (the re-attach block at the end is commented out).
            origin_provider_volume_state= provider_volume.state
            origin_attach_node_id = None
            origin_device_name=None
            #2.judge if the volume is available
            if provider_volume.state != StorageVolumeState.AVAILABLE:
                origin_attach_node_id = provider_volume.extra['instance_id']
                origin_device_name = provider_volume.extra['device']
                self.adpter.detach_volume(provider_volume)
                time.sleep(1)
                # Poll up to ~10s for the detach to complete.
                retry_time = 10
                provider_volume=self._get_provider_volume(provider_volume_id)
                while retry_time > 0:
                    if provider_volume and provider_volume.state == StorageVolumeState.AVAILABLE:
                        break
                    else:
                        time.sleep(1)
                        provider_volume=self._get_provider_volume(provider_volume_id)
                        retry_time = retry_time-1
            #3.attach the volume to vgw host
            try:
                #3.1 get the vgw host
                vgw_host= self._get_provider_node(self.configuration.cgw_host_id)
                if not vgw_host:
                    raise exception_ex.VgwHostNotFound(Vgw_id=self.configuration.cgw_host_id)
                device_name=self._get_next_device_name(vgw_host)
                self.compute_adapter.attach_volume(vgw_host, provider_volume,
                                       device_name)
                #query volume status
                time.sleep(1)
                # Poll up to ~10s for the attach to complete.
                retry_time = 10
                provider_volume=self._get_provider_volume(provider_volume_id)
                while retry_time > 0:
                    if provider_volume and provider_volume.state == StorageVolumeState.INUSE:
                        break
                    else:
                        time.sleep(1)
                        provider_volume=self._get_provider_volume(provider_volume_id)
                        retry_time = retry_time-1
                
            except Exception as e:
                raise e
           
            # Drive the copy on the vgw host through an rpyc service.
            conn=rpyc.connect(self.configuration.cgw_host_ip,'1111')
            full_file_path = conn.root.copy_volume_to_file(device_name,volume_id)
            if not full_file_path:
                raise exception_ex.ProviderExportVolumeError(volume_id=volume_id)
            push_file_result =conn.root.exposed_push_file_to_vgw(full_file_path,vgw_url)
            if not push_file_result:
                LOG.error('post file file %s  to %s failed' %(push_file_result,vgw_url))
                raise exception_ex.ProviderExportVolumeError(volume_id=volume_id)
            conn.close()
            # Upload a fixed placeholder file to glance instead of the data.
            image_utils.upload_volume(context,
                                      image_service,
                                      image_meta,
                                      "/home/upload/volume-empty")
            #4.detach form vgw
            self.adpter.detach_volume(provider_volume)
            time.sleep(1)
            retry_time = 10
            provider_volume=self._get_provider_volume(provider_volume_id)
            while retry_time > 0:
                if provider_volume and provider_volume.state == StorageVolumeState.AVAILABLE:
                    break
                else:
                    time.sleep(1)
                    provider_volume=self._get_provider_volume(provider_volume_id)
                    retry_time = retry_time-1
                    
            #attach the volume back         
#             if origin_provider_volume_state != StorageVolumeState.AVAILABLE:
#                 origin_attach_node = self._get_provider_node(origin_attach_node_id)
#                 
#                 self.adapter.attach_volume(origin_attach_node, provider_volume,
#                                            origin_device_name)
                
        else:
            # Dead branch while 'if True:' stands; kept as the adapter-
            # driven export path.
            if not os.path.exists(self.configuration.provider_image_conversion_dir):
                fileutils.ensure_tree(self.configuration.provider_image_conversion_dir)
            provider_volume_id = self._get_provider_volumeid_from_volume(volume)
            task_ret = self.adpter.export_volume(provider_volume_id,
                                                 self.configuration.provider_image_conversion_dir,
                                                 str(image_meta['id']),
                                                 cgw_host_id=self.configuration.cgw_host_id,
                                                 cgw_host_ip=self.configuration.cgw_host_ip,
                                                 cgw_username=self.configuration.cgw_username,
                                                 cgw_certificate=self.configuration.cgw_certificate,
                                                 transfer_station=self.configuration.storage_tmp_dir)
            if not task_ret:
                raise exception_ex.ProviderExportVolumeError
            temp_path = os.path.join(self.configuration.provider_image_conversion_dir, str(image_meta['id']))
            upload_image = temp_path
    
            try:
                image_utils.upload_volume(context, image_service, image_meta,
                                          upload_image)
            finally:
                # Always remove the exported file, even if the upload fails.
                fileutils.delete_if_exists(upload_image)
コード例 #24
0
ファイル: driver.py プロジェクト: kevin-zhangsen/badam
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy a provider-cloud volume out to an image.

        Two paths, selected by the image's container_format:

        * 'vgw_url': resolve the vgw endpoint through keystone, detach
          the provider volume if attached, attach it to the vgw host,
          dump it to a file over rpyc, push the file to the vgw service,
          upload only an EMPTY placeholder to glance, then detach and
          re-attach the volume to its original node.
        * otherwise: adapter-driven export into a local conversion
          directory, then upload that file to glance.

        NOTE(review): timing traces are logged at ERROR level throughout
        -- presumably for grep visibility; confirm before changing.
        """
        LOG.error("begin time of copy_volume_to_image is %s" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
        container_format = image_meta.get("container_format")
        image_name = image_meta.get("name")
        file_name = image_meta.get("id")
        if container_format == "vgw_url":
            LOG.debug("get the vgw url")
            # vgw_url = CONF.vgw.vgw_url.get(container_format)
            kwargs = {
                "auth_url": CONF.keystone_authtoken.keystone_auth_url,
                "tenant_name": CONF.keystone_authtoken.tenant_name,
                "username": CONF.keystone_authtoken.user_name,
                "password": CONF.keystone_authtoken.admin_password,
                "insecure": True,
            }
            keystoneclient = kc.Client(**kwargs)

            # Look up the v2v service endpoint in the keystone catalog.
            vgw_url = self._get_management_url(keystoneclient, image_name, service_type="v2v")

            # vgw_url = 'http://162.3.125.52:9999/'
            volume_id = volume["id"]

            # 1.get the provider_volume at provider cloud
            provider_volume_id = self._get_provider_volumeid_from_volume(volume)
            if not provider_volume_id:
                LOG.error("get provider_volume_id of volume %s error" % volume_id)
                raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)
            provider_volume = self._get_provider_volume(provider_volume_id)
            if not provider_volume:
                LOG.error("get provider_volume of volume %s at provider cloud error" % volume_id)
                raise exception_ex.ProviderVolumeNotFound(volume_id=volume_id)

            # Remember where the volume was attached so it can be put back
            # at the end (non-None attachment_status means "attached").
            origin_provider_volume_state = provider_volume.extra.get("attachment_status")

            LOG.error("the origin_provider_volume_info is %s" % str(provider_volume.__dict__))
            origin_attach_node_id = None
            origin_device_name = None
            # 2.judge if the volume is available
            if origin_provider_volume_state is not None:
                origin_attach_node_id = provider_volume.extra["instance_id"]
                origin_device_name = provider_volume.extra["device"]
                self.adpter.detach_volume(provider_volume)
                time.sleep(1)
                # Poll up to ~90 iterations (2s apart) for the detach.
                retry_time = 90
                provider_volume = self._get_provider_volume(provider_volume_id)
                LOG.error("the after detach _volume_info is %s" % str(provider_volume.__dict__))
                while retry_time > 0:
                    if provider_volume and provider_volume.extra.get("attachment_status") is None:
                        break
                    else:
                        time.sleep(2)
                        provider_volume = self._get_provider_volume(provider_volume_id)
                        LOG.error(
                            "the after detach _volume_info is %s,the retry_time is %s"
                            % (str(provider_volume.__dict__), str(retry_time))
                        )
                        retry_time = retry_time - 1
            # 3.attach the volume to vgw host
            try:
                # 3.1 get the vgw host
                vgw_host = self._get_provider_node(self.configuration.cgw_host_id)
                if not vgw_host:
                    raise exception_ex.VgwHostNotFound(Vgw_id=self.configuration.cgw_host_id)
                device_name = self._get_next_device_name(vgw_host)
                LOG.error("**********************************************")
                LOG.error("the volume status %s" % provider_volume.state)
                self.adpter.attach_volume(vgw_host, provider_volume, device_name)
                # query volume status
                time.sleep(1)
                # Poll up to ~120 iterations (2s apart) for the attach.
                retry_time = 120
                provider_volume = self._get_provider_volume(provider_volume_id)
                while retry_time > 0:
                    if provider_volume and provider_volume.extra.get("attachment_status") == "attached":
                        break
                    else:
                        time.sleep(2)
                        provider_volume = self._get_provider_volume(provider_volume_id)
                        retry_time = retry_time - 1

            except Exception as e:
                raise e
            time.sleep(5)
            # Drive the copy on the vgw host through an rpyc service.
            conn = rpyc.connect(self.configuration.cgw_host_ip, int(CONF.vgw.rpc_service_port))
            LOG.error(
                "begin time of copy_volume_to_file is %s" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
            )
            full_file_path = conn.root.copy_volume_to_file(device_name, file_name, CONF.vgw.store_file_dir)
            LOG.error("end time of copy_volume_to_image is %s" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            # todo exception occured clean env
            if not full_file_path:
                self.adpter.detach_volume(provider_volume)
                conn.close()
                raise exception_ex.ProviderExportVolumeError(volume_id=volume_id)
            LOG.error("begin time of push_file_to_vgw is %s" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            push_file_result = conn.root.exposed_push_file_to_vgw(full_file_path, vgw_url)
            LOG.error("end time of push_file_to_vgw is %s" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
            if not push_file_result:
                LOG.error("post file file %s  to %s failed" % (push_file_result, vgw_url))
                self.adpter.detach_volume(provider_volume)
                conn.close()
                raise exception_ex.ProviderExportVolumeError(volume_id=volume_id)
            conn.close()
            # create a empty file to glance
            with image_utils.temporary_file() as tmp:
                image_utils.upload_volume(context, image_service, image_meta, tmp)
            # NOTE(review): temporary_file() likely removes tmp on exit
            # already, making this a no-op belt-and-braces -- confirm.
            fileutils.delete_if_exists(tmp)
            # 4.detach form vgw
            self.adpter.detach_volume(provider_volume)
            time.sleep(1)
            retry_time = 120
            provider_volume = self._get_provider_volume(provider_volume_id)
            while retry_time > 0:
                if provider_volume and provider_volume.extra.get("attachment_status") is None:
                    break
                else:
                    time.sleep(2)
                    provider_volume = self._get_provider_volume(provider_volume_id)
                    retry_time = retry_time - 1
            LOG.error("**********************************************")
            LOG.error("the volume status %s" % provider_volume.state)
            # attach the volume back
            if origin_provider_volume_state is not None:
                origin_attach_node = self._get_provider_node(origin_attach_node_id)

                self.adpter.attach_volume(origin_attach_node, provider_volume, origin_device_name)

        else:
            # Adapter-driven export into a local conversion directory,
            # then upload the resulting file to glance.
            if not os.path.exists(self.configuration.provider_image_conversion_dir):
                fileutils.ensure_tree(self.configuration.provider_image_conversion_dir)
            provider_volume_id = self._get_provider_volumeid_from_volume(volume)
            task_ret = self.adpter.export_volume(
                provider_volume_id,
                self.configuration.provider_image_conversion_dir,
                str(image_meta["id"]),
                cgw_host_id=self.configuration.cgw_host_id,
                cgw_host_ip=self.configuration.cgw_host_ip,
                cgw_username=self.configuration.cgw_username,
                cgw_certificate=self.configuration.cgw_certificate,
                transfer_station=self.configuration.storage_tmp_dir,
            )
            if not task_ret:
                raise exception_ex.ProviderExportVolumeError
            temp_path = os.path.join(self.configuration.provider_image_conversion_dir, str(image_meta["id"]))
            upload_image = temp_path

            try:
                image_utils.upload_volume(context, image_service, image_meta, upload_image)
            finally:
                # Always remove the exported file, even if the upload fails.
                fileutils.delete_if_exists(upload_image)
        LOG.error("end time of copy_volume_to_image is %s" % (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
コード例 #25
0
        def inner(*args, **kwargs):
            """Invoke the wrapped function *f* while holding the lock *name*.

            Callers within this process are serialized through a semaphore
            shared via the module-level ``_semaphores`` map; when *external*
            is true (and process locking is not disabled) an
            ``InterProcessLock`` file lock additionally excludes other
            processes.  Returns whatever *f* returns; an exception from *f*
            propagates unchanged (``retval`` is only bound on success).
            """
            # NOTE(soren): If we ever go natively threaded, this will be racy.
            #              See http://stackoverflow.com/questions/5390569/dyn
            #              amically-allocating-and-destroying-mutexes
            sem = _semaphores.get(name, semaphore.Semaphore())
            if name not in _semaphores:
                # this check is not racy - we're already holding ref locally
                # so GC won't remove the item and there was no IO switch
                # (only valid in greenthreads)
                _semaphores[name] = sem

            with sem:
                LOG.debug(
                    _('Got semaphore "%(lock)s" for method '
                      '"%(method)s"...'), {
                          'lock': name,
                          'method': f.__name__
                      })

                # NOTE(mikal): I know this looks odd
                # Record the lock name on the (green)thread-local store so
                # held locks can be introspected; removed in the finally
                # below once f completes.
                if not hasattr(local.strong_store, 'locks_held'):
                    local.strong_store.locks_held = []
                local.strong_store.locks_held.append(name)

                try:
                    if external and not CONF.disable_process_locking:
                        LOG.debug(
                            _('Attempting to grab file lock "%(lock)s" '
                              'for method "%(method)s"...'), {
                                  'lock': name,
                                  'method': f.__name__
                              })
                        cleanup_dir = False

                        # We need a copy of lock_path because it is non-local
                        local_lock_path = lock_path
                        if not local_lock_path:
                            local_lock_path = CONF.lock_path

                        if not local_lock_path:
                            # No lock path configured anywhere: fall back to
                            # a throwaway temp dir, removed below (see
                            # NOTE(vish) — this supports unit-test cleanup).
                            cleanup_dir = True
                            local_lock_path = tempfile.mkdtemp()

                        if not os.path.exists(local_lock_path):
                            fileutils.ensure_tree(local_lock_path)

                        # NOTE(mikal): the lock name cannot contain directory
                        # separators
                        safe_name = name.replace(os.sep, '_')
                        lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
                        lock_file_path = os.path.join(local_lock_path,
                                                      lock_file_name)

                        try:
                            lock = InterProcessLock(lock_file_path)
                            with lock:
                                LOG.debug(
                                    _('Got file lock "%(lock)s" at '
                                      '%(path)s for method '
                                      '"%(method)s"...'), {
                                          'lock': name,
                                          'path': lock_file_path,
                                          'method': f.__name__
                                      })
                                retval = f(*args, **kwargs)
                        finally:
                            LOG.debug(
                                _('Released file lock "%(lock)s" at '
                                  '%(path)s for method "%(method)s"...'), {
                                      'lock': name,
                                      'path': lock_file_path,
                                      'method': f.__name__
                                  })
                            # NOTE(vish): This removes the tempdir if we needed
                            #             to create one. This is used to
                            #             cleanup the locks left behind by unit
                            #             tests.
                            if cleanup_dir:
                                shutil.rmtree(local_lock_path)
                    else:
                        retval = f(*args, **kwargs)

                finally:
                    local.strong_store.locks_held.remove(name)

            return retval
コード例 #26
0
ファイル: tgt.py プロジェクト: BharatKumarK/cinder
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a tgtd iSCSI target backed by a persistence file.

        Writes the target definition under ``self.volumes_dir`` and asks
        tgt to pick it up via ``self._do_tgt_update``, then verifies the
        target and its backing LUN exist (recreating the LUN if needed).

        :param name: target name, expected as '<prefix>:<vol_id>'
        :param tid: unused by TgtAdm, kept for interface compatibility
        :param lun: unused by TgtAdm, kept for interface compatibility
        :param path: backing-store path for the volume
        :param chap_auth: optional (username, password) tuple for CHAP
        :param kwargs: may contain 'old_name' — stale persistence file to
                       remove after a successful create
        :returns: the target id reported by tgt
        :raises exception.ISCSITargetCreateFailed: target creation failed
        :raises exception.NotFound: target missing after update
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078
        # for now, since we intermittently hit target already exists we're
        # adding some debug info to try and pinpoint what's going on
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = self.configuration.get('iscsi_write_cache', 'on')
        driver = self.iscsi_protocol

        if chap_auth is None:
            volume_conf = self.VOLUME_CONF % (name, path, driver, write_cache)
        else:
            chap_str = 'incominguser %s %s' % chap_auth
            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name, path,
                                                             driver, chap_str,
                                                             write_cache)
        LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(_LW('Persistence file already exists for volume, '
                            'found file at: %s'), volume_path)
        # Context manager so the handle is closed even if the write raises
        # (the previous open/write/close leaked the handle on error).
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)
        LOG.debug(('Created volume path %(vp)s,\n'
                   'content: %(vc)s'),
                  {'vp': volume_path, 'vc': volume_conf})

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            # BUGFIX: placeholders were '%{vol}s'/'%{old_name}s', which are
            # invalid %-conversions; use the correct '%(key)s' form.
            LOG.debug('Detected old persistence file for volume '
                      '%(vol)s at %(old_name)s',
                      {'vol': vol_id, 'old_name': old_name})
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.

            self._do_tgt_update(name)
        except putils.ProcessExecutionError as e:
            if "target already exists" in e.stderr:
                # Adding the additional Warning message below for a clear
                # ER marker (Ref bug: #1398078).
                LOG.warning(_LW('Could not create target because '
                                'it already exists for volume: %s'), vol_id)
                LOG.debug('Exception was: %s', e)

            else:
                LOG.error(_LE("Failed to create iscsi target for Volume "
                              "ID: %(vol_id)s: %(e)s"),
                          {'vol_id': vol_id, 'e': e})

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        # Grab targets list for debug
        # Consider adding a check for lun 0 and 1 for tgtadm
        # before considering this as valid
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_LE("Failed to create iscsi target for Volume "
                          "ID: %(vol_id)s. Please ensure your tgtd config "
                          "file contains 'include %(volumes_dir)s/*'"), {
                      'vol_id': vol_id,
                      'volumes_dir': volumes_dir, })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
コード例 #27
0
    def create_iscsi_target(self,
                            name,
                            tid,
                            lun,
                            path,
                            chap_auth=None,
                            **kwargs):
        """Create a tgtd iSCSI target backed by a persistence file.

        Writes the target definition under ``self.volumes_dir``, asks tgt
        to pick it up via ``self._do_tgt_update``, then verifies the
        target and its backing LUN exist (recreating the LUN if needed).

        :param name: target name, expected as '<prefix>:<vol_id>'
        :param tid: unused by TgtAdm, kept for interface compatibility
        :param lun: unused by TgtAdm, kept for interface compatibility
        :param path: backing-store path for the volume
        :param chap_auth: optional (username, password) tuple for CHAP
        :param kwargs: may contain 'old_name' — stale persistence file to
                       remove after a successful create
        :returns: the target id reported by tgt
        :raises exception.ISCSITargetCreateFailed: target creation failed
        :raises exception.NotFound: target missing after update
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078
        # for now, since we intermittently hit target already exists we're
        # adding some debug info to try and pinpoint what's going on
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        # Lazy %-args instead of eager '%' so formatting only happens when
        # debug logging is enabled.
        LOG.debug("Targets prior to update: %s", out)
        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = self.configuration.get('iscsi_write_cache', 'on')
        driver = self.iscsi_protocol

        if chap_auth is None:
            volume_conf = self.VOLUME_CONF % (name, path, driver, write_cache)
        else:
            chap_str = 'incominguser %s %s' % chap_auth
            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (
                name, path, driver, chap_str, write_cache)
        LOG.debug('Creating iscsi_target for: %s', vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(
                _LW('Persistence file already exists for volume, '
                    'found file at: %s'), volume_path)
        # Context manager so the handle is closed even if the write raises
        # (the previous open/write/close leaked the handle on error).
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)
        LOG.debug(('Created volume path %(vp)s,\n'
                   'content: %(vc)s'), {
                       'vp': volume_path,
                       'vc': volume_conf
                   })

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            # BUGFIX: placeholders were '%{vol}s'/'%{old_name}s', which are
            # invalid %-conversions; use the correct '%(key)s' form.
            LOG.debug(
                'Detected old persistence file for volume '
                '%(vol)s at %(old_name)s', {
                    'vol': vol_id,
                    'old_name': old_name
                })
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.

            self._do_tgt_update(name)
        except putils.ProcessExecutionError as e:
            if "target already exists" in e.stderr:
                # Adding the additional Warning message below for a clear
                # ER marker (Ref bug: #1398078).
                LOG.warning(
                    _LW('Could not create target because '
                        'it already exists for volume: %s'), vol_id)
                LOG.debug('Exception was: %s', e)
            else:
                # BUGFIX: this error log was unconditional, so the benign
                # "target already exists" case was logged both as a warning
                # and an error; gate it on the else branch like the sibling
                # implementation does.
                LOG.error(
                    _LE("Failed to create iscsi target for volume "
                        "id:%(vol_id)s: %(e)s"), {
                            'vol_id': vol_id,
                            'e': e
                        })

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        # Grab targets list for debug
        # Consider adding a check for lun 0 and 1 for tgtadm
        # before considering this as valid
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(
                _LE("Failed to create iscsi target for volume "
                    "id:%(vol_id)s. Please ensure your tgtd config file "
                    "contains 'include %(volumes_dir)s/*'"), {
                        'vol_id': vol_id,
                        'volumes_dir': volumes_dir,
                    })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
コード例 #28
0
ファイル: iscsi.py プロジェクト: netoisstools/cinder
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a tgtd iSCSI target backed by a persistence file.

        Writes the target definition under ``self.volumes_dir``, runs
        ``tgt-admin --update`` to create the target, and verifies the
        target and its backing LUN exist (recreating the LUN if needed).

        :param name: target name, expected as '<prefix>:<vol_id>'
        :param tid: unused by TgtAdm, kept for interface compatibility
        :param lun: unused by TgtAdm, kept for interface compatibility
        :param path: backing-store path for the volume
        :param chap_auth: optional CHAP credentials string
        :param kwargs: may contain 'old_name' — stale persistence file to
                       remove after a successful create
        :returns: the target id reported by tgt
        :raises exception.ISCSITargetCreateFailed: target creation failed
        :raises exception.NotFound: target missing after update
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        if chap_auth is None:
            volume_conf = self.VOLUME_CONF % (name, path)
        else:
            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (name,
                                                             path, chap_auth)

        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        # Context manager so the handle is closed even if the write raises
        # (the previous open/write/close leaked the handle on error).
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)
        # BUGFIX: the format string ended in '%(vc)%' — an invalid
        # conversion specifier that raises ValueError when debug logging
        # is enabled; it must be '%(vc)s'.
        LOG.debug(_('Created volume path %(vp)s,\n'
                    'content: %(vc)s')
                  % {'vp': volume_path, 'vc': volume_conf})

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # with the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = self._execute('tgt-admin', '--update', name,
                                       run_as_root=True)
            LOG.debug("StdOut from tgt-admin --update: %s", out)
            LOG.debug("StdErr from tgt-admin --update: %s", err)

            # Grab targets list for debug
            # Consider adding a check for lun 0 and 1 for tgtadm
            # before considering this as valid
            (out, err) = self._execute('tgtadm',
                                       '--lld',
                                       'iscsi',
                                       '--op',
                                       'show',
                                       '--mode',
                                       'target',
                                       run_as_root=True)
            LOG.debug("Targets after update: %s" % out)
        except putils.ProcessExecutionError as e:
            LOG.warning(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s: %(e)s")
                        % {'vol_id': vol_id, 'e': str(e)})

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s. Please ensure your tgtd config file "
                        "contains 'include %(volumes_dir)s/*'") % {
                            'vol_id': vol_id,
                            'volumes_dir': volumes_dir,
                        })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
コード例 #29
0
ファイル: iscsi.py プロジェクト: TelekomCloud/cinder
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        """Create a tgtd iSCSI target backed by a persistence file.

        Writes an inline target definition under ``CONF.volumes_dir`` and
        runs ``tgt-admin --update`` so tgt creates the target, then looks
        the target id back up to confirm creation.

        :param name: target name, expected as '<prefix>:<vol_id>'
        :param tid: unused by TgtAdm, kept for interface compatibility
        :param lun: unused by TgtAdm, kept for interface compatibility
        :param path: backing-store path for the volume
        :param chap_auth: optional CHAP directive line for the target stanza
        :param kwargs: may contain 'old_name' — stale persistence file to
                       remove after a successful create
        :returns: the target id reported by tgt
        :raises exception.ISCSITargetCreateFailed: target creation failed
        :raises exception.NotFound: target missing after update
        """
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        fileutils.ensure_tree(CONF.volumes_dir)

        vol_id = name.split(':')[1]
        if chap_auth is None:
            volume_conf = """
                <target %s>
                    backing-store %s
                </target>
            """ % (name, path)
        else:
            volume_conf = """
                <target %s>
                    backing-store %s
                    %s
                </target>
            """ % (name, path, chap_auth)

        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
        volumes_dir = CONF.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        # Context manager so the handle is closed even if the write raises
        # (the previous open/write/close leaked the handle on error).
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            (out, err) = self._execute('tgt-admin',
                                       '--update',
                                       name,
                                       run_as_root=True)
        # NOTE(review): sibling implementations catch
        # putils.ProcessExecutionError here; confirm
        # exception.ProcessExecutionError is the class _execute raises in
        # this codebase before changing it.
        except exception.ProcessExecutionError as e:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s: %(e)s")
                      % {'vol_id': vol_id, 'e': str(e)})

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (CONF.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iscsi target for volume "
                        "id:%(vol_id)s. Please ensure your tgtd config file "
                        "contains 'include %(volumes_dir)s/*'") % {
                            'vol_id': vol_id,
                            'volumes_dir': volumes_dir,
                        })
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
コード例 #30
0
ファイル: targets_fixture.py プロジェクト: diogogmt/cinder
    def setUp(self):
        super(TargetDriverFixture, self).setUp()
        self.configuration = conf.Configuration(None)
        self.configuration.append_config_values = mock.Mock(return_value=0)
        self.configuration.safe_get = mock.Mock(side_effect=self.fake_safe_get)
        self.configuration.iscsi_ip_address = '10.9.8.7'
        self.configuration.iscsi_port = 3260

        self.fake_volumes_dir = tempfile.mkdtemp()
        fileutils.ensure_tree(self.fake_volumes_dir)

        self.fake_project_id = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_project_id_2 = 'ed2c1fd4-5fc0-11e4-aa15-123b93f75cba'
        self.fake_volume_id = 'ed2c2222-5fc0-11e4-aa15-123b93f75cba'

        self.addCleanup(self._cleanup)

        self.testvol =\
            {'project_id': self.fake_project_id,
             'name': 'testvol',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': '10.10.7.1:3260 '
                                  'iqn.2010-10.org.openstack:'
                                  'volume-%s 0' % self.fake_volume_id,
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        self.iscsi_target_prefix = 'iqn.2010-10.org.openstack:'
        self.target_string = ('127.0.0.1:3260,1 ' +
                              self.iscsi_target_prefix +
                              'volume-%s' % self.testvol['id'])

        self.testvol_2 =\
            {'project_id': self.fake_project_id_2,
             'name': 'testvol2',
             'size': 1,
             'id': self.fake_volume_id,
             'volume_type_id': None,
             'provider_location': ('%(ip)s:%(port)d%(iqn)svolume-%(vol)s 2' %
                                   {'ip': self.configuration.iscsi_ip_address,
                                    'port': self.configuration.iscsi_port,
                                    'iqn': self.iscsi_target_prefix,
                                    'vol': self.fake_volume_id}),
             'provider_auth': 'CHAP stack-1-a60e2611875f40199931f2'
                              'c76370d66b 2FE0CQ8J196R',
             'provider_geometry': '512 512',
             'created_at': timeutils.utcnow(),
             'host': 'fake_host@lvm#lvm'}

        self.expected_iscsi_properties = \
            {'auth_method': 'CHAP',
             'auth_password': '******',
             'auth_username': '******',
             'encrypted': False,
             'logical_block_size': '512',
             'physical_block_size': '512',
             'target_discovered': False,
             'target_iqn': 'iqn.2010-10.org.openstack:volume-%s' %
                           self.fake_volume_id,
             'target_lun': 0,
             'target_portal': '10.10.7.1:3260',
             'volume_id': self.fake_volume_id}

        self.VOLUME_ID = '83c2e877-feed-46be-8435-77884fe55b45'
        self.VOLUME_NAME = 'volume-' + self.VOLUME_ID
        self.test_vol = (self.iscsi_target_prefix +
                         self.VOLUME_NAME)