Example #1
    def create_cloned_volume(self, volume, src_vref):

        try:
            orig_name = self._get_3par_vol_name(volume['source_volid'])
            vol_name = self._get_3par_vol_name(volume['id'])
            # We need to create a new volume first.  Otherwise you
            # can't delete the original
            new_vol = self.create_volume(volume)

            # make the 3PAR copy the contents.
            # can't delete the original until the copy is done.
            self._copy_volume(orig_name, vol_name)

            return new_vol
        except hpexceptions.HTTPForbidden:
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound:
            raise exception.NotFound()
        except Exception as ex:
            LOG.error(str(ex))
            raise exception.CinderException(ex)

        return None
Example #2
    def test_report_state_newly_connected(self):
        host = 'foo'
        binary = 'bar'
        topic = 'test'
        service_create = {
            'host': host,
            'binary': binary,
            'topic': topic,
            'report_count': 0,
            'availability_zone': 'nova'
        }
        service_ref = {
            'host': host,
            'binary': binary,
            'topic': topic,
            'report_count': 0,
            'availability_zone': 'nova',
            'id': 1
        }

        service.db.service_get_by_args(mox.IgnoreArg(), host,
                                       binary).AndRaise(exception.NotFound())
        service.db.service_create(mox.IgnoreArg(),
                                  service_create).AndReturn(service_ref)
        service.db.service_get(mox.IgnoreArg(),
                               service_ref['id']).AndReturn(service_ref)
        service.db.service_update(mox.IgnoreArg(), service_ref['id'],
                                  mox.ContainsKeyValue('report_count', 1))

        self.mox.ReplayAll()
        serv = service.Service(host, binary, topic,
                               'cinder.tests.test_service.FakeManager')
        serv.start()
        serv.model_disconnected = True
        serv.report_state()

        self.assert_(not serv.model_disconnected)
Example #3
def _handle_bad_status(driver,
                       response,
                       connection_string,
                       method,
                       payload,
                       header,
                       cert_data,
                       sensitive=False,
                       conflict_ok=False):
    if (response.status_code == http_client.BAD_REQUEST
            and connection_string.endswith("api_versions")):
        # Raise the exception, but don't log any error.  We'll just fall
        # back to the old style of determining API version.  We make this
        # request a lot, so logging it is just noise
        raise exception.DateraAPIException
    if response.status_code == http_client.NOT_FOUND:
        raise exception.NotFound(response.json()['message'])
    elif response.status_code in [
            http_client.FORBIDDEN, http_client.UNAUTHORIZED
    ]:
        raise exception.NotAuthorized()
    elif response.status_code == http_client.CONFLICT and conflict_ok:
        # Don't raise, because we're expecting a conflict
        pass
    elif response.status_code == http_client.SERVICE_UNAVAILABLE:
        current_retry = 0
        while current_retry <= driver.retry_attempts:
            LOG.debug("Datera 503 response, trying request again")
            eventlet.sleep(driver.interval)
            resp = driver._request(connection_string, method, payload, header,
                                   cert_data)
            if resp.ok:
                return response.json()
            elif resp.status_code != http_client.SERVICE_UNAVAILABLE:
                driver._raise_response(resp)
    else:
        driver._raise_response(response)
Example #4
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):
        # tid and lun are not used

        vol_id = name.split(':')[1]

        LOG.info(_LI('Creating iscsi_target for volume: %s') % vol_id)

        chap_auth_userid = ""
        chap_auth_password = ""
        if chap_auth is not None:
            (chap_auth_userid, chap_auth_password) = chap_auth

        try:
            command_args = ['cinder-rtstool',
                            'create',
                            path,
                            name,
                            chap_auth_userid,
                            chap_auth_password,
                            self.iscsi_protocol == 'iser']
            utils.execute(*command_args, run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed to create iscsi target for volume "
                          "id:%s.") % vol_id)
            LOG.error(_LE("%s") % e)

            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_LE("Failed to create iscsi target for volume "
                          "id:%s.") % vol_id)
            raise exception.NotFound()

        return tid
Example #5
    def update(self, req, id, body):
        """Enable/Disable scheduling for a cluster."""
        # NOTE(geguileo): This method tries to be consistent with services
        # update endpoint API.

        # Let the wsgi middleware convert NotAuthorized exceptions
        context = req.environ['cinder.context']
        context.authorize(policy.UPDATE_POLICY)

        if id not in ('enable', 'disable'):
            raise exception.NotFound(message=_("Unknown action"))

        disabled = id != 'enable'
        disabled_reason = self._disable_cluster(
            req, body=body) if disabled else self._enable_cluster(req,
                                                                  body=body)

        name = body['name']

        binary = body.get('binary', constants.VOLUME_BINARY)

        # Let wsgi handle NotFound exception
        cluster = objects.Cluster.get_by_id(context,
                                            None,
                                            binary=binary,
                                            name=name)
        cluster.disabled = disabled
        cluster.disabled_reason = disabled_reason
        cluster.save()

        # We return summary data plus the disabled reason
        replication_data = req.api_version_request.matches(
            mv.REPLICATION_CLUSTER)
        ret_val = clusters_view.ViewBuilder.summary(cluster, replication_data)
        ret_val['cluster']['disabled_reason'] = disabled_reason

        return ret_val
Example #6
    def create_snapshot(self, snapshot, client):
        LOG.debug("Create Snapshot\n%s" % pprint.pformat(snapshot))

        try:
            snap_name = self._get_3par_snap_name(snapshot['id'])
            vol_name = self._get_3par_vol_name(snapshot['volume_id'])

            extra = {'volume_name': snapshot['volume_name']}
            vol_id = snapshot.get('volume_id', None)
            if vol_id:
                extra['volume_id'] = vol_id

            try:
                extra['name'] = snapshot['display_name']
            except AttributeError:
                pass

            try:
                extra['description'] = snapshot['display_description']
            except AttributeError:
                pass

            optional = {'comment': json.dumps(extra), 'readOnly': True}
            if self.config.hp3par_snapshot_expiration:
                optional['expirationHours'] = (
                    self.config.hp3par_snapshot_expiration)

            if self.config.hp3par_snapshot_retention:
                optional['retentionHours'] = (
                    self.config.hp3par_snapshot_retention)

            client.createSnapshot(snap_name, vol_name, optional)
        except hpexceptions.HTTPForbidden:
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound:
            raise exception.NotFound()
Example #7
    def test_report_state_newly_connected(self):
        service_ref = {'host': self.host,
                       'binary': self.binary,
                       'topic': self.topic,
                       'report_count': 0,
                       'availability_zone': 'nova',
                       'id': 1}
        with mock.patch.object(objects.service, 'db') as mock_db:
            mock_db.service_get_by_args.side_effect = exception.NotFound()
            mock_db.service_create.return_value = service_ref
            mock_db.service_get.return_value = service_ref

            serv = service.Service(
                self.host,
                self.binary,
                self.topic,
                'cinder.tests.unit.test_service.FakeManager'
            )
            serv.start()
            serv.model_disconnected = True
            serv.report_state()

            self.assertFalse(serv.model_disconnected)
            self.assertTrue(mock_db.service_update.called)
Example #8
    def _send_request(self, object_type, key, request):
        try:
            response = urllib2.urlopen(request)
        except (urllib2.HTTPError, ) as exc:
            if exc.code == 400 and hasattr(exc, 'read'):
                error = json.load(exc)
                err_msg = error['message']
                if err_msg.endswith(OBJ_NOT_FOUND_ERR):
                    LOG.warning(_LW("object %(key)s of "
                                    "type %(typ)s not found"),
                                {'key': key, 'typ': object_type})
                    raise exception.NotFound()
                elif err_msg == VOL_NOT_UNIQUE_ERR:
                    LOG.error(_LE("can't create 2 volumes with the same name"))
                    msg = (_('Volume by this name already exists'))
                    raise exception.VolumeBackendAPIException(data=msg)
                elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
                    LOG.error(_LE("Can't find volume to map %s"), key)
                    raise exception.VolumeNotFound(volume_id=key)
                elif ALREADY_MAPPED_ERR in err_msg:
                    raise exception.XtremIOAlreadyMappedError()
            LOG.error(_LE('Bad response from XMS, %s'), exc.read())
            msg = (_('Exception: %s') % six.text_type(exc))
            raise exception.VolumeDriverException(message=msg)
        if response.code >= 300:
            LOG.error(_LE('bad API response, %s'), response.msg)
            msg = (_('bad response from XMS got http code %(code)d, %(msg)s') %
                   {'code': response.code, 'msg': response.msg})
            raise exception.VolumeBackendAPIException(data=msg)
        return response
Example #9
def _read_config(xml_config_file):
    """Read hds driver specific xml config file."""

    if not os.access(xml_config_file, os.R_OK):
        msg = (_("Can't open config file: %s") % xml_config_file)
        raise exception.NotFound(message=msg)

    try:
        root = ETree.parse(xml_config_file).getroot()
    except Exception:
        msg = (_("Error parsing config file: %s") % xml_config_file)
        raise exception.ConfigNotFound(message=msg)

    # mandatory parameters
    config = {}
    arg_prereqs = ['mgmt_ip0', 'username']
    for req in arg_prereqs:
        config[req] = _xml_read(root, req, 'check')

    # optional parameters
    opt_parameters = [
        'hnas_cmd', 'ssh_enabled', 'chap_enabled', 'cluster_admin_ip0'
    ]
    for req in opt_parameters:
        config[req] = _xml_read(root, req)

    if config['chap_enabled'] is None:
        config['chap_enabled'] = HNAS_DEFAULT_CONFIG['chap_enabled']

    if config['ssh_enabled'] == 'True':
        config['ssh_private_key'] = _xml_read(root, 'ssh_private_key', 'check')
        config['ssh_port'] = _xml_read(root, 'ssh_port')
        config['password'] = _xml_read(root, 'password')
        if config['ssh_port'] is None:
            config['ssh_port'] = HNAS_DEFAULT_CONFIG['ssh_port']
    else:
        # password is mandatory when not using SSH
        config['password'] = _xml_read(root, 'password', 'check')

    if config['hnas_cmd'] is None:
        config['hnas_cmd'] = HNAS_DEFAULT_CONFIG['hnas_cmd']

    config['hdp'] = {}
    config['services'] = {}

    # min one needed
    for svc in ['svc_0', 'svc_1', 'svc_2', 'svc_3']:
        if _xml_read(root, svc) is None:
            continue
        service = {'label': svc}

        # none optional
        for arg in ['volume_type', 'hdp', 'iscsi_ip']:
            service[arg] = _xml_read(root, svc + '/' + arg, 'check')
        config['services'][service['volume_type']] = service
        config['hdp'][service['hdp']] = service['hdp']

    # at least one service required!
    if not config['services']:
        raise exception.ParameterNotFound(param="No service found")

    return config
Example #10
File: datera.py  Project: SavoBit/cinder
    def _issue_api_request(self,
                           resource_type,
                           method='get',
                           resource=None,
                           body=None,
                           action=None,
                           sensitive=False):
        """All API requests to Datera cluster go through this method.

        :param resource_type: the type of the resource
        :param method: the request verb
        :param resource: the identifier of the resource
        :param body: a dict with options for the action_type
        :param action: the action to perform
        :returns: a dict of the response from the Datera cluster
        """
        host = self.configuration.san_ip
        port = self.configuration.datera_api_port
        api_token = self.configuration.datera_api_token
        api_version = self.configuration.datera_api_version

        payload = json.dumps(body, ensure_ascii=False)
        payload = payload.encode('utf-8')

        if not sensitive:
            LOG.debug("Payload for Datera API call: %s", payload)

        header = {
            'Content-Type': 'application/json; charset=utf-8',
            'auth-token': self.auth_token
        }

        protocol = 'http'
        if self.configuration.driver_use_ssl:
            protocol = 'https'

        # TODO(thingee): Auth method through Auth-Token is deprecated. Remove
        # this and client cert verification stuff in the Liberty release.
        if api_token:
            header['Auth-Token'] = api_token

        client_cert = self.configuration.driver_client_cert
        client_cert_key = self.configuration.driver_client_cert_key
        cert_data = None

        if client_cert:
            protocol = 'https'
            cert_data = (client_cert, client_cert_key)

        connection_string = '%s://%s:%s/v%s/%s' % (protocol, host, port,
                                                   api_version, resource_type)

        if resource is not None:
            connection_string += '/%s' % resource
        if action is not None:
            connection_string += '/%s' % action

        LOG.debug("Endpoint for Datera API call: %s", connection_string)
        try:
            response = getattr(requests, method)(connection_string,
                                                 data=payload,
                                                 headers=header,
                                                 verify=False,
                                                 cert=cert_data)
        except requests.exceptions.RequestException as ex:
            msg = _('Failed to make a request to Datera cluster endpoint due '
                    'to the following reason: %s') % six.text_type(ex.message)
            LOG.error(msg)
            raise exception.DateraAPIException(msg)

        data = response.json()
        if not sensitive:
            LOG.debug("Results of Datera API call: %s", data)

        if not response.ok:
            if response.status_code == 404:
                raise exception.NotFound(data['message'])
            elif response.status_code in [403, 401]:
                raise exception.NotAuthorized()
            else:
                msg = _('Request to Datera cluster returned bad status:'
                        ' %(status)s | %(reason)s') % {
                            'status': response.status_code,
                            'reason': response.reason
                        }
                LOG.error(msg)
                raise exception.DateraAPIException(msg)

        return data
Example #11
def xms_request(object_type='volumes',
                request_typ='GET',
                data=None,
                name=None,
                idx=None,
                ver='v1'):
    if object_type == 'snapshots':
        object_type = 'volumes'

    try:
        res = xms_data[object_type]
    except KeyError:
        raise exception.VolumeDriverException
    if request_typ == 'GET':
        if name or idx:
            return get_obj(object_type, name, idx)
        else:
            if data and data.get('full') == 1:
                return {object_type: list(res.values())}
            else:
                return {
                    object_type: [{
                        "href":
                        "/%s/%d" % (object_type, obj['index']),
                        "name":
                        obj.get('name')
                    } for obj in res.values()]
                }
    elif request_typ == 'POST':
        data = fix_data(data, object_type)
        name_key = get_xms_obj_key(data)
        try:
            if name_key and get_xms_obj_by_name(object_type, data[name_key]):
                raise (exception.VolumeBackendAPIException(
                    'Volume by this name already exists'))
        except exception.NotFound:
            pass
        data['index'] = len(xms_data[object_type]) + 1
        xms_data[object_type][data['index']] = data
        # find the name key
        if name_key:
            data['name'] = data[name_key]
        if object_type == 'lun-maps':
            data['ig-name'] = data['ig-id']

        return {
            "links": [{
                "href":
                "/%s/%d" % (object_type, data[typ2id[object_type]][2])
            }]
        }
    elif request_typ == 'DELETE':
        if object_type == 'consistency-group-volumes':
            data = [
                cgv for cgv in xms_data['consistency-group-volumes'].values()
                if cgv['vol-id'] == data['vol-id']
                and cgv['cg-id'] == data['cg-id']
            ][0]
        else:
            data = get_obj(object_type, name, idx)['content']
        if data:
            del xms_data[object_type][data['index']]
        else:
            raise exception.NotFound()
    elif request_typ == 'PUT':
        obj = get_obj(object_type, name, idx)['content']
        data = fix_data(data, object_type)
        del data['index']
        obj.update(data)
Example #12
def xms_bad_request(object_type='volumes', request_typ='GET', data=None,
                    name=None, idx=None):
    if request_typ == 'GET':
        raise exception.NotFound()
    elif request_typ == 'POST':
        raise exception.VolumeBackendAPIException('failed to create ig')
Example #13
                            "short listing used share."))
        return None

    def _construct_image_nfs_url(self, image_location):
        """Construct direct url for nfs backend.

             It creates direct url from image_location
             which is a tuple with direct_url and locations.
             Returns array of urls with nfs scheme if nfs store
             else returns url. It needs to be verified
             by backend before use.
        """

        direct_url, locations = image_location
        if not direct_url and not locations:
            raise exception.NotFound(_('Image location not present.'))

        urls = []
        if not locations:
            urls.append(direct_url)
        else:
            for location in locations:
                if not location['metadata']:
                    continue
                location_type = location['metadata'].get('type')
                if not location_type or location_type.lower() != "nfs":
                    continue
                share_location = location['metadata'].get('share_location')
                mountpoint = location['metadata'].get('mountpoint')
                if not share_location or not mountpoint:
                    continue
Example #14
    def create_iser_target(self, name, tid, lun, path,
                           chap_auth=None, **kwargs):
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        fileutils.ensure_tree(CONF.volumes_dir)

        vol_id = name.split(':')[1]
        if chap_auth is None:
            volume_conf = """
                <target %s>
                    driver iser
                    backing-store %s
                </target>
            """ % (name, path)
        else:
            volume_conf = """
                <target %s>
                    driver iser
                    backing-store %s
                    %s
                </target>
            """ % (name, path, chap_auth)

        LOG.info(_('Creating iser_target for: %s') % vol_id)
        volumes_dir = CONF.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            (out, err) = self._execute('tgt-admin',
                                       '--update',
                                       name,
                                       run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.error(_("Failed to create iser target for volume "
                        "id:%(vol_id)s: %(e)s")
                      % {'vol_id': vol_id, 'e': str(e)})

            #Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISERTargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (CONF.iser_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_("Failed to create iser target for volume "
                        "id:%(vol_id)s. Please ensure your tgtd config file "
                        "contains 'include %(volumes_dir)s/*'") %
                      {'vol_id': vol_id, 'volumes_dir': volumes_dir})
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
Example #15
def xms_bad_request(object_type='volumes', method='GET', data=None,
                    name=None, idx=None, ver='v1'):
    if method == 'GET':
        raise exception.NotFound()
    elif method == 'POST':
        raise exception.VolumeBackendAPIException('Failed to create ig')
Example #16
    def _copy_from_img_service(self, context, volume, image_service,
                               image_id):
        """Copies from the image service using copy offload."""
        LOG.debug("Trying copy from image service using copy offload.")
        image_loc = image_service.get_location(context, image_id)
        image_loc = self._construct_image_nfs_url(image_loc)
        conn, dr = self._check_get_nfs_path_segs(image_loc)
        if conn:
            src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
        else:
            raise exception.NotFound(_("Source host details not found."))
        (__, ___, img_file) = image_loc.rpartition('/')
        src_path = os.path.join(dr, img_file)
        dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
            volume['id']))
        # tmp file is required to deal with img formats
        tmp_img_file = six.text_type(uuid.uuid4())
        col_path = self.configuration.netapp_copyoffload_tool_path
        img_info = image_service.show(context, image_id)
        dst_share = self._get_provider_location(volume['id'])
        self._check_share_can_hold_size(dst_share, img_info['size'])
        run_as_root = self._execute_as_root

        dst_dir = self._get_mount_point_for_share(dst_share)
        dst_img_local = os.path.join(dst_dir, tmp_img_file)
        try:
            # If src and dst share not equal
            if (('%s:%s' % (src_ip, dr)) !=
                    ('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
                dst_img_serv_path = os.path.join(
                    self._get_export_path(volume['id']), tmp_img_file)
                self._execute(col_path, src_ip, dst_ip, src_path,
                              dst_img_serv_path, run_as_root=run_as_root,
                              check_exit_code=0)
            else:
                self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)
            self._discover_file_till_timeout(dst_img_local, timeout=120)
            LOG.debug('Copied image %(img)s to tmp file %(tmp)s.',
                      {'img': image_id, 'tmp': tmp_img_file})
            dst_img_cache_local = os.path.join(dst_dir,
                                               'img-cache-%s' % image_id)
            if img_info['disk_format'] == 'raw':
                LOG.debug('Image is raw %s.', image_id)
                self._clone_file_dst_exists(dst_share, tmp_img_file,
                                            volume['name'], dest_exists=True)
                self._move_nfs_file(dst_img_local, dst_img_cache_local)
                LOG.debug('Copied raw image %(img)s to volume %(vol)s.',
                          {'img': image_id, 'vol': volume['id']})
            else:
                LOG.debug('Image will be converted to raw %s.', image_id)
                img_conv = six.text_type(uuid.uuid4())
                dst_img_conv_local = os.path.join(dst_dir, img_conv)

                # Checking against image size which is approximate check
                self._check_share_can_hold_size(dst_share, img_info['size'])
                try:
                    image_utils.convert_image(dst_img_local,
                                              dst_img_conv_local, 'raw',
                                              run_as_root=run_as_root)
                    data = image_utils.qemu_img_info(dst_img_conv_local,
                                                     run_as_root=run_as_root)
                    if data.file_format != "raw":
                        raise exception.InvalidResults(
                            _("Converted to raw, but format is now %s.")
                            % data.file_format)
                    else:
                        self._clone_file_dst_exists(dst_share, img_conv,
                                                    volume['name'],
                                                    dest_exists=True)
                        self._move_nfs_file(dst_img_conv_local,
                                            dst_img_cache_local)
                        LOG.debug('Copied locally converted raw image'
                                  ' %(img)s to volume %(vol)s.',
                                  {'img': image_id, 'vol': volume['id']})
                finally:
                    if os.path.exists(dst_img_conv_local):
                        self._delete_file_at_path(dst_img_conv_local)
            self._post_clone_image(volume)
        finally:
            if os.path.exists(dst_img_local):
                self._delete_file_at_path(dst_img_local)
Example #17
    def create_iscsi_target(self,
                            name,
                            tid,
                            lun,
                            path,
                            chap_auth=None,
                            **kwargs):
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility
        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = kwargs.get('write_cache', 'on')
        if chap_auth is None:
            volume_conf = self.VOLUME_CONF % (name, path, write_cache)
        else:
            chap_str = re.sub('^IncomingUser ', 'incominguser ', chap_auth)
            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (
                name, path, chap_str, write_cache)
        LOG.info(_LI('Creating iscsi_target for: %s') % vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()
        LOG.debug(('Created volume path %(vp)s,\n'
                   'content: %(vc)s') % {
                       'vp': volume_path,
                       'vc': volume_conf
                   })

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # with the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = self._execute('tgt-admin',
                                       '--update',
                                       name,
                                       run_as_root=True)
            LOG.debug("StdOut from tgt-admin --update: %s", out)
            LOG.debug("StdErr from tgt-admin --update: %s", err)

            # Grab targets list for debug
            # Consider adding a check for lun 0 and 1 for tgtadm
            # before considering this as valid
            (out, err) = self._execute('tgtadm',
                                       '--lld',
                                       'iscsi',
                                       '--op',
                                       'show',
                                       '--mode',
                                       'target',
                                       run_as_root=True)
            LOG.debug("Targets after update: %s" % out)
        except putils.ProcessExecutionError as e:
            LOG.warning(
                _LW("Failed to create iscsi target for volume "
                    "id:%(vol_id)s: %(e)s") % {
                        'vol_id': vol_id,
                        'e': e
                    })

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(
                _LE("Failed to create iscsi target for volume "
                    "id:%(vol_id)s. Please ensure your tgtd config file "
                    "contains 'include %(volumes_dir)s/*'") % {
                        'vol_id': vol_id,
                        'volumes_dir': volumes_dir,
                    })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
Example #18
def xms_request(object_type='volumes',
                request_typ='GET',
                data=None,
                name=None,
                idx=None):
    if object_type == 'snapshots':
        object_type = 'volumes'

    obj_key = name if name else idx
    if request_typ == 'GET':
        try:
            res = xms_data[object_type]
        except KeyError:
            raise exception.VolumeDriverException
        if name or idx:
            if obj_key not in res:
                raise exception.NotFound()
            return {"content": res[obj_key]}
        else:
            return {
                object_type: [{
                    "href": "/%s/%d" % (object_type, obj['index']),
                    "name": obj.get('name')
                } for obj in res.values()]
            }
    elif request_typ == 'POST':
        data = fix_data(data, object_type)
        data['index'] = len(xms_data[object_type]) + 1
        xms_data[object_type][data['index']] = data
        # find the name key
        name_key = get_xms_obj_key(data)
        if object_type == 'lun-maps':
            data['ig-name'] = data['ig-id']
        if name_key:
            if data[name_key] in xms_data[object_type]:
                raise (exception.VolumeBackendAPIException(
                    'Volume by this name already exists'))
            xms_data[object_type][data[name_key]] = data

        return {
            "links": [{
                "href":
                "/%s/%d" % (object_type, data[typ2id[object_type]][2])
            }]
        }
    elif request_typ == 'DELETE':
        if obj_key in xms_data[object_type]:
            data = xms_data[object_type][obj_key]
            del xms_data[object_type][data['index']]
            del xms_data[object_type][data[typ2id[object_type]][1]]
        else:
            LOG.error('Trying to delete a missing object %s',
                      six.text_type(obj_key))
            raise exception.NotFound()
    elif request_typ == 'PUT':
        if obj_key in xms_data[object_type]:
            obj = xms_data[object_type][obj_key]
            obj.update(data)
            key = get_xms_obj_key(data)
            if key:
                xms_data[object_type][data[key]] = obj
        else:
            LOG.error('Trying to update a missing object %s',
                      six.text_type(obj_key))
            raise exception.NotFound()
Example #19
def get_xms_obj_by_name(typ, name):
    for item in xms_data[typ].values():
        if 'name' in item and item['name'] == name:
            return item
    raise exception.NotFound()
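
The helper above is the pattern distilled: look an object up and translate "missing" into cinder's exception.NotFound so callers can branch on it. Below is a minimal, self-contained sketch of the caller side, assuming only that cinder is importable; the lookup helper and inventory dict are illustrative and not part of any driver on this page.

from cinder import exception


def lookup(store, key):
    # Translate a plain KeyError into cinder's NotFound, mirroring the
    # raise in get_xms_obj_by_name() above.
    try:
        return store[key]
    except KeyError:
        raise exception.NotFound()


inventory = {'vol-1': {'index': 1}}
try:
    lookup(inventory, 'vol-2')
except exception.NotFound:
    # The caller decides what "missing" means: fall back, log, or re-raise.
    print('object not found, falling back')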
Example #20
    def create_iscsi_target(self,
                            name,
                            tid,
                            lun,
                            path,
                            chap_auth=None,
                            **kwargs):

        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        volumes_dir = self._get_volumes_dir()
        fileutils.ensure_tree(volumes_dir)

        vol_id = name.split(':')[1]

        cfg_port = kwargs.get('portals_port')
        cfg_ips = kwargs.get('portals_ips')

        portals = ','.join(
            map(lambda ip: self._get_portal(ip, cfg_port), cfg_ips))

        if chap_auth is None:
            volume_conf = self.TARGET_FMT % (name, path, portals)
        else:
            volume_conf = self.TARGET_FMT_WITH_CHAP % (name, path, portals,
                                                       '"%s":"%s"' % chap_auth)
        LOG.debug('Creating iscsi_target for: %s', vol_id)
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(
                _LW('Persistence file already exists for volume, '
                    'found file at: %s'), volume_path)
        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()
        LOG.debug('Created volume path %(vp)s,\n'
                  'content: %(vc)s', {
                      'vp': volume_path,
                      'vc': volume_conf
                  })

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name:
            LOG.debug(
                'Detected old persistence file for volume '
                '%(vol)s at %(old_name)s', {
                    'vol': vol_id,
                    'old_name': old_name
                })
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = utils.execute('iscsictl',
                                       '-S',
                                       'target=%s' % name,
                                       '-f',
                                       volume_path,
                                       '-x',
                                       self.config,
                                       run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(
                _LE("Failed to create iscsi target for volume "
                    "id:%(vol_id)s: %(e)s"), {
                        'vol_id': vol_id,
                        'e': e
                    })

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        finally:
            LOG.debug("StdOut from iscsictl -S: %s", out)
            LOG.debug("StdErr from iscsictl -S: %s", err)

        # Grab targets list for debug
        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(
                _LE("Failed to create iscsi target for volume "
                    "id:%(vol_id)s. Please verify your configuration "
                    "in %(volumes_dir)s'"), {
                        'vol_id': vol_id,
                        'volumes_dir': volumes_dir,
                    })
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
Example #21
    def create_iscsi_target(self, name, tid, lun, path,
                            chap_auth=None, **kwargs):

        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        # NOTE(jdg): Remove this when we get to the bottom of bug: #1398078
        # for now, since we intermittently hit target already exists we're
        # adding some debug info to try and pinpoint what's going on
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = self.configuration.get('iscsi_write_cache', 'on')
        driver = self.iscsi_protocol
        chap_str = ''

        if chap_auth is not None:
            chap_str = 'incominguser %s %s' % chap_auth

        target_flags = self.configuration.get('iscsi_target_flags', '')
        if target_flags:
            target_flags = 'bsoflags ' + target_flags

        volume_conf = self.VOLUME_CONF % {
            'name': name, 'path': path, 'driver': driver,
            'chap_auth': chap_str, 'target_flags': target_flags,
            'write_cache': write_cache}

        LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(_LW('Persistence file already exists for volume, '
                            'found file at: %s'), volume_path)
        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()
        LOG.debug(('Created volume path %(vp)s,\n'
                   'content: %(vc)s'),
                  {'vp': volume_path, 'vc': volume_conf})

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            LOG.debug('Detected old persistence file for volume '
                      '%(vol)s at %(old_name)s',
                      {'vol': vol_id, 'old_name': old_name})
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.

            self._do_tgt_update(name)
        except putils.ProcessExecutionError as e:
            if "target already exists" in e.stderr:
                # Adding the additional Warning message below for a clear
                # ER marker (Ref bug: #1398078).
                LOG.warning(_LW('Could not create target because '
                                'it already exists for volume: %s'), vol_id)
                LOG.debug('Exception was: %s', e)

            else:
                LOG.error(_LE("Failed to create iscsi target for Volume "
                              "ID: %(vol_id)s: %(e)s"),
                          {'vol_id': vol_id, 'e': e})

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        # Grab targets list for debug
        # Consider adding a check for lun 0 and 1 for tgtadm
        # before considering this as valid
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_LE("Failed to create iscsi target for Volume "
                          "ID: %(vol_id)s. Please ensure your tgtd config "
                          "file contains 'include %(volumes_dir)s/*'"), {
                      'vol_id': vol_id,
                      'volumes_dir': volumes_dir, })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
Example #22
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        TODO: support using the size from the user.
        """
        LOG.debug("Create Volume from Snapshot\n%s\n%s" % (pprint.pformat(
            volume['display_name']), pprint.pformat(snapshot['display_name'])))

        if snapshot['volume_size'] != volume['size']:
            err = "You cannot change size of the volume.  It must "
            "be the same as the snapshot."
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        try:
            snap_name = self._get_3par_snap_name(snapshot['id'])
            volume_name = self._get_3par_vol_name(volume['id'])

            extra = {'volume_id': volume['id'], 'snapshot_id': snapshot['id']}

            volume_type = None
            type_id = volume.get('volume_type_id', None)
            vvs_name = None
            qos = {}
            hp3par_keys = {}
            if type_id is not None:
                volume_type = self._get_volume_type(type_id)
                hp3par_keys = self._get_keys_by_volume_type(volume_type)
                vvs_name = self._get_key_value(hp3par_keys, 'vvs')
                if vvs_name is None:
                    qos = self._get_qos_by_volume_type(volume_type)

            name = volume.get('display_name', None)
            if name:
                extra['display_name'] = name

            description = volume.get('display_description', None)
            if description:
                extra['description'] = description

            optional = {'comment': json.dumps(extra), 'readOnly': False}

            self.client.createSnapshot(volume_name, snap_name, optional)
            if qos or vvs_name is not None:
                cpg = self._get_key_value(hp3par_keys, 'cpg',
                                          self.config.hp3par_cpg)
                try:
                    self._add_volume_to_volume_set(volume, volume_name, cpg,
                                                   vvs_name, qos)
                except Exception as ex:
                    # Delete the volume if unable to add it to the volume set
                    self.client.deleteVolume(volume_name)
                    LOG.error(str(ex))
                    raise exception.CinderException(ex.get_description())
        except hpexceptions.HTTPForbidden as ex:
            LOG.error(str(ex))
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound as ex:
            LOG.error(str(ex))
            raise exception.NotFound()
        except Exception as ex:
            LOG.error(str(ex))
            raise exception.CinderException(ex.get_description())
Example #23
    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot.

        """
        LOG.debug("Create Volume from Snapshot\n%s\n%s" %
                  (pprint.pformat(volume['display_name']),
                   pprint.pformat(snapshot['display_name'])))

        if volume['size'] < snapshot['volume_size']:
            err = ("You cannot reduce size of the volume.  It must "
                   "be greater than or equal to the snapshot.")
            LOG.error(err)
            raise exception.InvalidInput(reason=err)

        try:
            snap_name = self._get_3par_snap_name(snapshot['id'])
            volume_name = self._get_3par_vol_name(volume['id'])

            extra = {'volume_id': volume['id'],
                     'snapshot_id': snapshot['id']}

            volume_type = None
            type_id = volume.get('volume_type_id', None)
            vvs_name = None
            qos = {}
            hp3par_keys = {}
            if type_id is not None:
                volume_type = self._get_volume_type(type_id)
                hp3par_keys = self._get_keys_by_volume_type(volume_type)
                vvs_name = self._get_key_value(hp3par_keys, 'vvs')
                if vvs_name is None:
                    qos = self._get_qos_by_volume_type(volume_type)

            name = volume.get('display_name', None)
            if name:
                extra['display_name'] = name

            description = volume.get('display_description', None)
            if description:
                extra['description'] = description

            optional = {'comment': json.dumps(extra),
                        'readOnly': False}

            self.client.createSnapshot(volume_name, snap_name, optional)

            # Grow the snapshot if needed
            growth_size = volume['size'] - snapshot['volume_size']
            if growth_size > 0:
                try:
                    LOG.debug(_('Converting to base volume type: %s.') %
                              volume['id'])
                    self._convert_to_base_volume(volume)
                    growth_size_mib = growth_size * units.GiB / units.MiB
                    LOG.debug(_('Growing volume: %(id)s by %(size)s GiB.') %
                              {'id': volume['id'], 'size': growth_size})
                    self.client.growVolume(volume_name, growth_size_mib)
                except Exception as ex:
                    LOG.error(_("Error extending volume %(id)s. Ex: %(ex)s") %
                              {'id': volume['id'], 'ex': str(ex)})
                    # Delete the volume if unable to grow it
                    self.client.deleteVolume(volume_name)
                    raise exception.CinderException(ex)

            if qos or vvs_name is not None:
                cpg = self._get_key_value(hp3par_keys, 'cpg',
                                          self.config.hp3par_cpg)
                try:
                    self._add_volume_to_volume_set(volume, volume_name,
                                                   cpg, vvs_name, qos)
                except Exception as ex:
                    # Delete the volume if unable to add it to the volume set
                    self.client.deleteVolume(volume_name)
                    LOG.error(str(ex))
                    raise exception.CinderException(ex)
        except hpexceptions.HTTPForbidden as ex:
            LOG.error(str(ex))
            raise exception.NotAuthorized()
        except hpexceptions.HTTPNotFound as ex:
            LOG.error(str(ex))
            raise exception.NotFound()
        except Exception as ex:
            LOG.error(str(ex))
            raise exception.CinderException(ex)
Example #24
File: iscsi.py  Project: twigs/cinder
class TgtAdm(TargetAdmin):
    """iSCSI target administration using tgtadm."""
    def __init__(self, execute=utils.execute):
        super(TgtAdm, self).__init__('tgtadm', execute)

    def _get_target(self, iqn):
        (out, err) = self._execute('tgt-admin', '--show', run_as_root=True)
        lines = out.split('\n')
        for line in lines:
            if iqn in line:
                parsed = line.split()
                tid = parsed[1]
                return tid[:-1]

        return None

    def create_iscsi_target(self,
                            name,
                            tid,
                            lun,
                            path,
                            chap_auth=None,
                            **kwargs):
        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        utils.ensure_tree(FLAGS.volumes_dir)

        vol_id = name.split(':')[1]
        if chap_auth is None:
            volume_conf = """
                <target %s>
                    backing-store %s
                </target>
            """ % (name, path)
        else:
            volume_conf = """
                <target %s>
                    backing-store %s
                    %s
                </target>
            """ % (name, path, chap_auth)

        LOG.info(_('Creating iscsi_target for: %s') % vol_id)
        volumes_dir = FLAGS.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        f = open(volume_path, 'w+')
        f.write(volume_conf)
        f.close()

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            (out, err) = self._execute('tgt-admin',
                                       '--update',
                                       name,
                                       run_as_root=True)
        except exception.ProcessExecutionError as e:
            LOG.error(
                _("Failed to create iscsi target for volume "
                  "id:%(vol_id)s.") % locals())

            #Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(
                _("Failed to create iscsi target for volume "
                  "id:%(vol_id)s. Please ensure your tgtd config file "
                  "contains 'include %(volumes_dir)s/*'") % locals())
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
Example #25
    def _copy_from_img_service(self, context, volume, image_service, image_id):
        """Copies from the image service using copy offload."""

        LOG.debug("Trying copy from image service using copy offload.")
        image_loc = image_service.get_location(context, image_id)
        locations = self._construct_image_nfs_url(image_loc)
        src_ip = None
        selected_loc = None
        cloned = False

        # this will match the first location that has a valid IP on cluster
        for location in locations:
            conn, dr = self._check_get_nfs_path_segs(location)
            if conn:
                try:
                    src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
                    selected_loc = location
                    break
                except exception.NotFound:
                    pass
        if src_ip is None:
            raise exception.NotFound(_("Source host details not found."))
        (__, ___, img_file) = selected_loc.rpartition('/')
        src_path = os.path.join(dr, img_file)

        dst_ip, vol_path = self._get_destination_ip_and_path(volume)
        share_path = vol_path.rsplit("/", 1)[0]
        dst_share = dst_ip + ':' + share_path

        # tmp file is required to deal with img formats
        tmp_img_file = six.text_type(uuid.uuid4())
        col_path = self.configuration.netapp_copyoffload_tool_path
        img_info = image_service.show(context, image_id)
        self._check_share_can_hold_size(dst_share, img_info['size'])
        run_as_root = self._execute_as_root

        dst_dir = self._get_mount_point_for_share(dst_share)
        dst_img_local = os.path.join(dst_dir, tmp_img_file)

        try:
            dst_img_serv_path = os.path.join(share_path, tmp_img_file)
            # Always run copy offload as regular user, it's sufficient
            # and rootwrap doesn't allow copy offload to run as root
            # anyways.
            self._execute(col_path,
                          src_ip,
                          dst_ip,
                          src_path,
                          dst_img_serv_path,
                          run_as_root=False,
                          check_exit_code=0)

            self._discover_file_till_timeout(dst_img_local, timeout=120)
            LOG.debug('Copied image %(img)s to tmp file %(tmp)s.', {
                'img': image_id,
                'tmp': tmp_img_file
            })
            dst_img_cache_local = os.path.join(dst_dir,
                                               'img-cache-%s' % image_id)
            if img_info['disk_format'] == 'raw':
                LOG.debug('Image is raw %s.', image_id)
                self._clone_file_dst_exists(dst_share,
                                            tmp_img_file,
                                            volume['name'],
                                            dest_exists=True)
                self._move_nfs_file(dst_img_local, dst_img_cache_local)
                LOG.debug('Copied raw image %(img)s to volume %(vol)s.', {
                    'img': image_id,
                    'vol': volume['id']
                })
            else:
                LOG.debug('Image will be converted to raw %s.', image_id)
                img_conv = six.text_type(uuid.uuid4())
                dst_img_conv_local = os.path.join(dst_dir, img_conv)

                # Check against the image size, which is an approximate check
                self._check_share_can_hold_size(dst_share, img_info['size'])
                try:
                    image_utils.convert_image(dst_img_local,
                                              dst_img_conv_local,
                                              'raw',
                                              run_as_root=run_as_root)
                    data = image_utils.qemu_img_info(dst_img_conv_local,
                                                     run_as_root=run_as_root)
                    if data.file_format != "raw":
                        raise exception.InvalidResults(
                            _("Converted to raw, but format is now %s.") %
                            data.file_format)
                    else:
                        self._clone_file_dst_exists(dst_share,
                                                    img_conv,
                                                    volume['name'],
                                                    dest_exists=True)
                        self._move_nfs_file(dst_img_conv_local,
                                            dst_img_cache_local)
                        LOG.debug(
                            'Copied locally converted raw image'
                            ' %(img)s to volume %(vol)s.', {
                                'img': image_id,
                                'vol': volume['id']
                            })
                finally:
                    if os.path.exists(dst_img_conv_local):
                        self._delete_file_at_path(dst_img_conv_local)
            cloned = True
        finally:
            if os.path.exists(dst_img_local):
                self._delete_file_at_path(dst_img_local)

        return cloned
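
The method above depends on _discover_file_till_timeout to notice when the offloaded copy shows up on the mounted destination share, and that helper is not shown. A minimal polling sketch of the same idea is given below; the name wait_for_file, the default interval, and the boolean return convention are assumptions for illustration.

import os
import time

def wait_for_file(path, timeout=120, interval=2):
    """Poll until 'path' exists; return True on success, False on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.exists(path):
            return True
        time.sleep(interval)
    return False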
示例#26
0
    def initialize_connection(self, volume, connector):
        # FIXME(shay-halsband): query the cluster index instead of using
        # the 1st one
        try:
            sys = self.req('clusters', idx=1)['content']
        except exception.NotFound:
            msg = _("XtremIO not initialized correctly, no clusters found")
            raise exception.VolumeBackendAPIException(data=msg)
        use_chap = (sys.get('chap-authentication-mode', 'disabled') !=
                    'disabled')
        discovery_chap = (sys.get('chap-discovery-mode', 'disabled') !=
                          'disabled')
        initiator = self._get_initiator(connector)
        try:
            # check if the IG already exists
            ig = self.req('initiator-groups',
                          'GET',
                          name=self._get_ig(connector))['content']
        except exception.NotFound:
            # create an initiator group to hold the initiator
            data = {'ig-name': self._get_ig(connector)}
            self.req('initiator-groups', 'POST', data)
            try:
                ig = self.req('initiator-groups',
                              name=self._get_ig(connector))['content']
            except exception.NotFound:
                raise (exception.VolumeBackendAPIException(
                    data=_("Failed to create IG, %s") %
                    self._get_ig(connector)))
        try:
            init = self.req('initiators', 'GET', name=initiator)['content']
            if use_chap:
                chap_passwd = init['chap-authentication-initiator-password']
                # delete the initiator to create a new one with password
                if not chap_passwd:
                    LOG.info(
                        _LI('initiator has no password while using chap, '
                            'removing it'))
                    self.req('initiators', 'DELETE', name=initiator)
                    # check if the initiator already exists
                    raise exception.NotFound()
        except exception.NotFound:
            # create an initiator
            data = {
                'initiator-name': initiator,
                'ig-id': initiator,
                'port-address': initiator
            }
            if use_chap:
                data['initiator-authentication-user-name'] = 'chap_user'
                chap_passwd = self._get_password()
                data['initiator-authentication-password'] = chap_passwd
            if discovery_chap:
                data['initiator-discovery-user-name'] = 'chap_user'
                data['initiator-discovery-password'] = self._get_password()
            self.req('initiators', 'POST', data)
        # lun mapping
        lunmap = self.create_lun_map(volume, ig)

        properties = self._get_iscsi_properties(lunmap)

        if use_chap:
            properties['auth_method'] = 'CHAP'
            properties['auth_username'] = 'chap_user'
            properties['auth_password'] = chap_passwd

        LOG.debug('init conn params:\n%s', properties)
        return {'driver_volume_type': 'iscsi', 'data': properties}
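
When CHAP is enabled, the snippet asks self._get_password() for a fresh secret, but that helper is not included above. A hedged sketch of such a generator follows; the function name, length, and character set are assumptions rather than the driver's actual choices.

import random
import string

def make_chap_password(length=12):
    """Return a random alphanumeric secret suitable for use as a CHAP password."""
    chars = string.ascii_letters + string.digits
    return ''.join(random.SystemRandom().choice(chars) for _ in range(length))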
示例#27
0
                              "id:%s."), vol_id)

=======
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed to create iscsi target for volume "
                          "id:%s.") % vol_id)
            LOG.error(_LE("%s") % e)
>>>>>>> refs/remotes/openstack/stable/kilo
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(_LE("Failed to create iscsi target for volume "
                          "id:%s."), vol_id)
            raise exception.NotFound()

        # We make changes persistent
        self._persist_configuration(vol_id)

        return tid

    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        LOG.info(_LI('Removing iscsi_target: %s'), vol_id)
        vol_uuid_name = vol_name
        iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_name)

        try:
            self._execute('cinder-rtstool',
                          'delete',
                          iqn,
示例#28
0
    def req(self,
            object_type='volumes',
            request_typ='GET',
            data=None,
            name=None,
            idx=None):
        if name and idx:
            LOG.error(_("req can't handle both name and index"))
            raise ValueError("can't handle both name and idx")

        url = '%s/%s' % (self.base_url, object_type)
        key = None
        if name:
            url = '%s?%s' % (url, urllib.urlencode({'name': name}))
            key = name
        elif idx:
            url = '%s/%d' % (url, idx)
            key = str(idx)
        if data and request_typ == 'GET':
            # append query parameters for GET requests
            url = '%s?%s' % (url, urllib.urlencode(data))
            request = urllib2.Request(url)
        elif data:
            LOG.debug('data: %s', json.dumps(data))
            request = urllib2.Request(url, json.dumps(data))
        else:
            request = urllib2.Request(url)
        LOG.debug('querying url: %s', url)
        request.get_method = lambda: request_typ
        request.add_header("Authorization", "Basic %s" % (self.base64_auth, ))
        try:
            response = urllib2.urlopen(request)
        except (urllib2.HTTPError, ) as exc:
            if exc.code == 400 and hasattr(exc, 'read'):
                error = json.load(exc)
                if error['message'].endswith('obj_not_found'):
                    LOG.warning(_("object %(key)s of type %(typ)s not found"),
                                {
                                    'key': key,
                                    'typ': object_type
                                })
                    raise exception.NotFound()
                elif error['message'] == 'vol_obj_name_not_unique':
                    LOG.error(_("can't create 2 volumes with the same name"))
                    raise (exception.InvalidVolumeMetadata(
                        'Volume by this name already exists'))
            LOG.error(_('Bad response from XMS\n%s'), exc.read())
            raise
        if response.code >= 300:
            LOG.error(_('bad API response, %s'), response.msg)
            raise exception.VolumeBackendAPIException(
                data='bad response from XMS got http code %d, %s' %
                (response.code, response.msg))
        str_result = response.read()
        if str_result:
            try:
                return json.loads(str_result)
            except Exception:
                LOG.exception(
                    _('querying %(typ)s, %(req)s failed to '
                      'parse result, return value = %(res)s'), {
                          'typ': object_type,
                          'req': request_typ,
                          'res': str_result
                      })