Example #1
    def build_ini_targ_map(self, wwns, host_id, lun_id):
        engines = self.client.get_all_engines()
        LOG.debug("Get array engines: %s", engines)

        contrs, engine_id = self._get_lun_engine_contrs(engines, lun_id)

        # Check if there is already a port group in the view.
        # If so, and the engine has already been considered, we won't
        # change anything about the port group or the zone.
        view_name = constants.MAPPING_VIEW_PREFIX + host_id
        portg_name = constants.PORTGROUP_PREFIX + host_id
        view_id = self.client.find_mapping_view(view_name)
        portg_info = self.client.get_portgroup_by_view(view_id)
        portg_id = portg_info[0]['ID'] if portg_info else None

        init_targ_map = {}
        if portg_id:
            description = portg_info[0].get("DESCRIPTION", '')
            engines = description.replace(constants.PORTGROUP_DESCRIP_PREFIX,
                                          "")
            engines = engines.split(',')
            ports = self.client.get_fc_ports_by_portgroup(portg_id)
            if engine_id in engines:
                LOG.debug("Have already selected ports for engine %s, just "
                          "use them.", engine_id)
                return (list(ports.keys()), portg_id, init_targ_map)

        # Filter initiators and ports that are connected to fabrics.
        ports_info = self._get_fc_ports_info()
        (fabric_connected_ports, fabric_connected_initiators) = (
            self._filter_by_fabric(wwns, ports_info.keys()))

        # Build a controller->ports map for convenience.
        contr_port_map = self._build_contr_port_map(fabric_connected_ports,
                                                    ports_info)
        # Get the 'best' ports for the given controllers.
        weighted_ports = self._get_weighted_ports(contr_port_map, ports_info,
                                                  contrs)
        if not weighted_ports:
            msg = _("No FC port can be used for LUN %s.") % lun_id
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Handle port group.
        port_list = [ports_info[port]['id'] for port in weighted_ports]

        if portg_id:
            # Add engine ID to the description of the port group.
            self.client.append_portg_desc(portg_id, engine_id)
            # Extend the weighted_ports to include the ports already in the
            # port group.
            weighted_ports.extend(list(ports.keys()))
        else:
            portg_id = self._create_new_portg(portg_name, engine_id)

        for port in port_list:
            self.client.add_port_to_portg(portg_id, port)

        for ini in fabric_connected_initiators:
            init_targ_map[ini] = weighted_ports
        LOG.debug("build_ini_targ_map: Port group name: %(portg_name)s, "
                  "init_targ_map: %(map)s.",
                  {"portg_name": portg_name,
                   "map": init_targ_map})
        return weighted_ports, portg_id, init_targ_map
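The three-tuple returned above feeds the FC zone manager: the first element is the list of selected target port WWNs, the second is the port group ID, and the third maps each fabric-connected initiator WWPN to that port list. A minimal illustration of the expected shapes, using made-up WWPNs and a made-up port group ID (not taken from any real array):

# Illustrative only: every fabric-connected initiator is mapped to the
# same list of selected ("weighted") target ports.
weighted_ports = ['2100001b32a9da4e', '2100001b32a9da4f']
portg_id = '11'
init_targ_map = {
    '10000090fa534cd0': weighted_ports,
    '10000090fa534cd1': weighted_ports,
}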
Example #2
File: cloudbyte.py Project: suman-d/cinder
    def _retry_volume_operation(self, operation, retries, max_retries, jobid,
                                cb_volume):
        """CloudByte async calls via the FixedIntervalLoopingCall."""

        # Query the CloudByte storage with this jobid
        volume_response = self._queryAsyncJobResult_request(jobid)
        count = retries['count']

        result_res = None
        if volume_response is not None:
            result_res = volume_response.get('queryasyncjobresultresponse')

        if result_res is None:
            msg = (_("Null response received while querying "
                     "for [%(operation)s] based job [%(job)s] "
                     "at CloudByte storage.") % {
                         'operation': operation,
                         'job': jobid
                     })
            raise exception.VolumeBackendAPIException(data=msg)

        status = result_res.get('jobstatus')

        if status == 1:
            LOG.info(
                _LI("CloudByte operation [%(operation)s] succeeded for "
                    "volume [%(cb_volume)s]."), {
                        'operation': operation,
                        'cb_volume': cb_volume
                    })
            raise loopingcall.LoopingCallDone()
        elif status == 2:
            job_result = result_res.get("jobresult")
            err_msg = job_result.get("errortext")
            err_code = job_result.get("errorcode")
            msg = (_("Error in Operation [%(operation)s] "
                     "for volume [%(cb_volume)s] in CloudByte "
                     "storage: [%(cb_error)s], "
                     "error code: [%(error_code)s]."), {
                         'cb_error': err_msg,
                         'error_code': err_code,
                         'cb_volume': cb_volume,
                         'operation': operation
                     })
            raise exception.VolumeBackendAPIException(data=msg)
        elif count == max_retries:
            # All attempts exhausted
            LOG.error(
                _LE("CloudByte operation [%(operation)s] failed"
                    " for volume [%(vol)s]. Exhausted all"
                    " [%(max)s] attempts."), {
                        'operation': operation,
                        'vol': cb_volume,
                        'max': max_retries
                    })
            raise loopingcall.LoopingCallDone(retvalue=False)
        else:
            count += 1
            retries['count'] = count
            LOG.debug(
                "CloudByte operation [%(operation)s] for"
                " volume [%(vol)s]: retry [%(retry)s] of [%(max)s].", {
                    'operation': operation,
                    'vol': cb_volume,
                    'retry': count,
                    'max': max_retries
                })
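The retry helper above is not called directly in a loop; in this driver family it is driven by oslo.service's FixedIntervalLoopingCall, which keeps invoking it until it raises LoopingCallDone. A sketch of that calling pattern follows; the interval, retry budget, and surrounding names are illustrative assumptions, not the driver's actual code.

from oslo_service import loopingcall

# Hypothetical caller: `driver` is an instance of the class that defines
# _retry_volume_operation, and `jobid`/`cb_volume` come from the earlier
# async CloudByte API call.
retries = {'count': 0}      # mutable counter shared with the callback
max_retries = 3             # illustrative retry budget
timer = loopingcall.FixedIntervalLoopingCall(
    driver._retry_volume_operation, 'Create Volume', retries, max_retries,
    jobid, cb_volume)
# wait() returns LoopingCallDone's retvalue: True by default on success,
# False when all retries were exhausted.
success = timer.start(interval=5).wait()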
Example #3
File: dpl_fc.py Project: openstack/cinder
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector."""
        """
            connector = {'ip': CONF.my_ip,
                         'host': CONF.host,
                         'initiator': self._initiator,
                         'wwnns': self._fc_wwnns,
                         'wwpns': self._fc_wwpns}
        """
        lstargetWwpns = []
        lsTargets = []
        szwwpns = []
        ret = 0
        info = {'driver_volume_type': 'fibre_channel', 'data': {}}
        LOG.info(
            'terminate_connection volume: %(volume)s, '
            'connector: %(con)s', {
                'volume': volume,
                'con': connector
            })
        # Query target WWPNs.
        # Get the full target list for the volume.
        for dwwpn in connector['wwpns']:
            szwwpn = self._convertHex2String(dwwpn)
            if len(szwwpn) == 0:
                msg = _('Invalid wwpns format %(wwpns)s') % \
                    {'wwpns': connector['wwpns']}
                raise exception.VolumeBackendAPIException(data=msg)
            szwwpns.append(szwwpn)

        if len(szwwpns) == 0:
            ret = errno.EFAULT
            msg = _('Invalid wwpns format %(wwpns)s') % \
                {'wwpns': connector['wwpns']}
            raise exception.VolumeBackendAPIException(data=msg)
        else:
            for szwwpn in szwwpns:
                lstargetWwpns = self._get_targetwpns(
                    self._conver_uuid2hex(volume['id']), szwwpn)
                lsTargets = list(set(lsTargets + lstargetWwpns))

        # Remove all export target
        try:
            for ptarget in lsTargets:
                ret = self._delete_export_fc(volume['id'], ptarget, szwwpns)
                if ret:
                    break
        except Exception:
            ret = errno.EFAULT
        finally:
            if ret:
                msg = _('Failed to unassign %(volume)s') % \
                    {'volume': volume['id']}
                raise exception.VolumeBackendAPIException(data=msg)

        # Failed to delete export with fibre channel
        if ret:
            init_targ_map = self._build_initiator_target_map(
                connector, lsTargets)
            info['data'] = {
                'target_wwn': lsTargets,
                'initiator_target_map': init_targ_map
            }
            fczm_utils.remove_fc_zone(info)

        return info
Example #4
    def _modify_replica_synchronization(self,
                                        conn,
                                        repServiceInstanceName,
                                        syncInstanceName,
                                        operation,
                                        extraSpecs,
                                        force=False):
        """Modify the relationship between the clone/snap and source volume.

        Helper function that makes an SMI-S call to break clone relationship
        between the clone volume and the source.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: instance name of the replication service
        :param syncInstanceName: instance name of the
            SE_StorageSynchronized_SV_SV object
        :param operation: operation code
        :param extraSpecs: additional info
        :param force: force to modify replication synchronization if True
        :returns: int -- return code
        :returns: job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()

        rc, job = conn.InvokeMethod('ModifyReplicaSynchronization',
                                    repServiceInstanceName,
                                    Operation=operation,
                                    Synchronization=syncInstanceName,
                                    Force=force)

        LOG.debug(
            "_modify_replica_synchronization: %(sv)s "
            "operation: %(operation)s  Return code: %(rc)lu.", {
                'sv': syncInstanceName,
                'operation': operation,
                'rc': rc
            })

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(
                conn, job, extraSpecs)
            if rc != 0:
                exceptionMessage = (
                    _("Error modify replica synchronization: %(sv)s "
                      "operation: %(operation)s. "
                      "Return code: %(rc)lu.  Error: %(error)s.") % {
                          'sv': syncInstanceName,
                          'operation': operation,
                          'rc': rc,
                          'error': errordesc
                      })
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug(
            "InvokeMethod ModifyReplicaSynchronization "
            "took: %(delta)s H:MM:SS.",
            {'delta': self.utils.get_time_delta(startTime, time.time())})

        return rc, job
Example #5
def xms_request(object_type='volumes',
                method='GET',
                data=None,
                name=None,
                idx=None,
                ver='v1'):
    if object_type == 'snapshots':
        object_type = 'volumes'

    try:
        res = xms_data[object_type]
    except KeyError:
        raise exception.VolumeDriverException
    if method == 'GET':
        if name or idx:
            return get_obj(object_type, name, idx)
        else:
            if data and data.get('full') == 1:
                filter_term = data.get('filter')
                if not filter_term:
                    entities = list(res.values())
                else:
                    field, oper, value = filter_term.split(':', 2)
                    comp = xms_filters[oper]
                    entities = [
                        o for o in res.values() if comp(o.get(field), value)
                    ]
                return {object_type: entities}
            else:
                return {
                    object_type: [{
                        "href":
                        "/%s/%d" % (object_type, obj['index']),
                        "name":
                        obj.get('name')
                    } for obj in res.values()]
                }
    elif method == 'POST':
        data = fix_data(data, object_type)
        name_key = get_xms_obj_key(data)
        try:
            if name_key and get_xms_obj_by_name(object_type, data[name_key]):
                raise (exception.VolumeBackendAPIException(
                    'Volume by this name already exists'))
        except exception.NotFound:
            pass
        data['index'] = len(xms_data[object_type]) + 1
        xms_data[object_type][data['index']] = data
        # find the name key
        if name_key:
            data['name'] = data[name_key]
        if object_type == 'lun-maps':
            data['ig-name'] = data['ig-id']

        return {
            "links": [{
                "href":
                "/%s/%d" % (object_type, data[typ2id[object_type]][2])
            }]
        }
    elif method == 'DELETE':
        if object_type == 'consistency-group-volumes':
            data = [
                cgv for cgv in xms_data['consistency-group-volumes'].values()
                if cgv['vol-id'] == data['vol-id']
                and cgv['cg-id'] == data['cg-id']
            ][0]
        else:
            data = get_obj(object_type, name, idx)['content']
        if data:
            del xms_data[object_type][data['index']]
        else:
            raise exception.NotFound()
    elif method == 'PUT':
        obj = get_obj(object_type, name, idx)['content']
        data = fix_data(data, object_type)
        del data['index']
        obj.update(data)
Example #6
    def local_path(self, volume):
        number = self._get_nbd_number(volume)
        if number == -1:
            msg = _('No NBD device for volume %s') % volume['name']
            raise exception.VolumeBackendAPIException(data=msg)
        return self._get_symlink_path(number)
Example #7
    def _map_delete_host(self, map_group_name):

        map_grp_info = {'cMapGrpName': map_group_name}
        ret = self._call_method('GetMapGrpInfo', map_grp_info)
        if ret['returncode'] != zte_pub.ZTE_SUCCESS:
            err_msg = (_('_map_delete_host:get map group info failed. '
                         'group name:%(name)s with Return code: %(ret)s.') % {
                             'name': map_group_name,
                             'ret': ret['returncode']
                         })
            raise exception.VolumeBackendAPIException(data=err_msg)

        sdwhostnum = ret['data']['sdwHostNum']

        if sdwhostnum > 0:
            thostinfo = ret['data']['tHostInfo']
            for hostindex in range(0, int(sdwhostnum)):
                initiator_name = thostinfo[hostindex]['ucHostName']
                host_in_grp = {
                    'ucInitName': initiator_name,
                    'cMapGrpName': map_group_name
                }
                ret = self._call_method('DelHostFromGrp', host_in_grp)
                if ret['returncode'] == zte_pub.ZTE_ERR_GROUP_NOT_EXIST:
                    continue
                if ret['returncode'] not in [
                        zte_pub.ZTE_SUCCESS, zte_pub.ZTE_ERR_HOST_NOT_EXIST
                ]:
                    msg = _('delete host from group failed. ')
                    raise exception.VolumeDriverException(message=msg)

                ret = self._call_method('GetHost',
                                        {"cHostAlias": initiator_name})
                if ret['returncode'] != zte_pub.ZTE_SUCCESS:
                    err_msg = (_('_map_delete_host:get host info failed. '
                                 'host name:%(name)s with Return code: '
                                 '%(ret)s.') % {
                                     'name': initiator_name,
                                     'ret': ret['returncode']
                                 })
                    raise exception.VolumeBackendAPIException(data=err_msg)

                return_data = ret['data']
                portnum = return_data['sdwPortNum']
                for portindex in range(0, int(portnum)):
                    port_host_info = {}
                    port_info = return_data['tPort']
                    port_name = port_info[portindex]['cPortName']
                    port_host_info['cPortName'] = port_name
                    port_host_info['cHostAlias'] = initiator_name

                    ret = self._call_method('DelPortFromHost', port_host_info)
                    if ret['returncode'] != zte_pub.ZTE_SUCCESS:
                        err_msg = (_('delete port from host failed. '
                                     'host name:%(name)s, port name:%(port)s '
                                     'with Return code: %(ret)s.') % {
                                         'name': initiator_name,
                                         'port': port_name,
                                         'ret': ret['returncode']
                                     })
                        raise exception.VolumeBackendAPIException(data=err_msg)

                ret = self._call_method('DelHost',
                                        {"cHostAlias": initiator_name})
                if (ret['returncode'] not in [
                        zte_pub.ZTE_SUCCESS, zte_pub.ZTE_ERR_HOSTNAME_NOT_EXIST
                ]):
                    err_msg = (_('_map_delete_host: delete host failed. '
                                 'host name:%(name)s with Return code: '
                                 '%(ret)s') % {
                                     'name': initiator_name,
                                     'ret': ret['returncode']
                                 })
                    raise exception.VolumeBackendAPIException(data=err_msg)
Example #8
    def smis_get_iscsi_properties(self, volume, connector):
        """Gets iscsi configuration.

        We ideally get saved information in the volume entity, but fall back
        to discovery if need be. Discovery may be completely removed in the
        future. The properties are:
        :target_discovered:    boolean indicating whether discovery was used
        :target_iqn:    the IQN of the iSCSI target
        :target_portal:    the portal of the iSCSI target
        :target_lun:    the lun of the iSCSI target
        :volume_id:    the UUID of the volume
        :auth_method:, :auth_username:, :auth_password:
            the authentication details. Right now, either auth_method is not
            present meaning no authentication, or auth_method == `CHAP`
            meaning use CHAP with the specified credentials.
        """
        properties = {}

        location = self.smis_do_iscsi_discovery(volume)
        if not location:
            raise exception.InvalidVolume(_("Could not find iSCSI export "
                                          " for volume %(volumeName)s")
                                          % {'volumeName': volume['name']})

        LOG.debug("ISCSI Discovery: Found %s" % (location))
        properties['target_discovered'] = True

        device_info = self.common.find_device_number(volume, connector)

        if device_info is None or device_info['hostlunid'] is None:
            exception_message = (_("Cannot find device number for volume "
                                 "%(volumeName)s")
                                 % {'volumeName': volume['name']})
            raise exception.VolumeBackendAPIException(data=exception_message)

        device_number = device_info['hostlunid']

        LOG.info(_LI(
            "location is: %(location)s") % {'location': location})

        for loc in location:
            results = loc.split(" ")
            properties['target_portal'] = results[0].split(",")[0]
            properties['target_iqn'] = results[1]

        properties['target_lun'] = device_number

        properties['volume_id'] = volume['id']

        LOG.info(_LI("ISCSI properties: %(properties)s")
                 % {'properties': properties})
        LOG.info(_LI("ISCSI volume is: %(volume)s")
                 % {'volume': volume})

        if 'provider_auth' in volume:
            auth = volume['provider_auth']
            LOG.info(_LI("AUTH properties: %(authProps)s")
                     % {'authProps': auth})

            if auth is not None:
                (auth_method, auth_username, auth_secret) = auth.split()

                properties['auth_method'] = auth_method
                properties['auth_username'] = auth_username
                properties['auth_password'] = auth_secret

                LOG.info(_LI("AUTH properties: %s") % (properties))

        return properties
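The CHAP branch above relies on Cinder's convention that provider_auth is a single space-separated string of the form "<method> <username> <secret>". A standalone sketch of that parsing, with invented credentials:

# Illustrative only: parse a provider_auth string the same way the method
# above does. The credentials here are made up.
auth = 'CHAP myuser mysecret'
auth_method, auth_username, auth_secret = auth.split()
properties = {
    'auth_method': auth_method,        # 'CHAP'
    'auth_username': auth_username,    # 'myuser'
    'auth_password': auth_secret,      # 'mysecret'
}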
Example #9
    def _delete_back_recursively(self, opvname, opsname):
        """Deletes snapshot by removing its oldest removable parent

        Checks if source volume for this snapshot is hidden:
        If it is hidden and have no other descenents, it calls itself on its
            source snapshot if such exists, or deletes it
        If it is not hidden, trigers delete for snapshot

        :param ovname: origin phisical volume name
        :param osname: origin phisical snapshot name
        """

        if jcom.is_hidden(opvname):
            # Resource is hidden
            snaps = []
            try:
                snaps = self.ra.get_snapshots(opvname)
            except jexc.JDSSResourceNotFoundException:
                LOG.debug('Unable to get physical snapshots related to'
                          ' physical volume %s, volume does not exist',
                          opvname)
                return
            except jexc.JDSSException as err:
                raise exception.VolumeBackendAPIException(err)

            snaps = self._clean_garbage_snapshots(opvname, snaps)

            if len(snaps) > 1:
                # opvname has active snapshots and can't be deleted,
                # so we delete the branch related to opsname instead.
                try:
                    self.ra.delete_snapshot(opvname,
                                            opsname,
                                            recursively_children=True,
                                            recursively_dependents=True,
                                            force_umount=True)
                except jexc.JDSSException as err:
                    raise exception.VolumeBackendAPIException(err)
            else:
                vol = None
                try:
                    vol = self.ra.get_lun(opvname)

                except jexc.JDSSResourceNotFoundException:
                    LOG.debug('volume %s does not exist, it was already '
                              'deleted.', opvname)
                    return
                except jexc.JDSSException as err:
                    raise exception.VolumeBackendAPIException(err)

                if vol['is_clone']:
                    self._delete_back_recursively(
                        jcom.origin_volume(vol['origin']),
                        jcom.origin_snapshot(vol['origin']))
                else:
                    try:
                        self.ra.delete_lun(opvname,
                                           recursively_children=True,
                                           recursively_dependents=True,
                                           force_umount=True)
                    except jexc.JDSSResourceNotFoundException:
                        LOG.debug('volume %s does not exist, it was already '
                                  'deleted.', opvname)
                        return
                    except jexc.JDSSException as err:
                        raise exception.VolumeBackendAPIException(err)
        else:
            # Resource is active
            try:
                self.ra.delete_snapshot(opvname,
                                        opsname,
                                        recursively_children=True,
                                        recursively_dependents=True,
                                        force_umount=True)
            except jexc.JDSSException as err:
                raise exception.VolumeBackendAPIException(err)
Example #10
    def attach_volume_to_node(self, volume_url, node_url):
        LOG.info('Trying attach from node %s to volume %s', node_url,
                 volume_url)
        try:
            volume = self._get_volume(volume_url)
            node = self._get_node(node_url)
            if len(volume.links.endpoints) > 0:
                raise exception.ValidationError(
                    detail=(_("Volume %s already attached") % volume_url))

            node.attach_endpoint(volume.path)
        except sushy_exceptions.InvalidParameterValueError:
            LOG.exception("Attach volume failed (not allowable)")
            raise RSDRetryableException(reason=(_("Not allowed to attach from "
                                                  "%(node)s to %(volume)s.") %
                                                {
                                                    'node': node_url,
                                                    'volume': volume_url
                                                }))
        except Exception:
            LOG.exception("Attach volume failed (attach phase)")
            raise exception.VolumeBackendAPIException(
                data=(_("Attach failed from %(node)s to %(volume)s.") % {
                    'node': node_url,
                    'volume': volume_url
                }))
        try:
            volume.refresh()
            node.refresh()

            v_endpoints = volume.links.endpoints
            v_endpoints = self._get_nqn_endpoints(v_endpoints)
            if len(v_endpoints) != 1:
                raise exception.ValidationError(
                    detail=(_("Attach volume error: %d target nqns") %
                            len(v_endpoints)))
            target_nqn, v_endpoint = v_endpoints[0]
            ip_transports = v_endpoint["IPTransportDetails"]
            if len(ip_transports) != 1:
                raise exception.ValidationError(
                    detail=(_("Attach volume error: %d target ips") %
                            len(ip_transports)))
            ip_transport = ip_transports[0]
            target_ip = ip_transport["IPv4Address"]["Address"]
            target_port = ip_transport["Port"]

            node_system = self.rsdlib.get_system(node.links.computer_system)
            n_endpoints = tuple(
                val["@odata.id"]
                for val in node_system.json["Links"]["Endpoints"])
            n_endpoints = self._get_nqn_endpoints(n_endpoints)
            if len(n_endpoints) == 0:
                raise exception.ValidationError(
                    detail=(_("Attach volume error: %d host nqns") %
                            len(n_endpoints)))
            host_nqn, v_endpoint = n_endpoints[0]

            LOG.info(
                'Attachment successful: Retrieved target IP %s, '
                'target Port %s, target NQN %s and initiator NQN %s',
                target_ip, target_port, target_nqn, host_nqn)
            return (target_ip, target_port, target_nqn, host_nqn)
        except Exception as e:
            LOG.exception("Attach volume failed (post-attach)")
            try:
                node.refresh()
                node.detach_endpoint(volume.path)
                LOG.info('Detached from node %s to volume %s', node_url,
                         volume_url)
            except Exception:
                LOG.exception("Attach volume failed (undo attach)")
                raise exception.VolumeBackendAPIException(data=(
                    _("Undo-attach failed from %(node)s to %(volume)s.") % {
                        'node': node_url,
                        'volume': volume_url
                    }))
            if isinstance(e, exception.ValidationError):
                raise RSDRetryableException(
                    reason=(_("Validation error during post-attach from "
                              "%(node)s to %(volume)s.") % {
                                  'node': node_url,
                                  'volume': volume_url
                              }))
            else:
                raise exception.VolumeBackendAPIException(data=(
                    _("Post-attach failed from %(node)s to %(volume)s.") % {
                        'node': node_url,
                        'volume': volume_url
                    }))
Example #11
    def _ensure_snapshot_resource_area(self, volume_id):
        """Make sure concerto snapshot resource area exists on volume.

        :param volume_id:  Cinder volume ID corresponding to the backend LUN

        Exceptions:
            VolumeBackendAPIException: if cinder volume does not exist
               on backend, or SRA could not be created.
        """

        ctxt = context.get_admin_context()
        volume = api.volume_get(ctxt, volume_id)
        pool = None
        if not volume:
            msg = (_("Failed to ensure snapshot resource area, could not "
                   "locate volume for id %s") % volume_id)
            raise exception.VolumeBackendAPIException(data=msg)

        if not self.vmem_mg.snapshot.lun_has_a_snapshot_resource(
           lun=volume_id):
            # Per Concerto documentation, the SRA size should be computed
            # as follows
            #  Size-of-original-LUN        Reserve for SRA
            #   < 500MB                    100%
            #   500MB to 2G                50%
            #   >= 2G                      20%
            # Note: cinder volume.size is in GB, vmemclient wants MB.
            lun_size_mb = volume['size'] * units.Ki
            if lun_size_mb < 500:
                snap_size_mb = lun_size_mb
            elif lun_size_mb < 2000:
                snap_size_mb = 0.5 * lun_size_mb
            else:
                snap_size_mb = 0.2 * lun_size_mb

            snap_size_mb = int(math.ceil(snap_size_mb))
            typeid = volume['volume_type_id']
            if typeid:
                pool = self._get_violin_extra_spec(volume, "storage_pool")

            LOG.debug("Creating SRA of %(ssmb)sMB for lun of %(lsmb)sMB "
                      "on %(vol_id)s.",
                      {'ssmb': snap_size_mb,
                       'lsmb': lun_size_mb,
                       'vol_id': volume_id})

            res = self.vmem_mg.snapshot.create_snapshot_resource(
                lun=volume_id,
                size=snap_size_mb,
                enable_notification=False,
                policy=CONCERTO_DEFAULT_SRA_POLICY,
                enable_expansion=CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
                expansion_threshold=CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
                expansion_increment=CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
                expansion_max_size=CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
                enable_shrink=CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
                storage_pool=pool)

            if (not res['success']):
                msg = (_("Failed to create snapshot resource area on "
                       "volume %(vol)s: %(res)s.") %
                       {'vol': volume_id, 'res': res['msg']})
                raise exception.VolumeBackendAPIException(data=msg)
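The comment table above determines the snapshot resource area (SRA) reserve from the LUN size. A standalone restatement of that rule, kept faithful to the thresholds used in the code (500 MB and 2000 MB) and purely illustrative:

import math

def sra_size_mb(lun_size_gb):
    # Cinder stores the volume size in GiB; the backend wants MB.
    lun_size_mb = lun_size_gb * 1024
    if lun_size_mb < 500:
        snap_size_mb = lun_size_mb          # reserve 100%
    elif lun_size_mb < 2000:
        snap_size_mb = 0.5 * lun_size_mb    # reserve 50%
    else:
        snap_size_mb = 0.2 * lun_size_mb    # reserve 20%
    return int(math.ceil(snap_size_mb))

# For example, a 10 GiB LUN (10240 MB) gets a 2048 MB SRA.
assert sra_size_mb(10) == 2048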
Example #12
File: lvm.py Project: suman-d/cinder
    def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
        """Optimize the migration if the destination is on the same server.

        If the specified host is another back-end on the same server, and
        the volume is not attached, we can do the migration locally without
        going through iSCSI.
        """

        false_ret = (False, None)
        if volume['status'] != 'available':
            return false_ret
        if 'location_info' not in host['capabilities']:
            return false_ret
        info = host['capabilities']['location_info']
        try:
            (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
                info.split(':')
            lvm_mirrors = int(lvm_mirrors)
        except ValueError:
            return false_ret
        if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
            return false_ret

        if dest_vg == self.vg.vg_name:
            message = (_("Refusing to migrate volume ID: %(id)s. Please "
                         "check your configuration because source and "
                         "destination are the same Volume Group: %(name)s.") %
                       {'id': volume['id'], 'name': self.vg.vg_name})
            LOG.error(message)
            raise exception.VolumeBackendAPIException(data=message)

        vg_list = volutils.get_all_volume_groups()
        try:
            next(vg for vg in vg_list if vg['name'] == dest_vg)
        except StopIteration:
            LOG.error(_LE("Destination Volume Group %s does not exist"),
                      dest_vg)
            return false_ret

        helper = utils.get_root_helper()

        lvm_conf_file = self.configuration.lvm_conf_file
        if lvm_conf_file.lower() == 'none':
            lvm_conf_file = None

        dest_vg_ref = lvm.LVM(dest_vg, helper,
                              lvm_type=lvm_type,
                              executor=self._execute,
                              lvm_conf=lvm_conf_file)

        self._create_volume(volume['name'],
                            self._sizestr(volume['size']),
                            lvm_type,
                            lvm_mirrors,
                            dest_vg_ref)
        # copy_volume expects sizes in MiB, we store integer GiB
        # be sure to convert before passing in
        size_in_mb = int(volume['size']) * units.Ki
        try:
            volutils.copy_volume(self.local_path(volume),
                                 self.local_path(volume, vg=dest_vg),
                                 size_in_mb,
                                 self.configuration.volume_dd_blocksize,
                                 execute=self._execute,
                                 sparse=self._sparse_copy_volume)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Volume migration failed due to "
                              "exception: %(reason)s."),
                          {'reason': six.text_type(e)}, resource=volume)
                dest_vg_ref.delete(volume)
        self._delete_volume(volume)
        return (True, None)
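Two details of the method above are easy to miss: the location_info capability it parses is a colon-separated string, and copy_volume takes sizes in MiB while Cinder stores sizes in GiB. A short illustration with made-up host and volume-group names:

from oslo_utils import units

# Illustrative only: shape of the location_info capability parsed above.
info = 'LVMVolumeDriver:node1:cinder-volumes:default:0'
dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors = info.split(':')
lvm_mirrors = int(lvm_mirrors)

# GiB -> MiB conversion used before copy_volume (units.Ki == 1024).
size_in_mb = 8 * units.Ki    # an 8 GiB volume is copied as 8192 MiB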
Example #13
File: lvm.py Project: suman-d/cinder
    def check_for_setup_error(self):
        """Verify that requirements are in place to use LVM driver."""
        if self.vg is None:
            root_helper = utils.get_root_helper()

            lvm_conf_file = self.configuration.lvm_conf_file
            if lvm_conf_file.lower() == 'none':
                lvm_conf_file = None

            try:
                self.vg = lvm.LVM(self.configuration.volume_group,
                                  root_helper,
                                  lvm_type=self.configuration.lvm_type,
                                  executor=self._execute,
                                  lvm_conf=lvm_conf_file)

            except exception.VolumeGroupNotFound:
                message = (_("Volume Group %s does not exist") %
                           self.configuration.volume_group)
                raise exception.VolumeBackendAPIException(data=message)

        vg_list = volutils.get_all_volume_groups(
            self.configuration.volume_group)
        vg_dict = next((vg for vg in vg_list
                        if vg['name'] == self.vg.vg_name), None)
        if vg_dict is None:
            message = (_("Volume Group %s does not exist") %
                       self.configuration.volume_group)
            raise exception.VolumeBackendAPIException(data=message)

        pool_name = "%s-pool" % self.configuration.volume_group

        if self.configuration.lvm_type == 'auto':
            # Default to thin provisioning if it is supported and
            # the volume group is empty, or contains a thin pool
            # for us to use.
            self.vg.update_volume_group_info()

            self.configuration.lvm_type = 'default'

            if volutils.supports_thin_provisioning():
                if self.vg.get_volume(pool_name) is not None:
                    LOG.info(_LI('Enabling LVM thin provisioning by default '
                                 'because a thin pool exists.'))
                    self.configuration.lvm_type = 'thin'
                elif len(self.vg.get_volumes()) == 0:
                    LOG.info(_LI('Enabling LVM thin provisioning by default '
                                 'because no LVs exist.'))
                    self.configuration.lvm_type = 'thin'

        if self.configuration.lvm_type == 'thin':
            # Specific checks for using Thin provisioned LV's
            if not volutils.supports_thin_provisioning():
                message = _("Thin provisioning not supported "
                            "on this version of LVM.")
                raise exception.VolumeBackendAPIException(data=message)

            if self.vg.get_volume(pool_name) is None:
                try:
                    self.vg.create_thin_pool(pool_name)
                except processutils.ProcessExecutionError as exc:
                    exception_message = (_("Failed to create thin pool, "
                                           "error message was: %s")
                                         % six.text_type(exc.stderr))
                    raise exception.VolumeBackendAPIException(
                        data=exception_message)

            # Enable sparse copy since lvm_type is 'thin'
            self._sparse_copy_volume = True
Example #14
    def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
        """Optimize the migration if the destination is on the same server.

        If the specified host is another back-end on the same server, and
        the volume is not attached, we can do the migration locally without
        going through iSCSI.
        """

        false_ret = (False, None)
        if volume['status'] != 'available':
            return false_ret
        if 'location_info' not in host['capabilities']:
            return false_ret
        info = host['capabilities']['location_info']
        try:
            (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
                info.split(':')
            lvm_mirrors = int(lvm_mirrors)
        except ValueError:
            return false_ret
        if (dest_type != 'IOArbLVMVolumeDriver' or dest_hostname != self.hostname):
            return false_ret

        if dest_vg != self.vg.vg_name:
            vg_list = volutils.get_all_volume_groups()
            try:
                next(vg for vg in vg_list if vg['name'] == dest_vg)
            except StopIteration:
                message = (_LE("Destination Volume Group %s does not exist") %
                           dest_vg)
                LOG.error(message)
                return false_ret

            helper = utils.get_root_helper()

            lvm_conf_file = self.configuration.lvm_conf_file
            if lvm_conf_file.lower() == 'none':
                lvm_conf_file = None

            dest_vg_ref = lvm.LVM(dest_vg, helper,
                                  lvm_type=lvm_type,
                                  executor=self._execute,
                                  lvm_conf=lvm_conf_file)

            self.remove_export(ctxt, volume)
            self._create_volume(volume['name'],
                                self._sizestr(volume['size']),
                                lvm_type,
                                lvm_mirrors,
                                dest_vg_ref)

            volutils.copy_volume(self.local_path(volume),
                                 self.local_path(volume, vg=dest_vg),
                                 volume['size'],
                                 self.configuration.volume_dd_blocksize,
                                 execute=self._execute)
            self._delete_volume(volume)
            model_update = self.create_export(ctxt, volume, vg=dest_vg)

            return (True, model_update)
        else:
            message = (_("Refusing to migrate volume ID: %(id)s. Please "
                         "check your configuration because source and "
                         "destination are the same Volume Group: %(name)s."),
                       {'id': volume['id'], 'name': self.vg.vg_name})
            LOG.exception(message)
            raise exception.VolumeBackendAPIException(data=message)
Example #15
    def InvokeMethod(self,
                     MethodName,
                     Service,
                     ElementName=None,
                     InPool=None,
                     ElementType=None,
                     TheElement=None,
                     LUNames=None,
                     Size=None,
                     Type=None,
                     Mode=None,
                     Locality=None,
                     InitiatorPortIDs=None,
                     TargetPortIDs=None,
                     DeviceAccesses=None,
                     SyncType=None,
                     SourceElement=None,
                     TargetElement=None,
                     Operation=None,
                     CopyType=None,
                     Synchronization=None,
                     ProtocolControllers=None,
                     TargetPool=None):
        global MAP_STAT, VOL_STAT
        if MethodName == 'CreateOrModifyElementFromStoragePool':
            VOL_STAT = '1'
            rc = 0
            vol = self._enum_volumes()
            if InPool.get('InstanceID') == 'FUJITSU:RSP0005':
                job = {'TheElement': vol[1].path}
            else:
                job = {'TheElement': vol[0].path}
        elif MethodName == 'ReturnToStoragePool':
            VOL_STAT = '0'
            rc = 0
            job = {}
        elif MethodName == 'GetReplicationRelationships':
            rc = 0
            job = {'Synchronizations': []}
        elif MethodName == 'ExposePaths':
            MAP_STAT = '1'
            rc = 0
            job = {}
        elif MethodName == 'HidePaths':
            MAP_STAT = '0'
            rc = 0
            job = {}
        elif MethodName == 'CreateElementReplica':
            rc = 0
            snap = self._enum_snapshots()
            job = {'TargetElement': snap[0].path}
        elif MethodName == 'CreateReplica':
            rc = 0
            snap = self._enum_snapshots()
            job = {'TargetElement': snap[0].path}
        elif MethodName == 'ModifyReplicaSynchronization':
            rc = 0
            job = {}
        else:
            raise exception.VolumeBackendAPIException(data="invoke method")

        return (rc, job)
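A hypothetical snippet showing how a test might exercise this fake: fake_conn stands in for an instance of the class above, and storage_service is a placeholder argument; neither name comes from the original listing.

# Known method names return (rc, job) and flip the module-level flags;
# unknown names raise VolumeBackendAPIException.
rc, job = fake_conn.InvokeMethod('ExposePaths', storage_service)
assert rc == 0 and MAP_STAT == '1'

rc, job = fake_conn.InvokeMethod('HidePaths', storage_service)
assert rc == 0 and MAP_STAT == '0'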
Example #16
class TestMisc(scaleio.TestScaleIODriver):
    DOMAIN_NAME = 'PD1'
    POOL_NAME = 'SP1'
    STORAGE_POOLS = ['{}:{}'.format(DOMAIN_NAME, POOL_NAME)]

    def setUp(self):
        """Set up the test case environment.

        Defines the mock HTTPS responses for the REST API calls.
        """
        super(TestMisc, self).setUp()
        self.domain_name_enc = urllib.parse.quote(self.DOMAIN_NAME)
        self.pool_name_enc = urllib.parse.quote(self.POOL_NAME)
        self.ctx = context.RequestContext('fake', 'fake', auth_token=True)

        self.volume = fake_volume.fake_volume_obj(
            self.ctx, **{'name': 'vol1', 'provider_id': fake.PROVIDER_ID}
        )
        self.new_volume = fake_volume.fake_volume_obj(
            self.ctx, **{'name': 'vol2', 'provider_id': fake.PROVIDER2_ID}
        )

        self.HTTPS_MOCK_RESPONSES = {
            self.RESPONSE_MODE.Valid: {
                'types/Domain/instances/getByName::' +
                self.domain_name_enc: '"{}"'.format(self.DOMAIN_NAME).encode(
                    'ascii',
                    'ignore'
                ),
                'types/Pool/instances/getByName::{},{}'.format(
                    self.DOMAIN_NAME,
                    self.POOL_NAME
                ): '"{}"'.format(self.POOL_NAME).encode('ascii', 'ignore'),
                'types/StoragePool/instances/action/querySelectedStatistics': {
                    '"{}"'.format(self.POOL_NAME): {
                        'capacityAvailableForVolumeAllocationInKb': 5000000,
                        'capacityLimitInKb': 16000000,
                        'spareCapacityInKb': 6000000,
                        'thickCapacityInUseInKb': 266,
                        'thinCapacityAllocatedInKm': 0,
                        'snapCapacityInUseInKb': 266,
                    },
                },
                'instances/Volume::{}/action/setVolumeName'.format(
                    self.volume['provider_id']):
                        self.new_volume['provider_id'],
                'instances/Volume::{}/action/setVolumeName'.format(
                    self.new_volume['provider_id']):
                        self.volume['provider_id'],
                'version': '"{}"'.format('2.0.1'),
                'instances/StoragePool::{}'.format(
                    "test_pool"
                ): {
                    'name': 'test_pool',
                    'protectionDomainId': 'test_domain',
                },
                'instances/ProtectionDomain::{}'.format(
                    "test_domain"
                ): {
                    'name': 'test_domain',
                },
            },
            self.RESPONSE_MODE.BadStatus: {
                'types/Domain/instances/getByName::' +
                self.domain_name_enc: self.BAD_STATUS_RESPONSE,
                'instances/Volume::{}/action/setVolumeName'.format(
                    self.volume['provider_id']): mocks.MockHTTPSResponse(
                    {
                        'message': 'Invalid volume.',
                        'httpStatusCode': 400,
                        'errorCode': self.VOLUME_NOT_FOUND_ERROR
                    }, 400),
            },
            self.RESPONSE_MODE.Invalid: {
                'types/Domain/instances/getByName::' +
                self.domain_name_enc: None,
                'instances/Volume::{}/action/setVolumeName'.format(
                    self.volume['provider_id']): mocks.MockHTTPSResponse(
                    {
                        'message': 'Invalid volume.',
                        'httpStatusCode': 400,
                        'errorCode': 0
                    }, 400),
            },
        }

    def test_valid_configuration(self):
        self.driver.check_for_setup_error()

    def test_both_storage_pool(self):
        """Both storage name and ID provided.

        INVALID
        """
        self.driver.configuration.sio_storage_pool_id = "test_pool_id"
        self.driver.configuration.sio_storage_pool_name = "test_pool_name"
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)

    def test_no_storage_pool(self):
        """No storage name or ID provided.

        VALID as storage_pools are defined
        """
        self.driver.configuration.sio_storage_pool_name = None
        self.driver.configuration.sio_storage_pool_id = None
        self.driver.check_for_setup_error()

    def test_both_domain(self):
        """Both domain and ID are provided

        INVALID
        """
        self.driver.configuration.sio_protection_domain_name = (
            "test_domain_name")
        self.driver.configuration.sio_protection_domain_id = (
            "test_domain_id")
        self.assertRaises(exception.InvalidInput,
                          self.driver.check_for_setup_error)

    def test_no_storage_pools(self):
        """No storage pools.

        VALID as domain and storage pool names are provided
        """
        self.driver.storage_pools = None
        self.driver.check_for_setup_error()

    def test_volume_size_round_true(self):
        self.driver._check_volume_size(1)

    def test_volume_size_round_false(self):
        self.override_config('sio_round_volume_capacity', False,
                             configuration.SHARED_CONF_GROUP)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver._check_volume_size, 1)

    def test_get_volume_stats_bad_status(self):
        self.driver.storage_pools = self.STORAGE_POOLS
        self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.get_volume_stats, True)

    def test_get_volume_stats_invalid_domain(self):
        self.driver.storage_pools = self.STORAGE_POOLS
        self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
        self.assertRaises(exception.VolumeBackendAPIException,
                          self.driver.get_volume_stats, True)

    def test_get_volume_stats(self):
        self.driver.storage_pools = self.STORAGE_POOLS
        self.driver.get_volume_stats(True)

    def _setup_valid_variant_property(self, property):
        """Setup valid response that returns a variety of property name

        """
        self.HTTPS_MOCK_RESPONSES = {
            self.RESPONSE_MODE.ValidVariant: {
                'types/Domain/instances/getByName::' +
                self.domain_name_enc: '"{}"'.format(self.DOMAIN_NAME).encode(
                    'ascii',
                    'ignore'
                ),
                'types/Pool/instances/getByName::{},{}'.format(
                    self.DOMAIN_NAME,
                    self.POOL_NAME
                ): '"{}"'.format(self.POOL_NAME).encode('ascii', 'ignore'),
                'types/StoragePool/instances/action/querySelectedStatistics': {
                    '"{}"'.format(self.POOL_NAME): {
                        'capacityAvailableForVolumeAllocationInKb': 5000000,
                        'capacityLimitInKb': 16000000,
                        'spareCapacityInKb': 6000000,
                        'thickCapacityInUseInKb': 266,
                        'snapCapacityInUseInKb': 266,
                        property: 0,
                    },
                },
                'instances/Volume::{}/action/setVolumeName'.format(
                    self.volume['provider_id']):
                        self.new_volume['provider_id'],
                'instances/Volume::{}/action/setVolumeName'.format(
                    self.new_volume['provider_id']):
                        self.volume['provider_id'],
                'version': '"{}"'.format('2.0.1'),
                'instances/StoragePool::{}'.format(
                    self.STORAGE_POOL_NAME
                ): '"{}"'.format(self.STORAGE_POOL_ID),
            }
        }

    def test_get_volume_stats_with_varying_properties(self):
        """Test getting volume stats with various property names

        In SIO 3.0, a property was renamed.
        The change is backwards compatible for now, but this test
        ensures that the driver is tolerant of that change.
        """
        self.driver.storage_pools = self.STORAGE_POOLS
        self._setup_valid_variant_property("thinCapacityAllocatedInKb")
        self.set_https_response_mode(self.RESPONSE_MODE.ValidVariant)
        self.driver.get_volume_stats(True)
        self._setup_valid_variant_property("nonexistentProperty")
        self.set_https_response_mode(self.RESPONSE_MODE.ValidVariant)
        self.driver.get_volume_stats(True)

    @mock.patch(
        'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver.'
        '_rename_volume',
        return_value=None)
    def test_update_migrated_volume(self, mock_rename):
        test_vol = self.driver.update_migrated_volume(
            self.ctx, self.volume, self.new_volume, 'available')
        mock_rename.assert_called_with(self.new_volume, self.volume['id'])
        self.assertEqual({'_name_id': None, 'provider_location': None},
                         test_vol)

    @mock.patch(
        'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver.'
        '_rename_volume',
        return_value=None)
    def test_update_unavailable_migrated_volume(self, mock_rename):
        test_vol = self.driver.update_migrated_volume(
            self.ctx, self.volume, self.new_volume, 'unavailable')
        self.assertFalse(mock_rename.called)
        self.assertEqual({'_name_id': fake.VOLUME_ID,
                          'provider_location': None},
                         test_vol)

    @mock.patch(
        'cinder.volume.drivers.dell_emc.scaleio.driver.ScaleIODriver.'
        '_rename_volume',
        side_effect=exception.VolumeBackendAPIException(data='Error!'))
    def test_fail_update_migrated_volume(self, mock_rename):
        self.assertRaises(
            exception.VolumeBackendAPIException,
            self.driver.update_migrated_volume,
            self.ctx,
            self.volume,
            self.new_volume,
            'available'
        )
        mock_rename.assert_called_with(self.volume, "ff" + self.volume['id'])

    def test_rename_volume(self):
        rc = self.driver._rename_volume(
            self.volume, self.new_volume['id'])
        self.assertIsNone(rc)

    def test_rename_volume_illegal_syntax(self):
        self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
        rc = self.driver._rename_volume(
            self.volume, self.new_volume['id'])
        self.assertIsNone(rc)

    def test_rename_volume_non_sio(self):
        self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
        rc = self.driver._rename_volume(
            self.volume, self.new_volume['id'])
        self.assertIsNone(rc)

    def test_default_provisioning_type_unspecified(self):
        empty_storage_type = {}
        self.assertEqual(
            'thin',
            self.driver._find_provisioning_type(empty_storage_type))

    @ddt.data((True, 'thin'), (False, 'thick'))
    @ddt.unpack
    def test_default_provisioning_type_thin(self, config_provisioning_type,
                                            expected_provisioning_type):
        self.override_config('san_thin_provision', config_provisioning_type,
                             configuration.SHARED_CONF_GROUP)
        self.driver = mocks.ScaleIODriver(configuration=self.configuration)
        empty_storage_type = {}
        self.assertEqual(
            expected_provisioning_type,
            self.driver._find_provisioning_type(empty_storage_type))
Example #17
    def connect_volume_fc(self, volume, connector):
        """Create map between a volume and a host for FC."""
        wwns = connector['wwpns']
        LOG.info(_LI(
            'initialize_connection_fc, initiator: %(wwpns)s, '
            'volume id: %(id)s.'),
            {'wwpns': wwns,
             'id': volume.id})

        lun_id, _ = huawei_utils.get_volume_lun_id(self.rmt_client, volume)
        if not lun_id:
            msg = _("Can't get volume id. Volume name: %s.") % volume.id
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        original_host_name = connector['host']

        # Create hostgroup if not exist.
        host_id = self.rmt_client.add_host_with_check(original_host_name)

        online_wwns_in_host = (
            self.rmt_client.get_host_online_fc_initiators(host_id))
        online_free_wwns = self.rmt_client.get_online_free_wwns()
        fc_initiators_on_array = self.rmt_client.get_fc_initiator_on_array()
        wwns = [i for i in wwns if i in fc_initiators_on_array]
        for wwn in wwns:
            if (wwn not in online_wwns_in_host
                    and wwn not in online_free_wwns):
                wwns_in_host = (
                    self.rmt_client.get_host_fc_initiators(host_id))
                iqns_in_host = (
                    self.rmt_client.get_host_iscsi_initiators(host_id))
                if not (wwns_in_host or iqns_in_host):
                    self.rmt_client.remove_host(host_id)

                msg = _('Can not add FC port to host.')
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

        for wwn in wwns:
            self.rmt_client.ensure_fc_initiator_added(wwn, host_id,
                                                      connector['host'])

        (tgt_port_wwns, init_targ_map) = (
            self.rmt_client.get_init_targ_map(wwns))

        # Add host into hostgroup.
        hostgroup_id = self.rmt_client.add_host_to_hostgroup(host_id)
        map_info = self.rmt_client.do_mapping(lun_id, hostgroup_id, host_id,
                                              hypermetro_lun=True)
        if not map_info:
            msg = _('Map info is None due to array version '
                    'not supporting hypermetro.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        host_lun_id = self.rmt_client.get_host_lun_id(host_id, lun_id)

        # Return FC properties.
        fc_info = {'driver_volume_type': 'fibre_channel',
                   'data': {'target_lun': int(host_lun_id),
                            'target_discovered': True,
                            'target_wwn': tgt_port_wwns,
                            'volume_id': volume.id,
                            'initiator_target_map': init_targ_map,
                            'map_info': map_info},
                   }

        LOG.info(_LI('Remote return FC info is: %s.'), fc_info)

        return fc_info
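For reference, the initiator_target_map returned by get_init_targ_map above is just a dictionary keyed by initiator WWPN whose value is the list of target WWPNs that initiator should see. A standalone sketch of the simplest construction (every initiator mapped to every target, no fabric filtering); it assumes nothing about the Huawei client API:

def build_simple_init_targ_map(initiator_wwns, target_wwns):
    """Map every initiator WWPN to the full target WWPN list."""
    return {ini: list(target_wwns) for ini in initiator_wwns}

# build_simple_init_targ_map(['21000024ff406cc3'], ['500a098280feeba5'])
# -> {'21000024ff406cc3': ['500a098280feeba5']}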
Example #18
0
    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        properties = {}
        properties['target_lun'] = None
        properties['target_discovered'] = True
        properties['target_portal'] = ''
        properties['target_iqn'] = None
        properties['volume_id'] = volume['id']

        dpl_server = self.configuration.san_ip
        dpl_iscsi_port = self.configuration.iscsi_port
        ret, output = self.dpl.assign_vdev(
            self._conver_uuid2hex(volume['id']),
            connector['initiator'].lower(), volume['id'],
            '%s:%d' % (dpl_server, dpl_iscsi_port), 0)

        if ret == errno.EAGAIN:
            ret, event_uuid = self._get_event_uuid(output)
            if len(event_uuid):
                ret = 0
                status = self._wait_event(self.dpl.get_vdev_status,
                                          self._conver_uuid2hex(volume['id']),
                                          event_uuid)
                if status['state'] == 'error':
                    ret = errno.EFAULT
                    msg = _('Flexvisor failed to assign volume %(id)s: '
                            '%(status)s.') % {
                                'id': volume['id'],
                                'status': status
                            }
                    raise exception.VolumeBackendAPIException(data=msg)
            else:
                ret = errno.EFAULT
                msg = _('Flexvisor failed to assign volume %(id)s due to '
                        'unable to query status by event '
                        'id.') % {
                            'id': volume['id']
                        }
                raise exception.VolumeBackendAPIException(data=msg)
        elif ret != 0:
            msg = _('Flexvisor failed to assign volume %(id)s: '
                    '%(status)s.') % {
                        'id': volume['id'],
                        'status': ret
                    }
            raise exception.VolumeBackendAPIException(data=msg)

        if ret == 0:
            ret, output = self.dpl.get_vdev(self._conver_uuid2hex(
                volume['id']))
        if ret == 0:
            for tgInfo in output['exports']['Network/iSCSI']:
                if tgInfo['permissions'] and \
                        isinstance(tgInfo['permissions'][0], dict):
                    for assign in tgInfo['permissions']:
                        if connector['initiator'].lower() in assign.keys():
                            for tgportal in tgInfo.get('portals', {}):
                                properties['target_portal'] = tgportal
                                break
                            properties['target_lun'] = \
                                assign[connector['initiator'].lower()]
                            break

                    if properties['target_portal'] != '':
                        properties['target_iqn'] = tgInfo['target_identifier']
                        break
                else:
                    if connector['initiator'].lower() in tgInfo['permissions']:
                        for tgportal in tgInfo.get('portals', {}):
                            properties['target_portal'] = tgportal
                            break

                    if properties['target_portal'] != '':
                        properties['target_lun'] = \
                            tgInfo['logical_unit_number']
                        properties['target_iqn'] = \
                            tgInfo['target_identifier']
                        break

        if not (ret == 0 or properties['target_portal']):
            msg = _('Flexvisor failed to assign volume %(volume)s '
                    'iqn %(iqn)s.') % {
                        'volume': volume['id'],
                        'iqn': connector['initiator']
                    }
            raise exception.VolumeBackendAPIException(data=msg)

        return {'driver_volume_type': 'iscsi', 'data': properties}
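In the EAGAIN branch above, _wait_event polls the backend until the asynchronous job identified by event_uuid settles. A hedged sketch of such a poller built on oslo_service's FixedIntervalLoopingCall; the helper name and the 'waiting' state value are assumptions, not the actual dpl driver API:

from oslo_service import loopingcall

def wait_for_event(get_status, volume_id, event_uuid, interval=10):
    """Poll get_status(volume_id, event_uuid) until it leaves 'waiting'."""
    result = {}

    def _poll():
        ret, status = get_status(volume_id, event_uuid)
        if ret == 0 and status.get('state') != 'waiting':
            result['status'] = status
            raise loopingcall.LoopingCallDone()

    timer = loopingcall.FixedIntervalLoopingCall(_poll)
    timer.start(interval=interval).wait()
    return result.get('status', {})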
Example #19
0
    def _create_group(self, initiator_name, map_group_name):

        map_grp_info = {'cMapGrpName': map_group_name}
        ret = self._call_method('CreateMapGrp', map_grp_info)

        if ((ret['returncode'] == zte_pub.ZTE_SUCCESS)
                or (ret['returncode'] == zte_pub.ZTE_ERR_GROUP_EXIST)):
            host_name = self._translate_host_name(initiator_name)
            host_info = {
                'cHostAlias': host_name,
                'ucOs': 1,
                'ucType': 1,
                'cPortName': initiator_name,
                'sdwMultiPathMode': 1,
                'cMulChapPass': ''
            }

            # create host
            ret = self._call_method('CreateHost', host_info)
            if ret['returncode'] not in [
                    zte_pub.ZTE_SUCCESS, zte_pub.ZTE_ERR_HOSTNAME_EXIST,
                    zte_pub.ZTE_ERR_PORT_EXIST, zte_pub.ZTE_ERR_PORT_EXIST_OLD
            ]:
                err_msg = (_('Create host failed. Host name: %(name)s, '
                             'return code: %(ret)s.') % {
                                 'name': host_name,
                                 'ret': ret['returncode']
                             })
                raise exception.VolumeBackendAPIException(data=err_msg)

            # If port deleted by user, add it.
            port_info = {
                'cHostAlias': host_name,
                'ucType': 1,
                'cPortName': initiator_name,
                'sdwMultiPathMode': 1,
                'cMulChapPass': ''
            }
            ret = self._call_method('AddPortToHost', port_info)
            if ret['returncode'] not in [
                    zte_pub.ZTE_SUCCESS, zte_pub.ZTE_ERR_PORT_EXIST,
                    zte_pub.ZTE_ERR_PORT_EXIST_OLD
            ]:
                err_msg = (_('_create_group: add port failed. Port name: '
                             '%(name)s, return code: %(ret)s.') % {
                                 'name': initiator_name,
                                 'ret': ret['returncode']
                             })
                raise exception.VolumeBackendAPIException(data=err_msg)

            host_in_grp = {
                'ucInitName': host_name,
                'cMapGrpName': map_group_name
            }
            ret = self._call_method('AddHostToGrp', host_in_grp)
            if ret['returncode'] not in [
                    zte_pub.ZTE_SUCCESS, zte_pub.ZTE_ERR_HOST_EXIST,
                    zte_pub.ZTE_ERR_HOST_EXIST_OLD
            ]:
                self._delete_group(map_group_name)
                err_msg = (_('_create_group: add host to group failed. '
                             'Group name: %(name)s, init name: %(init)s, '
                             'return code: %(ret)s.') % {
                                 'name': map_group_name,
                                 'init': host_name,
                                 'ret': ret['returncode']
                             })
                raise exception.VolumeBackendAPIException(data=err_msg)
        else:
            err_msg = (_('Create group failed. Group name: %(name)s, '
                         'return code: %(ret)s.') % {
                             'name': map_group_name,
                             'ret': ret['returncode']
                         })
            raise exception.VolumeBackendAPIException(data=err_msg)
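Each call above repeats the same pattern: invoke a ZTE API method and treat a small set of "already exists" return codes as success. A hedged refactoring sketch of that pattern; the helper itself is illustrative, and only the _call_method interface is taken from the code above:

    def _call_and_tolerate(self, method, params, ok_codes, action):
        """Invoke a ZTE API method, accepting any code listed in ok_codes."""
        ret = self._call_method(method, params)
        if ret['returncode'] not in ok_codes:
            err_msg = (_('%(action)s failed with return code: %(ret)s.')
                       % {'action': action, 'ret': ret['returncode']})
            raise exception.VolumeBackendAPIException(data=err_msg)
        return ret

    # e.g. self._call_and_tolerate('AddPortToHost', port_info,
    #          [zte_pub.ZTE_SUCCESS, zte_pub.ZTE_ERR_PORT_EXIST,
    #           zte_pub.ZTE_ERR_PORT_EXIST_OLD], 'add port')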
Example #20
0
File: driver.py Project: wputra/MOS-centos
    def _attach_volume(self, context, volume, properties, remote=False):
        """Attach the volume."""
        if remote:
            # Call remote manager's initialize_connection which includes
            # driver's create_export and initialize_connection
            rpcapi = volume_rpcapi.VolumeAPI()
            conn = rpcapi.initialize_connection(context, volume, properties)
        else:
            # Call local driver's create_export and initialize_connection.
            # NOTE(avishay) This is copied from the manager's code - need to
            # clean this up in the future.
            model_update = None
            try:
                LOG.debug(_("Volume %s: creating export"), volume['id'])
                model_update = self.create_export(context, volume)
                if model_update:
                    volume = self.db.volume_update(context, volume['id'],
                                                   model_update)
            except exception.CinderException as ex:
                if model_update:
                    LOG.exception(_("Failed updating model of volume "
                                    "%(volume_id)s with driver provided model "
                                    "%(model)s") %
                                  {'volume_id': volume['id'],
                                   'model': model_update})
                    raise exception.ExportFailure(reason=ex)

            try:
                conn = self.initialize_connection(volume, properties)
            except Exception as err:
                try:
                    err_msg = (_('Unable to fetch connection information from '
                                 'backend: %(err)s') % {'err': err})
                    LOG.error(err_msg)
                    LOG.debug("Cleaning up failed connect initialization.")
                    self.remove_export(context, volume)
                except Exception as ex:
                    ex_msg = (_('Error encountered during cleanup '
                                'of a failed attach: %(ex)s') % {'ex': ex})
                    LOG.error(ex_msg)
                    raise exception.VolumeBackendAPIException(data=ex_msg)
                raise exception.VolumeBackendAPIException(data=err_msg)

        # Use Brick's code to do attach/detach
        use_multipath = self.configuration.use_multipath_for_image_xfer
        device_scan_attempts = self.configuration.num_volume_device_scan_tries
        protocol = conn['driver_volume_type']
        connector = utils.brick_get_connector(protocol,
                                              use_multipath=use_multipath,
                                              device_scan_attempts=
                                              device_scan_attempts,
                                              conn=conn)
        device = connector.connect_volume(conn['data'])
        host_device = device['path']

        if not connector.check_valid_device(host_device):
            raise exception.DeviceUnavailable(path=host_device,
                                              reason=(_("Unable to access "
                                                        "the backend storage "
                                                        "via the path "
                                                        "%(path)s.") %
                                                      {'path': host_device}))
        return {'conn': conn, 'device': device, 'connector': connector}
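The dict returned above ({'conn', 'device', 'connector'}) is exactly what the matching detach path needs. A hedged sketch of that reverse operation using the same os-brick connector object; error handling is omitted and the real manager code differs in detail:

    def _detach_volume(self, context, attach_info, volume, properties,
                       remote=False):
        """Best-effort reverse of _attach_volume()."""
        connector = attach_info['connector']
        # Tear down the local device that connect_volume() created.
        connector.disconnect_volume(attach_info['conn']['data'],
                                    attach_info['device'])
        if remote:
            rpcapi = volume_rpcapi.VolumeAPI()
            rpcapi.terminate_connection(context, volume, properties,
                                        force=True)
        else:
            # Local driver: undo initialize_connection and create_export.
            self.terminate_connection(volume, properties, force=True)
            self.remove_export(context, volume)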
Example #21
0
    def create_element_replica(self,
                               conn,
                               repServiceInstanceName,
                               cloneName,
                               syncType,
                               sourceInstance,
                               extraSpecs,
                               targetInstance=None):
        """Make SMI-S call to create replica for source element.

        :param conn: the connection to the ecom server
        :param repServiceInstanceName: replication service
        :param cloneName: clone volume name
        :param syncType: 7=snapshot, 8=clone
        :param sourceInstance: source volume instance
        :param extraSpecs: additional info
        :param targetInstance: target volume instance. Defaults to None
        :returns: int -- rc - return code
        :returns: job - job object of the replica creation operation
        :raises: VolumeBackendAPIException
        """
        startTime = time.time()

        if targetInstance is None:
            LOG.debug(
                "Create targetless replica: %(clone)s "
                "syncType: %(syncType)s  Source: %(source)s.", {
                    'clone': cloneName,
                    'syncType': syncType,
                    'source': sourceInstance.path
                })
            rc, job = conn.InvokeMethod('CreateElementReplica',
                                        repServiceInstanceName,
                                        ElementName=cloneName,
                                        SyncType=syncType,
                                        SourceElement=sourceInstance.path)
        else:
            LOG.debug(
                "Create replica: %(clone)s syncType: %(syncType)s "
                "Source: %(source)s target: %(target)s.", {
                    'clone': cloneName,
                    'syncType': syncType,
                    'source': sourceInstance.path,
                    'target': targetInstance.path
                })
            rc, job = conn.InvokeMethod('CreateElementReplica',
                                        repServiceInstanceName,
                                        ElementName=cloneName,
                                        SyncType=syncType,
                                        SourceElement=sourceInstance.path,
                                        TargetElement=targetInstance.path)

        if rc != 0:
            rc, errordesc = self.utils.wait_for_job_complete(
                conn, job, extraSpecs)
            if rc != 0:
                exceptionMessage = (
                    _("Error Create Cloned Volume: %(cloneName)s "
                      "Return code: %(rc)lu. Error: %(error)s.") % {
                          'cloneName': cloneName,
                          'rc': rc,
                          'error': errordesc
                      })
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        LOG.debug(
            "InvokeMethod CreateElementReplica "
            "took: %(delta)s H:MM:SS.",
            {'delta': self.utils.get_time_delta(startTime, time.time())})
        return rc, job
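A short usage sketch: syncType selects the replica kind (7 = snapshot, 8 = full clone, per the docstring), so callers typically wrap the method per operation. The constant and method names below are assumptions, not the real VMAX helper names:

    SNAPSHOT_SYNC_TYPE = 7   # per the docstring above
    CLONE_SYNC_TYPE = 8

    def create_snapshot_replica(self, conn, rep_service, snap_name,
                                source_instance, extra_specs):
        """Create a targetless snapshot replica of source_instance."""
        return self.create_element_replica(
            conn, rep_service, snap_name, self.SNAPSHOT_SYNC_TYPE,
            source_instance, extra_specs)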
Example #22
0
    def initialize_connection(self, volume, connector, initiator_data=None):
        """Connect the initiator to a volume"""
        host_uuid = connector['uuid']
        ks_volume = None
        targets = []
        volume_replicas = []
        volume_uuid = volume['id']
        volume_name = volume['name']

        try:
            result = self.kumoscale.host_probe(
                connector['nqn'], connector['uuid'],
                KumoScaleBaseVolumeDriver._convert_host_name(
                    connector['host']), 'Agent', 'cinder-driver-0.1', 30)
        except Exception as e:
            msg = (_("Host %(uuid)s host_probe exception: %(txt)s") % {
                'uuid': connector['uuid'],
                'txt': str(e)
            })
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status != 'Success':
            msg = (_("host_probe for %(uuid)s failed with %(txt)s") % {
                'uuid': connector['uuid'],
                'txt': result.description
            })
            raise exception.VolumeBackendAPIException(data=msg)

        try:
            result = self.kumoscale.publish(host_uuid, volume_uuid)
        except Exception as e:
            msg = (_("Volume %(voluuid)s publish exception: %(txt)s") % {
                'voluuid': volume_uuid,
                'txt': str(e)
            })
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status != "Success" and result.status != 'AlreadyPublished':
            raise exception.VolumeBackendAPIException(data=result.description)

        try:
            result = self.kumoscale.get_volumes_by_uuid(volume_uuid)
        except Exception as e:
            msg = (_("Volume %(voluuid)s fetch exception: %(txt)s") % {
                'voluuid': volume_uuid,
                'txt': str(e)
            })
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status == "Success":
            if len(result.prov_entities) == 0:
                raise exception.VolumeBackendAPIException(
                    data=_("Volume %s not found") % volume_uuid)
            else:
                ks_volume = result.prov_entities[0]
        else:
            msg = (_("get_volumes_by_uuid for %(uuid)s failed with %(txt)s") %
                   {
                       'uuid': volume_uuid,
                       'txt': result.description
                   })
            raise exception.VolumeBackendAPIException(data=msg)

        try:
            result = self.kumoscale.get_targets(host_uuid, ks_volume.uuid)
        except Exception as e:
            msg = (_("Volume %(voluuid)s get targets exception: %(txt)s") % {
                'voluuid': volume_uuid,
                'txt': str(e)
            })
            raise exception.VolumeBackendAPIException(data=msg)

        if result.status == "Success":
            if len(result.prov_entities) == 0:
                raise exception.VolumeBackendAPIException(
                    data=_("Volume %s targets not found") % ks_volume.uuid)
            else:
                targets = result.prov_entities

        ks_volume_replicas = ks_volume.location
        for i in range(len(targets)):
            persistent_id = str(targets[i].backend.persistentID)

            try:
                result = self.kumoscale.get_backend_by_id(persistent_id)
            except Exception as e:
                msg = (_("Backend %(backpid)s exception: %(txt)s") % {
                    'backpid': persistent_id,
                    'txt': str(e)
                })
                raise exception.VolumeBackendAPIException(data=msg)

            if result.status == "Success":
                if len(result.prov_entities) == 0:
                    raise exception.VolumeBackendAPIException(
                        data=_("Backend %s not found") % persistent_id)
                else:
                    backend = result.prov_entities[0]
            else:
                msg = (_("get_backend_by_id for %(pid)s failed with %(txt)s") %
                       {
                           'pid': persistent_id,
                           'txt': result.description
                       })
                raise exception.VolumeBackendAPIException(data=msg)

            str_portals = []
            for p in range(len(backend.portals)):
                portal = backend.portals[p]
                portal_ip = str(portal.ip)
                portal_port = str(portal.port)
                portal_transport = str(portal.transport)
                str_portals.append((portal_ip, portal_port, portal_transport))

            for j in range(len(ks_volume_replicas)):
                ks_replica = ks_volume_replicas[j]
                if str(ks_replica.backend.persistentID) == persistent_id:
                    break

            replica = dict()
            replica['vol_uuid'] = ks_replica.uuid
            replica['target_nqn'] = str(targets[i].targetName)
            replica['portals'] = str_portals

            volume_replicas.append(replica)

        if len(volume_replicas) > 1:  # workaround for limitation
            volume_name = volume_name[:27]

        data = {
            'vol_uuid': volume_uuid,
            'alias': volume_name,
            'writable': ks_volume.writable,
            'volume_replicas': volume_replicas
        }

        if result.status != 'Success':
            raise exception.VolumeBackendAPIException(data=result.description)

        return {'driver_volume_type': 'nvmeof', 'data': data}
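For orientation, the 'nvmeof' connection info assembled above ends up shaped roughly like the illustrative literal below (all values are placeholders, not output from the driver):

example_connection_info = {
    'driver_volume_type': 'nvmeof',
    'data': {
        'vol_uuid': '9ad6e163-85d6-4f65-9a51-1cdbbbd12f74',   # placeholder
        'alias': 'volume-9ad6e163',                           # placeholder
        'writable': True,
        'volume_replicas': [
            {
                'vol_uuid': 'c7e1f4a2-1111-4f65-9a51-1cdbbbd12f74',
                'target_nqn': 'nqn.2014-08.org.nvmexpress:uuid:example',
                'portals': [('10.0.0.5', '4420', 'tcp')],
            },
        ],
    },
}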
Example #23
0
    def initialize_connection_fc(self, volume, connector):
        """Initializes the connection and returns connection info.

        Assigns the specified volume to a compute node/host so that it can be
        used from that host.

        The driver returns a driver_volume_type of 'fibre_channel'.
        The target_wwn can be a single entry or a list of wwns that
        correspond to the list of remote wwn(s) that will export the volume.
        Example return values:
            {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': '500a098280feeba5',
                    'access_mode': 'rw',
                    'initiator_target_map': {
                        '21000024ff406cc3': ['500a098280feeba5'],
                        '21000024ff406cc2': ['500a098280feeba5']
                    }
                }
            }

            or

             {
                'driver_volume_type': 'fibre_channel'
                'data': {
                    'target_discovered': True,
                    'target_lun': 1,
                    'target_wwn': ['500a098280feeba5', '500a098290feeba5',
                                   '500a098190feeba5', '500a098180feeba5'],
                    'access_mode': 'rw',
                    'initiator_target_map': {
                        '21000024ff406cc3': ['500a098280feeba5',
                                             '500a098290feeba5'],
                        '21000024ff406cc2': ['500a098190feeba5',
                                             '500a098180feeba5']
                    }
                }
            }
        """

        initiators = [
            fczm_utils.get_formatted_wwn(wwpn) for wwpn in connector['wwpns']
        ]

        eseries_vol = self._get_volume(volume['name_id'])
        mapping = self.map_volume_to_host(volume, eseries_vol, initiators)
        lun_id = mapping['lun']

        initiator_info = self._build_initiator_target_map_fc(connector)
        target_wwpns, initiator_target_map, num_paths = initiator_info

        if target_wwpns:
            msg = ("Successfully fetched target details for LUN %(id)s "
                   "and initiator(s) %(initiators)s.")
            msg_fmt = {'id': volume['id'], 'initiators': initiators}
            LOG.debug(msg, msg_fmt)
        else:
            msg = _('Failed to get LUN target details for the LUN %s.')
            raise exception.VolumeBackendAPIException(data=msg % volume['id'])

        target_info = {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'target_discovered': True,
                'target_lun': int(lun_id),
                'target_wwn': target_wwpns,
                'access_mode': 'rw',
                'initiator_target_map': initiator_target_map
            }
        }

        return target_info
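_build_initiator_target_map_fc above produces the per-initiator lists shown in the docstring. When no FC lookup service (zone manager) is configured, a common fallback is to map every initiator to every target; the sketch below assumes only that a list of target WWPNs is available:

    def _build_initiator_target_map_fallback(self, connector, target_wwpns):
        """Map each initiator WWPN to all target WWPNs (no fabric lookup)."""
        initiator_wwns = connector['wwpns']
        init_targ_map = {ini: list(target_wwpns) for ini in initiator_wwns}
        num_paths = len(initiator_wwns) * len(target_wwpns)
        return target_wwpns, init_targ_map, num_paths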
Example #24
0
    def initialize_connection(self, volume, connector):
        """Driver entry point to attach a volume to an instance.

        Do the LUN masking on the storage system so the initiator can access
        the LUN on the target. Also return the iSCSI properties so the
        initiator can find the LUN. This implementation does not call
        _get_iscsi_properties() to get the properties because cannot store the
        LUN number in the database. We only find out what the LUN number will
        be during this method call so we construct the properties dictionary
        ourselves.
        """
        initiator_name = connector['initiator']
        name = volume['name']
        lun_id = self._map_lun(name, initiator_name, 'iscsi', None)
        msg = _("Mapped LUN %(name)s to the initiator %(initiator_name)s")
        msg_fmt = {'name': name, 'initiator_name': initiator_name}
        LOG.debug(msg % msg_fmt)
        iqn = self._get_iscsi_service_details()
        target_details_list = self._get_target_details()
        msg = _("Succesfully fetched target details for LUN %(name)s and "
                "initiator %(initiator_name)s")
        msg_fmt = {'name': name, 'initiator_name': initiator_name}
        LOG.debug(msg % msg_fmt)

        if not target_details_list:
            msg = _('Failed to get LUN target details for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % name)
        target_details = None
        for tgt_detail in target_details_list:
            if tgt_detail.get('interface-enabled', 'true') == 'true':
                target_details = tgt_detail
                break
        if not target_details:
            target_details = target_details_list[0]

        if not (target_details['address'] and target_details['port']):
            msg = _('Failed to get target portal for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % name)
        if not iqn:
            msg = _('Failed to get target IQN for the LUN %s')
            raise exception.VolumeBackendAPIException(data=msg % name)

        properties = {}
        properties['target_discovered'] = False
        (address, port) = (target_details['address'], target_details['port'])
        properties['target_portal'] = '%s:%s' % (address, port)
        properties['target_iqn'] = iqn
        properties['target_lun'] = lun_id
        properties['volume_id'] = volume['id']

        auth = volume['provider_auth']
        if auth:
            (auth_method, auth_username, auth_secret) = auth.split()
            properties['auth_method'] = auth_method
            properties['auth_username'] = auth_username
            properties['auth_password'] = auth_secret

        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }
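The provider_auth column parsed at the end is a single space-separated string; the split() above implies a 'CHAP <username> <secret>' layout. A minimal sketch of writing and reading that format (helper names are illustrative):

def build_provider_auth(username, secret):
    """Serialize CHAP credentials the way the split() above expects."""
    return 'CHAP %s %s' % (username, secret)

def parse_provider_auth(provider_auth):
    auth_method, auth_username, auth_secret = provider_auth.split()
    return {'auth_method': auth_method,
            'auth_username': auth_username,
            'auth_password': auth_secret}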
Example #25
0
File: cloudbyte.py Project: suman-d/cinder
    def create_volume_from_snapshot(self, cloned_volume, snapshot):
        """Create a clone from an existing snapshot."""

        # Getting necessary data from input params
        parent_volume_id = snapshot['volume_id']
        cloned_volume_name = cloned_volume['id'].replace("-", "")

        # CloudByte volume id equals OpenStack volume's provider_id
        cb_volume_id = snapshot.get('volume').get('provider_id')

        # CloudByte snapshot path equals OpenStack snapshot's provider_id
        cb_snapshot_path = snapshot['provider_id']

        params = {
            "id": cb_volume_id,
            "clonename": cloned_volume_name,
            "path": cb_snapshot_path
        }

        LOG.debug(
            "Will create CloudByte clone [%(cb_clone)s] "
            "at CloudByte snapshot path [%(cb_snap)s] "
            "w.r.t parent OpenStack volume [%(stack_vol)s].", {
                'cb_clone': cloned_volume_name,
                'cb_snap': cb_snapshot_path,
                'stack_vol': parent_volume_id
            })

        # Create clone of the snapshot
        clone_dataset_snapshot_res = (self._api_request_for_cloudbyte(
            'cloneDatasetSnapshot', params))

        cb_snap = clone_dataset_snapshot_res.get('cloneDatasetSnapshot')

        cb_vol = {}
        if cb_snap is not None:
            cb_vol = cb_snap.get('filesystem')
        else:
            msg = ("Error: Clone creation failed for "
                   "OpenStack volume [%(vol)s] with CloudByte "
                   "snapshot path [%(path)s]" % {
                       'vol': parent_volume_id,
                       'path': cb_snapshot_path
                   })
            raise exception.VolumeBackendAPIException(data=msg)

        LOG.info(
            _LI("Created a clone [%(cb_clone)s] "
                "at CloudByte snapshot path [%(cb_snap)s] "
                "w.r.t parent OpenStack volume [%(stack_vol)s]."), {
                    'cb_clone': cloned_volume_name,
                    'cb_snap': cb_snapshot_path,
                    'stack_vol': parent_volume_id
                })

        chap_info = {}

        if self.cb_use_chap is True:
            account_name = self.configuration.cb_account_name

            # Get account id of this account
            account_id = self._get_account_id_from_name(account_name)

            chap_info = self._get_chap_info(account_id)

        model_update = self._build_provider_details_from_volume(
            cb_vol, chap_info)

        return model_update
Example #26
0
    def _add_lun_to_table(self, lun):
        """Adds LUN to cache table."""
        if not isinstance(lun, NetAppLun):
            msg = _("Object is not a NetApp LUN.")
            raise exception.VolumeBackendAPIException(data=msg)
        self.lun_table[lun.name] = lun
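A natural companion to the cache insert above is the lookup helper; a sketch under the assumption that a missing entry should surface as a backend error (the real NetApp accessor may behave differently):

    def _get_lun_from_table(self, name):
        """Return a cached NetAppLun or raise if it was never added."""
        lun = self.lun_table.get(name)
        if lun is None:
            msg = _("No LUN named %s found in the cache table.") % name
            raise exception.VolumeBackendAPIException(data=msg)
        return lun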
Example #27
0
File: dpl_fc.py Project: openstack/cinder
    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        """
            connector = {'ip': CONF.my_ip,
                         'host': CONF.host,
                         'initiator': self._initiator,
                         'wwnns': self._fc_wwnns,
                         'wwpns': self._fc_wwpns}

        """
        dc_fc = {}
        dc_target = {}
        lsTargetWwpn = []
        output = None
        properties = {}
        preferTargets = {}
        ret = 0
        targetIdentifier = []
        szwwpns = []
        LOG.info(
            'initialize_connection volume: %(volume)s, connector:'
            ' %(connector)s', {
                "volume": volume,
                "connector": connector
            })
        # Get Storage Fiber channel controller
        dc_fc = self._get_fc_channel()

        # Get existed FC target list to decide target wwpn
        dc_target = self._get_targets()
        if len(dc_target) == 0:
            msg = _('Backend storage did not configure fiber channel '
                    'target.')
            raise exception.VolumeBackendAPIException(data=msg)

        for keyFc in dc_fc:
            for targetuuid in dc_target:
                if dc_fc[keyFc]['hardware_address'] == \
                        dc_target[targetuuid]['targetAddr']:
                    preferTargets[targetuuid] = dc_target[targetuuid]
                    break
        # Confirm the client wwpns exist in the SNS table.
        # Convert wwpns to 'xx:xx:xx:xx:xx:xx:xx:xx' format.
        for dwwpn in connector['wwpns']:
            szwwpn = self._convertHex2String(dwwpn)
            if len(szwwpn) == 0:
                msg = _('Invalid wwpns format %(wwpns)s') % \
                    {'wwpns': connector['wwpns']}
                raise exception.VolumeBackendAPIException(data=msg)
            szwwpns.append(szwwpn)

        if len(szwwpns):
            for targetUuid in preferTargets:
                targetWwpn = ''
                targetWwpn = preferTargets.get(targetUuid,
                                               {}).get('targetAddr', '')
                lsTargetWwpn.append(targetWwpn)
        # Use wwpns to assign volume.
        LOG.info('Preferred target wwpns: %(wwpn)s', {'wwpn': lsTargetWwpn})
        # Start to create export in all FC target node.
        assignedTarget = []
        for pTarget in lsTargetWwpn:
            try:
                ret = self._export_fc(volume['id'], str(pTarget), szwwpns,
                                      volume['name'])
                if ret:
                    break
                else:
                    assignedTarget.append(pTarget)
            except Exception as e:
                LOG.error('Failed to export fiber channel target '
                          'due to %s', e)
                ret = errno.EFAULT
                break
        if ret == 0:
            ret, output = self.dpl.get_vdev(self._conver_uuid2hex(
                volume['id']))
        nLun = -1
        if ret == 0:
            try:
                for p in output['exports']['Network/FC']:
                    # check initiator wwpn existed in target initiator list
                    for initI in p.get('permissions', []):
                        for szwpn in szwwpns:
                            if initI.get(szwpn, None):
                                nLun = initI[szwpn]
                                break
                        if nLun != -1:
                            break

                    if nLun != -1:
                        targetIdentifier.append(
                            str(p['target_identifier']).replace(':', ''))

            except Exception:
                msg = _('Invalid connection initialization response of '
                        'volume %(name)s: '
                        '%(output)s') % {
                            'name': volume['name'],
                            'output': output
                        }
                raise exception.VolumeBackendAPIException(data=msg)

        if nLun != -1:
            init_targ_map = self._build_initiator_target_map(
                connector, targetIdentifier)
            properties['target_discovered'] = True
            properties['target_wwn'] = targetIdentifier
            properties['target_lun'] = int(nLun)
            properties['volume_id'] = volume['id']
            properties['initiator_target_map'] = init_targ_map
            LOG.info(
                '%(volume)s assign type fibre_channel, properties '
                '%(properties)s', {
                    'volume': volume['id'],
                    'properties': properties
                })
        else:
            msg = _('Invalid connection initialization response of '
                    'volume %(name)s') % {
                        'name': volume['name']
                    }
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.info(
            'Connect initialization info: '
            '{driver_volume_type: fibre_channel, '
            'data: %(properties)s}', {'properties': properties})
        conn_info = {'driver_volume_type': 'fibre_channel', 'data': properties}
        fczm_utils.add_fc_zone(conn_info)
        return conn_info
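The _convertHex2String call above renders each connector WWPN in colon-separated form before it is compared against the target's permission entries. A standalone sketch of that conversion (illustrative only, not the dpl driver's implementation):

def wwpn_to_colon_format(wwpn):
    """Render a 16-hex-digit WWPN as 'xx:xx:xx:xx:xx:xx:xx:xx'."""
    normalized = '%016x' % int(str(wwpn), 16)
    return ':'.join(normalized[i:i + 2] for i in range(0, 16, 2))

# wwpn_to_colon_format('21000024ff406cc3') -> '21:00:00:24:ff:40:6c:c3'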
Example #28
0
    def initialize_ultrapath_connection(self, volume, connector):
        """Map a volume to a host and return target iSCSI information."""
        def get_targets_ips_info(initiator):
            iscsi_conf = self._get_iscsi_conf(self.configuration)
            target_ip = []

            if iscsi_conf['DefaultTargetIP']:
                for ip in iscsi_conf['DefaultTargetIP'].split(','):
                    target_ip.append(ip)

            if not target_ip:
                msg = (_('get_targets_ips_info: Failed to get target IP '
                         'for initiator %(ini)s, please check config file.')
                       % {'ini': initiator})
                LOG.error(msg)
                raise exception.InvalidInput(reason=msg)

            return self.sshclient.get_tgt_iqn_ultrapath(
                map(lambda x: x.strip(), target_ip))

        msg = (_('initialize_multipath_connection: volume name: %(vol)s, '
                 'host: %(host)s, initiator: %(ini)s')
               % {'vol': volume['name'],
                  'host': connector['host'],
                  'ini': connector['initiator']})
        LOG.debug(msg)
        self.sshclient.update_login_info()
        ips_info = get_targets_ips_info(connector['initiator'])

        # First, add a host if not added before.
        host_id = self.sshclient.add_host(connector['host'], connector['ip'],
                                          connector['initiator'])

        iscsi_conf = self._get_iscsi_conf(self.configuration)
        chapinfo = self.sshclient.find_chap_info(iscsi_conf,
                                                 connector['initiator'])
        used = self.sshclient.is_initiator_used_chap(connector['initiator'])
        if not chapinfo and used:
            msg = (_("Chap is not configed but initiator %s used chap on "
                     "array, please cheak and remove chap for this initiator.")
                   % connector['initiator'])
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # Then, add the iSCSI port to the host.
        self.sshclient.add_iscsi_port_to_host(host_id, connector, chapinfo)

        # Finally, map the volume to the host.
        lun_id = self.sshclient.check_volume_exist_on_array(volume)
        if not lun_id:
            msg = _("Volume %s not exists on the array.") % volume['id']
            raise exception.VolumeBackendAPIException(data=msg)

        hostlun_id = self.sshclient.map_volume(host_id, lun_id)

        # Change LUN ctr for better performance, just for single path.
        lun_details = self.sshclient.get_lun_details(lun_id)

        target_portal_list = []
        target_iqn_list = []
        for info in ips_info:
            target_portal_list.append('%s:%s' % (info[1], '3260'))
            target_iqn_list.append(info[0])
        properties = {}
        properties['target_discovered'] = False
        properties['target_portal'] = target_portal_list
        properties['target_iqn'] = target_iqn_list
        properties['target_lun'] = int(hostlun_id)
        properties['volume_id'] = volume['id']
        properties['lun_wwn'] = lun_details['LUNWWN']
        properties['target_num'] = len(ips_info)
        properties['description'] = 'huawei'

        if chapinfo:
            properties['auth_method'] = 'CHAP'
            properties['auth_username'] = chapinfo[0]
            properties['auth_password'] = chapinfo[1]

        return {'driver_volume_type': 'iscsi', 'data': properties}
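The portal and IQN lists above are built inline from the ips_info entries, where index 0 is the target IQN and index 1 the target IP. Pulled out as a helper for clarity (purely illustrative, not part of the Huawei SSH client):

def build_portal_lists(ips_info, port=3260):
    """Split (iqn, ip, ...) records into parallel portal and IQN lists."""
    portals = ['%s:%s' % (info[1], port) for info in ips_info]
    iqns = [info[0] for info in ips_info]
    return portals, iqns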
Example #29
0
    def _backendException(self, e):
        return exception.VolumeBackendAPIException(data=six.text_type(e))
Example #30
0
    def create_replica(self, local_lun_info, replica_model):
        """Create remote LUN and replication pair.

        Purpose:
            1. create remote lun
            2. create replication pair
            3. enable replication pair
        """
        LOG.debug(('Create replication, local lun info: %(info)s, '
                   'replication model: %(model)s.'), {
                       'info': local_lun_info,
                       'model': replica_model
                   })

        local_lun_id = local_lun_info['ID']
        self.wait_volume_online(self.local_client, local_lun_info)

        # step1, create remote lun
        rmt_lun_info = self.create_rmt_lun(local_lun_info)
        rmt_lun_id = rmt_lun_info['ID']

        # step2, get remote device info
        rmt_dev_id, rmt_dev_name, rmt_dev_sn = self.get_rmt_dev_info()
        if not rmt_lun_id or not rmt_dev_name:
            self._delete_rmt_lun(rmt_lun_id)
            msg = _('Get remote device info failed.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # step3, create replication pair
        try:
            pair_info = self.local_op.create(local_lun_id, rmt_lun_id,
                                             rmt_dev_id, rmt_dev_name,
                                             replica_model)
            pair_id = pair_info['ID']
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error('Create pair failed. Error: %s.', err)
                self._delete_rmt_lun(rmt_lun_id)

        # step4, start sync manually. If replication type is sync,
        # then wait for sync complete.
        wait_complete = (replica_model == constants.REPLICA_SYNC_MODEL)
        try:
            self.local_driver.sync(pair_id, wait_complete)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error('Start synchronization failed. Error: %s.', err)
                self._delete_pair(pair_id)
                self._delete_rmt_lun(rmt_lun_id)

        model_update = {}
        driver_data = {
            'pair_id': pair_id,
            'huawei_sn': rmt_dev_sn,
            'rmt_lun_id': rmt_lun_id,
            'rmt_lun_wwn': rmt_lun_info['WWN']
        }
        model_update['replication_driver_data'] = to_string(driver_data)
        model_update['replication_status'] = 'available'
        LOG.debug('Create replication, return info: %s.', model_update)
        return model_update
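replication_driver_data is persisted as a flat string, so to_string() is typically a thin JSON wrapper. A hedged sketch of that serializer and its inverse (names assumed, not taken from the Huawei driver):

import json

def to_string(driver_data):
    """Serialize the replication driver data dict for the DB column."""
    return json.dumps(driver_data) if driver_data else ''

def get_driver_data(raw):
    """Inverse of to_string(); tolerate an empty column."""
    return json.loads(raw) if raw else {}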