Example #1
 def _run_qemu_img(self, command, *params):
     """Executes qemu-img command wrapper"""
     cmd = ['env', 'LC_ALL=C', 'LANG=C', 'qemu-img', command]
     for param in params:
         if param.startswith(self.QEMU_SHEEPDOG_PREFIX):
             # replace 'sheepdog:vdiname[:snapshotname]' to
             #         'sheepdog:addr:port:vdiname[:snapshotname]'
             param = param.replace(self.QEMU_SHEEPDOG_PREFIX,
                                   '%(prefix)s%(addr)s:%(port)s:' %
                                   {'prefix': self.QEMU_SHEEPDOG_PREFIX,
                                    'addr': self.addr, 'port': self.port},
                                   1)
         cmd.append(param)
     try:
         return utils.execute(*cmd)
     except OSError as e:
         with excutils.save_and_reraise_exception():
             if e.errno == errno.ENOENT:
                 msg = _LE('Qemu-img is not installed. '
                           'OSError: command is %(cmd)s.')
             else:
                 msg = _LE('OSError: command is %(cmd)s.')
             LOG.error(msg, {'cmd': tuple(cmd)})
     except processutils.ProcessExecutionError as e:
         raise exception.SheepdogCmdError(
             cmd=e.cmd,
             exit_code=e.exit_code,
             stdout=e.stdout.replace('\n', '\\n'),
             stderr=e.stderr.replace('\n', '\\n'))
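
This wrapper, like several of the examples below, relies on excutils.save_and_reraise_exception() to log context without swallowing the original traceback. A minimal sketch of the pattern, assuming oslo.utils is installed; failing_call() is a hypothetical stand-in for utils.execute:

from oslo_utils import excutils

def failing_call():
    raise OSError("boom")

def run():
    try:
        failing_call()
    except OSError:
        with excutils.save_and_reraise_exception():
            # Anything done here runs first; the original OSError,
            # traceback intact, is re-raised when the block exits.
            print("logging context before re-raising")

try:
    run()
except OSError as e:
    print("caller still sees the original error: %s" % e)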
Example #2
    def _copy_image_to_volume(self, context, volume_ref, image_id, image_location, image_service):
        """Downloads Glance image to the specified volume."""
        copy_image_to_volume = self.driver.copy_image_to_volume
        volume_id = volume_ref["id"]
        LOG.debug(
            "Attempting download of %(image_id)s (%(image_location)s)" " to volume %(volume_id)s.",
            {"image_id": image_id, "volume_id": volume_id, "image_location": image_location},
        )
        try:
            copy_image_to_volume(context, volume_ref, image_service, image_id)
        except processutils.ProcessExecutionError as ex:
            LOG.exception(
                _LE("Failed to copy image %(image_id)s to volume: " "%(volume_id)s"),
                {"volume_id": volume_id, "image_id": image_id},
            )
            raise exception.ImageCopyFailure(reason=ex.stderr)
        except exception.ImageUnacceptable as ex:
            LOG.exception(_LE("Failed to copy image to volume: %(volume_id)s"), {"volume_id": volume_id})
            raise exception.ImageUnacceptable(ex)
        except Exception as ex:
            LOG.exception(
                _LE("Failed to copy image %(image_id)s to " "volume: %(volume_id)s"),
                {"volume_id": volume_id, "image_id": image_id},
            )
            if not isinstance(ex, exception.ImageCopyFailure):
                raise exception.ImageCopyFailure(reason=ex)
            else:
                raise

        LOG.debug(
            "Downloaded image %(image_id)s (%(image_location)s)" " to volume %(volume_id)s successfully.",
            {"image_id": image_id, "volume_id": volume_id, "image_location": image_location},
        )
Example #3
    def request(self, src_file="", dst_file="", method="", maxretries=10):
        retry = 0
        src_url = self.https_path + "/" + src_file
        dst_url = self.https_path + "/" + dst_file
        request = urllib2.Request(src_url)

        if dst_file != "":
            request.add_header('Destination', dst_url)

        request.add_header("Authorization", "Basic %s" % self.auth_str)

        request.get_method = lambda: method

        LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s',
                  {'method': method, 'src': src_url, 'des': dst_url})

        response = None
        while retry < maxretries:
            try:
                response = urllib2.urlopen(request, timeout=None)
            except urllib2.HTTPError as err:
                LOG.error(_LE('WebDAV returned with %(code)s error during '
                              '%(method)s call.'),
                          {'code': err.code, 'method': method})

                if err.code == httplib.INTERNAL_SERVER_ERROR:
                    LOG.error(_LE('WebDAV operation failed with error code: '
                                  '%(code)s reason: %(reason)s Retry attempt '
                                  '%(retry)s in progress.'),
                              {'code': err.code,
                               'reason': err.reason,
                               'retry': retry})
                    if retry < maxretries:
                        retry += 1
                        time.sleep(1)
                        continue

                msg = self._lookup_error(err.code)
                raise exception.WebDAVClientError(msg=msg, code=err.code,
                                                  src=src_file, dst=dst_file,
                                                  method=method)

            except httplib.BadStatusLine as err:
                msg = self._lookup_error('BadStatusLine')
                raise exception.WebDAVClientError(msg=msg,
                                                  code='httplib.BadStatusLine',
                                                  src=src_file, dst=dst_file,
                                                  method=method)

            except urllib2.URLError as err:
                reason = getattr(err, 'reason', '')

                msg = self._lookup_error('Bad_Gateway')
                raise exception.WebDAVClientError(msg=msg,
                                                  code=reason, src=src_file,
                                                  dst=dst_file, method=method)

            break
        return response
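
The loop above is a bounded-retry pattern: retry only the transient case (HTTP 500), sleep between attempts, and raise a typed error for everything else. A generic sketch of the same shape; call_with_retries, flaky_operation, and MAX_RETRIES are illustrative names, not part of the driver:

import time

MAX_RETRIES = 10

def call_with_retries(flaky_operation):
    retry = 0
    while retry < MAX_RETRIES:
        try:
            return flaky_operation()
        except IOError:  # stand-in for the retryable HTTP 500 case
            retry += 1
            if retry < MAX_RETRIES:
                time.sleep(1)  # brief pause before the next attempt
                continue
            raise  # attempts exhausted: propagate to the caller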
Example #4
    def check_cluster_status(self):
        try:
            (_stdout, _stderr) = self._run_dog('cluster', 'info')
        except exception.SheepdogCmdError as e:
            cmd = e.kwargs['cmd']
            _stderr = e.kwargs['stderr']
            with excutils.save_and_reraise_exception():
                if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
                    msg = _LE('Failed to connect to sheep daemon. '
                              'addr: %(addr)s, port: %(port)s')
                    LOG.error(msg, {'addr': self.addr, 'port': self.port})
                else:
                    LOG.error(_LE('Failed to check cluster status.'
                                  '(command: %s)'), cmd)

        if _stdout.startswith(self.DOG_RESP_CLUSTER_RUNNING):
            LOG.debug('Sheepdog cluster is running.')
            return

        reason = _('Invalid sheepdog cluster status.')
        if _stdout.startswith(self.DOG_RESP_CLUSTER_NOT_FORMATTED):
            reason = _('Cluster is not formatted. '
                       'You should probably perform "dog cluster format".')
        elif _stdout.startswith(self.DOG_RESP_CLUSTER_WAITING):
            reason = _('Waiting for all nodes to join cluster. '
                       'Ensure all sheep daemons are running.')
        raise exception.SheepdogError(reason=reason)
Example #5
    def revert(self, context, result, flow_failures, volume_ref, **kwargs):
        # NOTE(dulek): Revert is occurring and the manager needs to know if
        # rescheduling happened. We're returning a boolean flag that will
        # indicate that. It will be available in the flow engine store
        # through the get_revert_result method.

        # If we do not want to be rescheduled, just set the volume's status
        # to error and return.
        if not self.do_reschedule:
            common.error_out_volume(context, self.db, volume_ref.id)
            LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
            return False

        # Check if we have a cause which can tell us not to reschedule and
        # set the volume's status to error.
        for failure in flow_failures.values():
            if failure.check(*self.no_reschedule_types):
                common.error_out_volume(context, self.db, volume_ref.id)
                LOG.error(_LE("Volume %s: create failed"), volume_ref.id)
                return False

        # Use a different context when rescheduling.
        if self.reschedule_context:
            cause = list(flow_failures.values())[0]
            context = self.reschedule_context
            try:
                self._pre_reschedule(context, volume_ref)
                self._reschedule(context, cause, volume=volume_ref, **kwargs)
                self._post_reschedule(volume_ref)
                return True
            except exception.CinderException:
                LOG.exception(_LE("Volume %s: rescheduling failed"), volume_ref.id)

        return False
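
This revert() runs inside a TaskFlow flow; as the NOTE says, its boolean return value reaches the manager through the engine storage. A hedged sketch of that shape, assuming taskflow is installed; the class and its logic are illustrative, not Cinder's actual task:

import taskflow.task

class CreateThing(taskflow.task.Task):
    def execute(self):
        raise RuntimeError("create failed")

    def revert(self, result, flow_failures, **kwargs):
        # Whatever revert() returns is kept by the engine and can be
        # read back later, e.g. via storage.get_revert_result(self.name).
        rescheduled = False
        return rescheduled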
Example #6
    def _delete_lun(self, volume):
        """Deletes a lun.

        :param volume:  volume object provided by the Manager
        """
        success_msgs = ['Delete resource successfully', '']

        LOG.debug("Deleting lun %s.", volume['id'])

        try:
            # If the LUN has ever had a snapshot, it has an SRA and
            # policy that must be deleted first.
            self._delete_lun_snapshot_bookkeeping(volume['id'])

            # TODO(rdl) force the delete for now to deal with pending
            # snapshot issues.  Should revisit later for a better fix.
            self._send_cmd(self.vmem_mg.lun.delete_lun,
                           success_msgs, volume['id'], True)

        except exception.VolumeBackendAPIException:
            LOG.exception(_LE("Lun %s has dependent snapshots, "
                              "skipping lun deletion."), volume['id'])
            raise exception.VolumeIsBusy(volume_name=volume['id'])

        except Exception:
            LOG.exception(_LE("Lun delete for %s failed!"), volume['id'])
            raise
Example #7
 def _post_sub_clone_resize(self, path):
     """Try post sub clone resize in a transactional manner."""
     st_tm_mv, st_nw_mv, st_del_old = None, None, None
     seg = path.split("/")
     LOG.info(_LI("Post clone resize LUN %s"), seg[-1])
     new_lun = 'new-%s' % (seg[-1])
     tmp_lun = 'tmp-%s' % (seg[-1])
     tmp_path = "/vol/%s/%s" % (seg[2], tmp_lun)
     new_path = "/vol/%s/%s" % (seg[2], new_lun)
     try:
         st_tm_mv = self.zapi_client.move_lun(path, tmp_path)
         st_nw_mv = self.zapi_client.move_lun(new_path, path)
         st_del_old = self.zapi_client.destroy_lun(tmp_path)
     except Exception as e:
         if st_tm_mv is None:
             msg = _("Failure staging LUN %s to tmp.")
             raise exception.VolumeBackendAPIException(data=msg % (seg[-1]))
         elif st_nw_mv is None:
             self.zapi_client.move_lun(tmp_path, path)
             msg = _("Failure moving new cloned LUN to %s.")
             raise exception.VolumeBackendAPIException(
                 data=msg % (seg[-1]))
         elif st_del_old is None:
             LOG.error(_LE("Failure deleting staged tmp LUN %s."), tmp_lun)
         else:
             LOG.error(_LE("Unknown exception in"
                           " post clone resize LUN %s."), seg[-1])
             LOG.error(_LE("Exception details: %s"), e)
Example #8
File: sheepdog.py Project: Third9/cinder
 def clone(self, src_vdiname, src_snapname, dst_vdiname, size):
     try:
         self._run_qemu_img('create', '-b',
                            'sheepdog:%(src_vdiname)s:%(src_snapname)s' %
                            {'src_vdiname': src_vdiname,
                             'src_snapname': src_snapname},
                            'sheepdog:%s' % dst_vdiname, '%sG' % size)
     except exception.SheepdogCmdError as e:
         cmd = e.kwargs['cmd']
         _stderr = e.kwargs['stderr']
         with excutils.save_and_reraise_exception():
             if self.QEMU_IMG_RESP_ALREADY_EXISTS in _stderr:
                 LOG.error(_LE('Clone volume "%s" already exists. '
                           'Please check the results of "dog vdi list".'),
                           dst_vdiname)
             elif self.QEMU_IMG_RESP_VDI_NOT_FOUND in _stderr:
                 LOG.error(_LE('Src Volume "%s" not found. '
                           'Please check the results of "dog vdi list".'),
                           src_vdiname)
             elif self.QEMU_IMG_RESP_SNAPSHOT_NOT_FOUND in _stderr:
                 LOG.error(_LE('Snapshot "%s" not found. '
                           'Please check the results of "dog vdi list".'),
                           src_snapname)
             elif self.QEMU_IMG_RESP_SIZE_TOO_LARGE in _stderr:
                 LOG.error(_LE('Volume size "%sG" is too large.'), size)
             else:
                 LOG.error(_LE('Failed to clone volume.(command: %s)'), cmd)
Example #9
File: sheepdog.py Project: Third9/cinder
 def resize(self, vdiname, size):
     size = int(size) * units.Gi
     try:
         (_stdout, _stderr) = self._run_dog('vdi', 'resize', vdiname, size)
     except exception.SheepdogCmdError as e:
         _stderr = e.kwargs['stderr']
         with excutils.save_and_reraise_exception():
             if _stderr.rstrip('\\n').endswith(
                     self.DOG_RESP_VDI_NOT_FOUND):
                 LOG.error(_LE('Failed to resize vdi. vdi not found. %s'),
                           vdiname)
             elif _stderr.startswith(self.DOG_RESP_VDI_SHRINK_NOT_SUPPORT):
                 LOG.error(_LE('Failed to resize vdi. '
                               'Shrinking vdi not supported. '
                               'vdi: %(vdiname)s new size: %(size)s'),
                           {'vdiname': vdiname, 'size': size})
             elif _stderr.startswith(self.DOG_RESP_VDI_SIZE_TOO_LARGE):
                 LOG.error(_LE('Failed to resize vdi. '
                               'Too large volume size. '
                               'vdi: %(vdiname)s new size: %(size)s'),
                           {'vdiname': vdiname, 'size': size})
             else:
                 LOG.error(_LE('Failed to resize vdi. '
                               'vdi: %(vdiname)s new size: %(size)s'),
                           {'vdiname': vdiname, 'size': size})
Example #10
File: lvm.py Project: C2python/cinder
    def create_lv_snapshot(self, name, source_lv_name, lv_type='default'):
        """Creates a snapshot of a logical volume.

        :param name: Name to assign to new snapshot
        :param source_lv_name: Name of Logical Volume to snapshot
        :param lv_type: Type of LV (default or thin)

        """
        source_lvref = self.get_volume(source_lv_name)
        if source_lvref is None:
            LOG.error(_LE("Trying to create snapshot by non-existent LV: %s"),
                      source_lv_name)
            raise exception.VolumeDeviceNotFound(device=source_lv_name)
        cmd = LVM.LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
                                    '%s/%s' % (self.vg_name, source_lv_name)]
        if lv_type != 'thin':
            size = source_lvref['size']
            cmd.extend(['-L', '%sg' % (size)])

        try:
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception(_LE('Error creating snapshot'))
            LOG.error(_LE('Cmd     :%s'), err.cmd)
            LOG.error(_LE('StdOut  :%s'), err.stdout)
            LOG.error(_LE('StdErr  :%s'), err.stderr)
            raise
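
For a thick (default) LV the method assembles a plain lvcreate call. A quick way to see the resulting command line, assuming LVM_CMD_PREFIX is ['env', 'LC_ALL=C'] as in Cinder's LVM class; the names and size are illustrative:

LVM_CMD_PREFIX = ['env', 'LC_ALL=C']  # assumed value of the class constant
vg_name, source_lv_name, name, size = 'cinder-volumes', 'vol-1', 'snap-1', 10
cmd = LVM_CMD_PREFIX + ['lvcreate', '--name', name, '--snapshot',
                        '%s/%s' % (vg_name, source_lv_name)]
cmd.extend(['-L', '%sg' % size])
print(' '.join(cmd))
# env LC_ALL=C lvcreate --name snap-1 --snapshot cinder-volumes/vol-1 -L 10g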
Example #11
 def handle_errors(self, response, key, object_type):
     if response.status_code == 400:
         error = response.json()
         err_msg = error.get('message')
         if err_msg.endswith(OBJ_NOT_FOUND_ERR):
             LOG.warning(_LW("object %(key)s of "
                             "type %(typ)s not found, %(err_msg)s"),
                         {'key': key, 'typ': object_type,
                          'err_msg': err_msg, })
             raise exception.NotFound()
         elif err_msg == VOL_NOT_UNIQUE_ERR:
             LOG.error(_LE("can't create 2 volumes with the same name, %s"),
                       err_msg)
             msg = (_('Volume by this name already exists'))
             raise exception.VolumeBackendAPIException(data=msg)
         elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
             LOG.error(_LE("Can't find volume to map %(key)s, %(msg)s"),
                       {'key': key, 'msg': err_msg, })
             raise exception.VolumeNotFound(volume_id=key)
         elif ALREADY_MAPPED_ERR in err_msg:
             raise exception.XtremIOAlreadyMappedError()
         elif err_msg == SYSTEM_BUSY:
             raise exception.XtremIOArrayBusy()
         elif err_msg in (TOO_MANY_OBJECTS, TOO_MANY_SNAPSHOTS_PER_VOL):
             raise exception.XtremIOSnapshotsLimitExceeded()
     msg = _('Bad response from XMS, %s') % response.text
     LOG.error(msg)
     raise exception.VolumeBackendAPIException(message=msg)
Example #12
File: manager.py Project: apporc/cinder
    def _cleanup_incomplete_backup_operations(self, ctxt):
        LOG.info(_LI("Cleaning up incomplete backup operations."))
        volumes = self.db.volume_get_all_by_host(ctxt, self.host)

        for volume in volumes:
            try:
                self._cleanup_one_volume(ctxt, volume)
            except Exception:
                LOG.exception(_LE("Problem cleaning up volume %(vol)s."),
                              {'vol': volume['id']})

        # TODO(smulcahy) implement full resume of backup and restore
        # operations on restart (rather than simply resetting)
        backups = objects.BackupList.get_all_by_host(ctxt, self.host)
        for backup in backups:
            try:
                self._cleanup_one_backup(ctxt, backup)
            except Exception:
                LOG.exception(_LE("Problem cleaning up backup %(bkup)s."),
                              {'bkup': backup['id']})
            try:
                self._cleanup_temp_volumes_snapshots_for_one_backup(ctxt,
                                                                    backup)
            except Exception:
                LOG.exception(_LE("Problem cleaning temp volumes and "
                                  "snapshots for backup %(bkup)s."),
                              {'bkup': backup['id']})
Example #13
    def get_nameserver_info(self, ssh_pool):
        """Get name server data from fabric.

        This method will return the connected node port WWN list (local
        and remote) for the given switch fabric.

        :param ssh_pool: SSH connections for the current fabric
        """
        cli_output = None
        nsinfo_list = []
        try:
            cli_output = self._get_switch_data(ssh_pool,
                                               zone_constant.NS_SHOW)
        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed collecting nsshow info for fabric"))
        if cli_output:
            nsinfo_list = self._parse_ns_output(cli_output)
        try:
            cli_output = self._get_switch_data(ssh_pool,
                                               zone_constant.NS_CAM_SHOW)

        except exception.FCSanLookupServiceException:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed collecting nscamshow"))
        if cli_output:
            nsinfo_list.extend(self._parse_ns_output(cli_output))
        LOG.debug("Connector returning nsinfo-%s", nsinfo_list)
        return nsinfo_list
Example #14
File: srb.py Project: kaitlin-farr/cinder
    def _detach_file(self, volume):
        name = self._get_volname(volume)
        devname = self._device_name(volume)
        vg = self._get_lvm_vg(volume)
        LOG.debug('Detaching device %s', devname)

        count = self._get_attached_count(volume)
        if count > 1:
            LOG.info(_LI('Reference count of %(volume)s is %(count)d, '
                         'not detaching.'),
                     {'volume': volume['name'], 'count': count})
            return

        message = (_('Could not detach volume %(vol)s from device %(dev)s.')
                   % {'vol': name, 'dev': devname})
        with handle_process_execution_error(
                message=message,
                info_message=_LI('Error detaching Volume'),
                reraise=exception.VolumeBackendAPIException(data=message)):
            try:
                if vg is not None:
                    self._do_deactivate(volume, vg)
            except putils.ProcessExecutionError:
                LOG.error(_LE('Could not deactivate volume group %s'),
                          self._get_volname(volume))
                raise

            try:
                self._do_detach(volume, vg=vg)
            except putils.ProcessExecutionError:
                LOG.error(_LE('Could not detach volume %(vol)s from device '
                              '%(dev)s.'), {'vol': name, 'dev': devname})
                raise

            self._decrement_attached_count(volume)
Example #15
 def _send_request(self, object_type, key, request):
     try:
         response = urllib2.urlopen(request)
     except urllib2.HTTPError as exc:
         if exc.code == 400 and hasattr(exc, 'read'):
             error = json.load(exc)
             err_msg = error['message']
             if err_msg.endswith(OBJ_NOT_FOUND_ERR):
                 LOG.warning(_LW("object %(key)s of "
                                 "type %(typ)s not found"),
                             {'key': key, 'typ': object_type})
                 raise exception.NotFound()
             elif err_msg == VOL_NOT_UNIQUE_ERR:
                 LOG.error(_LE("can't create 2 volumes with the same name"))
                 msg = (_('Volume by this name already exists'))
                 raise exception.VolumeBackendAPIException(data=msg)
             elif err_msg == VOL_OBJ_NOT_FOUND_ERR:
                 LOG.error(_LE("Can't find volume to map %s"), key)
                 raise exception.VolumeNotFound(volume_id=key)
             elif ALREADY_MAPPED_ERR in err_msg:
                 raise exception.XtremIOAlreadyMappedError()
         LOG.error(_LE('Bad response from XMS, %s'), exc.read())
         msg = (_('Exception: %s') % six.text_type(exc))
         raise exception.VolumeDriverException(message=msg)
     if response.code >= 300:
         LOG.error(_LE('bad API response, %s'), response.msg)
         msg = (_('bad response from XMS got http code %(code)d, %(msg)s') %
                {'code': response.code, 'msg': response.msg})
         raise exception.VolumeBackendAPIException(data=msg)
     return response
Example #16
 def update_migrated_volume(self, context, volume, new_volume,
                            original_volume_status):
     orig_id = volume['id']
     orig_name = self._attach.volumeName(orig_id)
     temp_id = new_volume['id']
     temp_name = self._attach.volumeName(temp_id)
     vols = {v.name: True for v in self._attach.api().volumesList()}
     if temp_name not in vols:
         LOG.error(_LE('StorPool update_migrated_volume(): it seems '
                       'that the StorPool volume "%(tid)s" was not '
                       'created as part of the migration from '
                       '"%(oid)s".'), {'tid': temp_id, 'oid': orig_id})
         return {'_name_id': new_volume['_name_id'] or new_volume['id']}
     elif orig_name in vols:
         LOG.error(_LE('StorPool update_migrated_volume(): both '
                       'the original volume "%(oid)s" and the migrated '
                       'StorPool volume "%(tid)s" seem to exist on '
                       'the StorPool cluster.'),
                   {'oid': orig_id, 'tid': temp_id})
         return {'_name_id': new_volume['_name_id'] or new_volume['id']}
     else:
         try:
             self._attach.api().volumeUpdate(temp_name,
                                             {'rename': orig_name})
             return {'_name_id': None}
         except spapi.ApiError as e:
             LOG.error(_LE('StorPool update_migrated_volume(): '
                           'could not rename %(tname)s to %(oname)s: '
                           '%(err)s'),
                       {'tname': temp_name, 'oname': orig_name, 'err': e})
             return {'_name_id': new_volume['_name_id'] or new_volume['id']}
Example #17
File: srb.py Project: kaitlin-farr/cinder
    def extend_thin_pool(self):
        """Extend the size of the thin provisioning pool.

        This method extends the size of a thin provisioning pool to 95% of the
        size of the VG, if the VG is configured as thin and owns a thin
        provisioning pool.

        :raises: putils.ProcessExecutionError
        """
        if self.vg_thin_pool is None:
            return

        new_size_str = self._calculate_thin_pool_size()
        try:
            cmd = lvm.LVM.LVM_CMD_PREFIX + ['lvextend', '-L', new_size_str,
                                            "%s/%s-pool" % (self.vg_name,
                                                            self.vg_name)]
            self._execute(*cmd,
                          root_helper=self._root_helper,
                          run_as_root=True)
        except putils.ProcessExecutionError as err:
            LOG.exception(_LE('Error extending thin provisioning pool'))
            LOG.error(_LE('Cmd     :%s'), err.cmd)
            LOG.error(_LE('StdOut  :%s'), err.stdout)
            LOG.error(_LE('StdErr  :%s'), err.stderr)
            raise
Example #18
    def delete_volume(self, volume):
        """Delete SolidFire Volume from device.

        SolidFire allows multiple volumes with same name,
        volumeID is what's guaranteed unique.

        """
        sfaccount = self._get_sfaccount(volume['project_id'])
        if sfaccount is None:
            LOG.error(_LE("Account for Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "delete_volume operation!"), volume['id'])
            LOG.error(_LE("This usually means the volume was never "
                          "successfully created."))
            return

        params = {'accountID': sfaccount['accountID']}
        sf_vol = self._get_sf_volume(volume['id'], params)

        if sf_vol is not None:
            params = {'volumeID': sf_vol['volumeID']}
            data = self._issue_api_request('DeleteVolume', params)

            if 'result' not in data:
                msg = _("Failed to delete SolidFire Volume: %s") % data
                raise exception.SolidFireAPIException(msg)
        else:
            LOG.error(_LE("Volume ID %s was not found on "
                          "the SolidFire Cluster while attempting "
                          "delete_volume operation!"), volume['id'])
Example #19
    def _delete_lun(self, volume):
        """Deletes a lun.

        :param volume:  volume object provided by the Manager
        """
        success_msgs = ['Delete resource successfully', '']

        LOG.debug("Deleting lun %s.", volume['id'])

        # If the LUN has ever had a snapshot, it has an SRA and
        # policy that must be deleted first.
        self._delete_lun_snapshot_bookkeeping(volume['id'])

        try:
            self._send_cmd(self.vmem_mg.lun.delete_lun,
                           success_msgs, volume['id'])

        except vmemclient.core.error.NoMatchingObjectIdError:
            LOG.debug("Lun %s already deleted, continuing.", volume['id'])

        except exception.ViolinBackendErrExists:
            LOG.exception(_LE("Lun %s has dependent snapshots, "
                              "skipping lun deletion."), volume['id'])
            raise exception.VolumeIsBusy(volume_name=volume['id'])

        except Exception:
            LOG.exception(_LE("Lun delete for %s failed!"), volume['id'])
            raise
Example #20
    def create_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Takes a snapshot of the consistency group.

        :param context: the context of the caller.
        :param cgsnapshot: Information about the snapshot to take.
        :return: Updated model_update, snapshots.
        :raises: VolumeBackendAPIException.
        """
        cgid = cgsnapshot['consistencygroup_id']
        snapshotid = cgsnapshot['id']

        with self._client.open_connection() as api:
            profile = api.find_replay_profile(cgid)
            if profile:
                LOG.debug('profile %s replayid %s', profile, snapshotid)
                if api.snap_cg_replay(profile, snapshotid, 0):
                    snapshots = objects.SnapshotList().get_all_for_cgsnapshot(
                        context, snapshotid)
                    for snapshot in snapshots:
                        snapshot.status = 'available'

                    model_update = {'status': 'available'}

                    return model_update, snapshots

                # That didn't go well.  Tell them why.  Then bomb out.
                LOG.error(_LE('Failed to snap Consistency Group %s'), cgid)
            else:
                LOG.error(_LE('Cannot find Consistency Group %s'), cgid)

        raise exception.VolumeBackendAPIException(
            _('Unable to snap Consistency Group %s') % cgid)
Example #21
    def _get_sf_volume(self, uuid, params):
        data = self._issue_api_request('ListVolumesForAccount', params)
        if 'result' not in data:
            msg = _("Failed to get SolidFire Volume: %s") % data
            raise exception.SolidFireAPIException(msg)

        found_count = 0
        sf_volref = None
        for v in data['result']['volumes']:
            # NOTE(jdg): In the case of "name" we can't
            # update that on manage/import, so we use
            # the uuid attribute
            meta = v.get('attributes')
            alt_id = meta.get('uuid', 'empty')

            if uuid in v['name'] or uuid in alt_id:
                found_count += 1
                sf_volref = v
                LOG.debug("Mapped SolidFire volumeID %(volume_id)s "
                          "to cinder ID %(uuid)s.",
                          {'volume_id': v['volumeID'], 'uuid': uuid})

        if found_count == 0:
            # NOTE(jdg): Previously we would raise here, but there are cases
            # where this might be a cleanup for a failed delete.
            # Until we get better states we'll just log an error
            LOG.error(_LE("Volume %s, not found on SF Cluster."), uuid)

        if found_count > 1:
            LOG.error(_LE("Found %(count)s volumes mapped to id: %(uuid)s."),
                      {'count': found_count,
                       'uuid': uuid})
            raise exception.DuplicateSfVolumeNames(vol_name=uuid)

        return sf_volref
Example #22
File: ceph.py Project: carriercomm/cinder
    def _piped_execute(self, cmd1, cmd2):
        """Pipe output of cmd1 into cmd2."""
        LOG.debug("Piping cmd1='%s' into...", ' '.join(cmd1))
        LOG.debug("cmd2='%s'", ' '.join(cmd2))

        try:
            p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        except OSError as e:
            LOG.error(_LE("Pipe1 failed - %s "), e)
            raise

        # NOTE(dosaboy): ensure that the pipe is blocking. This is to work
        # around the case where eventlet.green.subprocess is used, which
        # seems to use a non-blocking pipe.
        flags = fcntl.fcntl(p1.stdout, fcntl.F_GETFL) & (~os.O_NONBLOCK)
        fcntl.fcntl(p1.stdout, fcntl.F_SETFL, flags)

        try:
            p2 = subprocess.Popen(cmd2, stdin=p1.stdout,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
        except OSError as e:
            LOG.error(_LE("Pipe2 failed - %s "), e)
            raise

        p1.stdout.close()
        stdout, stderr = p2.communicate()
        return p2.returncode, stderr
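
_piped_execute is the subprocess equivalent of a shell pipeline, cmd1 | cmd2. A minimal standalone version of the wiring, assuming a POSIX system with echo and tr available:

import subprocess

p1 = subprocess.Popen(['echo', 'hello'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(['tr', 'a-z', 'A-Z'], stdin=p1.stdout,
                      stdout=subprocess.PIPE)
p1.stdout.close()  # so p1 gets SIGPIPE if p2 exits early
stdout, _ = p2.communicate()
print(stdout)  # b'HELLO\n'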
Example #23
File: api.py Project: apporc/cinder
    def _create_cg_from_cgsnapshot(self, context, group, cgsnapshot):
        try:
            snapshots = objects.SnapshotList.get_all_for_cgsnapshot(
                context, cgsnapshot.id)

            if not snapshots:
                msg = _("Cgsnahost is empty. No consistency group "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            for snapshot in snapshots:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['cgsnapshot'] = cgsnapshot
                kwargs['consistencygroup'] = group
                kwargs['snapshot'] = snapshot
                volume_type_id = snapshot.volume_type_id
                if volume_type_id:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, volume_type_id)

                # Since cgsnapshot is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           snapshot.volume_size,
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Error occurred when creating volume "
                                      "entry from snapshot in the process of "
                                      "creating consistency group %(group)s "
                                      "from cgsnapshot %(cgsnap)s."),
                                  {'group': group.id,
                                   'cgsnap': cgsnapshot.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(_LE("Error occurred when creating consistency "
                                  "group %(group)s from cgsnapshot "
                                  "%(cgsnap)s."),
                              {'group': group.id,
                               'cgsnap': cgsnapshot.id})

        volumes = self.db.volume_get_all_by_group(context,
                                                  group.id)
        for vol in volumes:
            # Update the host field for the volume.
            self.db.volume_update(context, vol['id'],
                                  {'host': group.get('host')})

        self.volume_rpcapi.create_consistencygroup_from_src(
            context, group, cgsnapshot)
Example #24
    def create_volume_from_snapshot(self, volume, snapshot):
        try:
            ds_snapshot_uuid = self._get_metadata_value(
                snapshot, self.METADATA_DS_SNAPSHOT_UUID)

            out = self.exec_webapi('SYNO.Core.ISCSI.LUN',
                                   'clone_snapshot',
                                   1,
                                   src_lun_uuid=snapshot['volume']['name'],
                                   snapshot_uuid=ds_snapshot_uuid,
                                   cloned_lun_name=volume['name'],
                                   clone_type='CINDER')

            self.check_response(out)

        except exception.SnapshotMetadataNotFound:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to get snapshot UUID. [%s]'),
                              snapshot['id'])
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to create_volume_from_snapshot. '
                                  '[%s]'),
                              snapshot['id'])

        if not self._check_lun_status_normal(volume['name']):
            message = (_('Volume [%(vol)s] snapshot [%(snapshot)s] status '
                         'is not healthy.') %
                       {'vol': snapshot['volume']['name'],
                        'snapshot': ds_snapshot_uuid})
            raise exception.VolumeDriverException(message=message)

        if snapshot['volume_size'] < volume['size']:
            self.extend_volume(volume, volume['size'])
Example #25
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create volume from snapshot.

        - search for snapshot and retention_policy
        - create a view from snapshot and attach view
        - create a volume and attach volume
        - copy data from attached view to attached volume
        - detach volume and view and finally delete view
        """
        snap_name = self.get_snap_name(snapshot.id)
        view_name = self.get_view_name(volume.id)
        vol_name = self.get_volume_name(volume.id)
        cview = src_attach_info = dest_attach_info = None
        rpolicy = self.get_policy()
        properties = utils.brick_get_connector_properties()
        LOG.debug("Searching for snapshot: %s in K2.", snap_name)
        snap_rs = self.client.search("snapshots", short_name=snap_name)
        if hasattr(snap_rs, 'hits') and snap_rs.total != 0:
            snap = snap_rs.hits[0]
            LOG.debug("Creating a view: %(view)s from snapshot: %(snap)s",
                      {'view': view_name, 'snap': snap_name})
            try:
                cview = self.client.new("snapshots",
                                        short_name=view_name,
                                        source=snap, retention_policy=rpolicy,
                                        is_exposable=True).save()
            except Exception as ex:
                LOG.exception(_LE("Creating a view: %(view)s from snapshot: "
                                  "%(snap)s failed"), {"view": view_name,
                                                       "snap": snap_name})
                raise exception.KaminarioCinderDriverException(
                    reason=six.text_type(ex.message))

        else:
            msg = _("Snapshot: %s search failed in K2.") % snap_name
            LOG.error(msg)
            raise exception.KaminarioCinderDriverException(reason=msg)

        try:
            conn = self.initialize_connection(cview, properties)
            src_attach_info = self._connect_device(conn)
            self.create_volume(volume)
            conn = self.initialize_connection(volume, properties)
            dest_attach_info = self._connect_device(conn)
            vol_utils.copy_volume(src_attach_info['device']['path'],
                                  dest_attach_info['device']['path'],
                                  snapshot.volume.size * units.Ki,
                                  self.configuration.volume_dd_blocksize,
                                  sparse=True)
            self.terminate_connection(volume, properties)
            self.terminate_connection(cview, properties)
        except Exception as ex:
            self.terminate_connection(cview, properties)
            self.terminate_connection(volume, properties)
            cview.delete()
            self.delete_volume(volume)
            LOG.exception(_LE("Copy to volume: %(vol)s from view: %(view)s "
                              "failed"), {"vol": vol_name, "view": view_name})
            raise exception.KaminarioCinderDriverException(
                reason=six.text_type(ex.message))
Example #26
    def delete_cgsnapshot(self, context, cgsnapshot, snapshots):
        """Deletes a consistency group snapshot."""

        client = self._login()
        snap_name_base = "snapshot-" + cgsnapshot.id

        snapshot_model_updates = []
        for i, snapshot in enumerate(snapshots):
            snapshot_update = {'id': snapshot['id']}
            try:
                snap_name = snap_name_base + "-" + six.text_type(i)
                snap_info = client.getSnapshotByName(snap_name)
                client.deleteSnapshot(snap_info['id'])
                snapshot_update['status'] = 'deleted'
            except hpeexceptions.HTTPServerError as ex:
                in_use_msg = ('cannot be deleted because it is a clone '
                              'point')
                if in_use_msg in ex.get_description():
                    LOG.error(_LE("The snapshot cannot be deleted because "
                                  "it is a clone point."))
                snapshot_update['status'] = 'error'
            except Exception as ex:
                LOG.error(_LE("There was an error deleting snapshot %(id)s: "
                              "%(error)."),
                          {'id': snapshot['id'],
                           'error': six.text_type(ex)})
                snapshot_update['status'] = 'error'
            snapshot_model_updates.append(snapshot_update)

        self._logout(client)

        model_update = {'status': cgsnapshot.status}

        return model_update, snapshot_model_updates
Example #27
File: dsware.py Project: NetApp/cinder
    def _create_volume(self, volume_id, volume_size, isThin, volume_host):
        pool_id = 0
        result = 1

        # Query Dsware version.
        retcode = self.dsware_client.query_dsware_version()
        # Old version.
        if retcode == OLD_VERSION:
            pool_id = 0
        # New version.
        elif retcode == NEW_VERSION:
            pool_info = self._get_poolid_from_host(volume_host)
            if pool_info != self.pool_type:
                pool_id = int(pool_info)
        # Query Dsware version failed!
        else:
            LOG.error(_LE("Query Dsware version fail!"))
            msg = (_("Query Dsware version failed! Retcode is %s.") %
                   retcode)
            raise exception.VolumeBackendAPIException(data=msg)

        try:
            result = self.dsware_client.create_volume(
                volume_id, pool_id, volume_size, int(isThin))
        except Exception as e:
            LOG.exception(_LE("Create volume error, details is: %s."), e)
            raise

        if result != 0:
            msg = _("Dsware create volume failed! Result is: %s.") % result
            raise exception.VolumeBackendAPIException(data=msg)
Example #28
File: api.py Project: apporc/cinder
    def _create_cg_from_source_cg(self, context, group, source_cg):
        try:
            source_vols = self.db.volume_get_all_by_group(context,
                                                          source_cg.id)

            if not source_vols:
                msg = _("Source CG is empty. No consistency group "
                        "will be created.")
                raise exception.InvalidConsistencyGroup(reason=msg)

            for source_vol in source_vols:
                kwargs = {}
                kwargs['availability_zone'] = group.availability_zone
                kwargs['source_cg'] = source_cg
                kwargs['consistencygroup'] = group
                kwargs['source_volume'] = source_vol
                volume_type_id = source_vol.get('volume_type_id')
                if volume_type_id:
                    kwargs['volume_type'] = volume_types.get_volume_type(
                        context, volume_type_id)

                # Since source_cg is passed in, the following call will
                # create a db entry for the volume, but will not call the
                # volume manager to create a real volume in the backend yet.
                # If error happens, taskflow will handle rollback of quota
                # and removal of volume entry in the db.
                try:
                    self.volume_api.create(context,
                                           source_vol['size'],
                                           None,
                                           None,
                                           **kwargs)
                except exception.CinderException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Error occurred when creating cloned "
                                      "volume in the process of creating "
                                      "consistency group %(group)s from "
                                      "source CG %(source_cg)s."),
                                  {'group': group.id,
                                   'source_cg': source_cg.id})
        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(_LE("Error occurred when creating consistency "
                                  "group %(group)s from source CG "
                                  "%(source_cg)s."),
                              {'group': group.id,
                               'source_cg': source_cg.id})

        volumes = self.db.volume_get_all_by_group(context,
                                                  group.id)
        for vol in volumes:
            # Update the host field for the volume.
            self.db.volume_update(context, vol['id'],
                                  {'host': group.host})

        self.volume_rpcapi.create_consistencygroup_from_src(context, group,
                                                            None, source_cg)
Example #29
 def delete_snapshot(self, vdiname, snapname):
     try:
         (_stdout, _stderr) = self._run_dog('vdi', 'delete', '-s',
                                            snapname, vdiname)
         if _stderr.rstrip().endswith(self.DOG_RESP_SNAPSHOT_NOT_FOUND):
             LOG.warning(_LW('Snapshot "%s" not found.'), snapname)
         elif _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND):
             LOG.warning(_LW('Volume "%s" not found.'), vdiname)
         elif _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
             # NOTE(tishizaki)
             # The dog command does not return an error code even when
             # it cannot connect to the sheep process.
             # That is a Sheepdog bug.
             # To work around it, we need to check stderr here.
             # Even after Sheepdog is fixed, this check is still needed
             # for users running old Sheepdog versions.
             reason = (_('Failed to connect to sheep daemon. '
                         'addr: %(addr)s, port: %(port)s') %
                       {'addr': self.addr, 'port': self.port})
             raise exception.SheepdogError(reason=reason)
     except exception.SheepdogCmdError as e:
         cmd = e.kwargs['cmd']
         _stderr = e.kwargs['stderr']
         with excutils.save_and_reraise_exception():
             if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
                 msg = _LE('Failed to connect to sheep daemon. '
                           'addr: %(addr)s, port: %(port)s')
                 LOG.error(msg, {'addr': self.addr, 'port': self.port})
             else:
                 LOG.error(_LE('Failed to delete snapshot. (command: %s)'),
                           cmd)
Example #30
    def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start):
        lock = basic_lib.get_process_lock(self.hsnm_lock_file)
        with nested(self.hsnm_lock, lock):
            ret, stdout, stderr = self.exec_command("env", args=args, printflag=printflag)

        if not ret or noretry:
            raise loopingcall.LoopingCallDone((ret, stdout, stderr))

        if time.time() - start >= timeout:
            LOG.error(_LE("snm2 command timeout."))
            raise loopingcall.LoopingCallDone((ret, stdout, stderr))

        # SNM2 error code patterns that should abort further retries.
        error_patterns = ("DMEC002047", "DMEC002048", "DMED09000A",
                          "DMED090026", "DMED0E002B", "DMER03006A",
                          "DMER030080", "DMER0300B8", "DMER0800CF",
                          "DMER0800D[0-6D]", "DMES052602")
        if any(re.search(pattern, stderr) for pattern in error_patterns):
            LOG.error(_LE("Unexpected error occurred in snm2."))
            raise loopingcall.LoopingCallDone((ret, stdout, stderr))
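
_wait_for_exec_hsnm signals completion by raising loopingcall.LoopingCallDone; the value passed to it becomes the result of the looping call. A hedged sketch of the driving side, assuming oslo.service's loopingcall module; poll() and its stop condition are illustrative:

from oslo_service import loopingcall

attempts = {'n': 0}

def poll():
    attempts['n'] += 1
    if attempts['n'] >= 3:
        # Raising LoopingCallDone stops the loop; its argument becomes
        # the value returned by wait() below.
        raise loopingcall.LoopingCallDone(('ok', attempts['n']))

timer = loopingcall.FixedIntervalLoopingCall(poll)
print(timer.start(interval=0.1).wait())  # ('ok', 3)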
Example #31
    def _do_initialize_connection(self, volume, connector):
        """Perform necessary work to make an iSCSI connection.

        To be able to create an iSCSI connection from a given host to a
        volume, we must:
        1. Translate the given iSCSI name to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
        proper I/O group)
        """
        LOG.debug(
            'enter: initialize_connection: volume %(vol)s with connector'
            ' %(conn)s', {
                'vol': volume['id'],
                'conn': connector
            })

        volume_name = volume['name']

        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)

        chap_secret = self._helpers.get_chap_secret_for_host(host_name)
        chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
        if chap_enabled and chap_secret is None:
            chap_secret = self._helpers.add_chap_secret_to_host(host_name)
        elif not chap_enabled and chap_secret:
            LOG.warning(
                _LW('CHAP secret exists for host but CHAP is '
                    'disabled.'))

        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s.') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)
        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error(
                _LE('Did not find expected column name in '
                    'lsvdisk: %s.'), e)
            raise exception.VolumeBackendAPIException(
                data=_('initialize_connection: Missing volume attribute for '
                       'volume %s.') % volume_name)

        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].values():
                if self.protocol not in node['enabled_protocols']:
                    continue
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)

            if not io_group_nodes:
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s.') % {
                             'gid': IO_group,
                             'vol': volume_name
                         })
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if not preferred_node_entry:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warning(
                    _LW('initialize_connection: Did not find a '
                        'preferred node for volume %s.'), volume_name)

            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']

            if len(preferred_node_entry['ipv4']):
                ipaddr = preferred_node_entry['ipv4'][0]
            else:
                ipaddr = preferred_node_entry['ipv6'][0]
            properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
            properties['target_iqn'] = preferred_node_entry['iscsi_name']
            if chap_secret:
                properties['auth_method'] = 'CHAP'
                properties['auth_username'] = connector['initiator']
                properties['auth_password'] = chap_secret
                properties['discovery_auth_method'] = 'CHAP'
                properties['discovery_auth_username'] = (
                    connector['initiator'])
                properties['discovery_auth_password'] = chap_secret

        except Exception:
            with excutils.save_and_reraise_exception():
                self._do_terminate_connection(volume, connector)
                LOG.error(
                    _LE('initialize_connection: Failed '
                        'to collect return '
                        'properties for volume %(vol)s and connector '
                        '%(conn)s.\n'), {
                            'vol': volume,
                            'conn': connector
                        })

        LOG.debug(
            'leave: initialize_connection:\n volume: %(vol)s\n '
            'connector %(conn)s\n properties: %(prop)s', {
                'vol': volume['id'],
                'conn': connector,
                'prop': properties
            })

        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }
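
For reference, the returned dictionary has roughly this shape for a CHAP-enabled connection; every value below is made up for illustration:

connection_info = {
    'driver_volume_type': 'iscsi',
    'data': {
        'target_discovered': False,
        'target_lun': 0,
        'volume_id': 'f0e1d2c3-0000-0000-0000-000000000000',
        'target_portal': '192.0.2.10:3260',
        'target_iqn': 'iqn.1986-03.com.example:cluster.node1',
        'auth_method': 'CHAP',
        'auth_username': 'iqn.1993-08.org.debian:01:abcdef',
        'auth_password': 'secret',
        'discovery_auth_method': 'CHAP',
        'discovery_auth_username': 'iqn.1993-08.org.debian:01:abcdef',
        'discovery_auth_password': 'secret',
    },
}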
Example #32
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        LOG.info(_LI('Removing iscsi_target for Volume ID: %s'), vol_id)
        vol_uuid_file = vol_name
        volume_path = os.path.join(self.volumes_dir, vol_uuid_file)
        if not os.path.exists(volume_path):
            LOG.warning(
                _LW('Volume path %s does not exist, '
                    'nothing to remove.'), volume_path)
            return

        if os.path.isfile(volume_path):
            iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file)
        else:
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        try:
            # NOTE(vish): --force is a workaround for bug:
            #             https://bugs.launchpad.net/cinder/+bug/1159948
            utils.execute('tgt-admin',
                          '--force',
                          '--delete',
                          iqn,
                          run_as_root=True)
        except putils.ProcessExecutionError as e:
            non_fatal_errors = ("can't find the target",
                                "access control rule does not exist")

            if any(error in e.stderr for error in non_fatal_errors):
                LOG.warning(
                    _LW("Failed target removal because target or "
                        "ACL's couldn't be found for iqn: %s."), iqn)
            else:
                LOG.error(
                    _LE("Failed to remove iscsi target for Volume "
                        "ID: %(vol_id)s: %(e)s"), {
                            'vol_id': vol_id,
                            'e': e
                        })
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        # NOTE(jdg): There's a bug in some versions of tgt that
        # will sometimes fail silently when using the force flag
        #    https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343
        # For now work-around by checking if the target was deleted,
        # if it wasn't, try again without the force.

        # This will NOT do any good for the case of multiple sessions,
        # which the force flag was added for, but it will address
        # the cases pointed out in bug:
        #    https://bugs.launchpad.net/cinder/+bug/1304122
        if self._get_target(iqn):
            try:
                LOG.warning(
                    _LW('Silent failure of target removal '
                        'detected, retry....'))
                utils.execute('tgt-admin', '--delete', iqn, run_as_root=True)
            except putils.ProcessExecutionError as e:
                LOG.error(
                    _LE("Failed to remove iscsi target for Volume "
                        "ID: %(vol_id)s: %(e)s"), {
                            'vol_id': vol_id,
                            'e': e
                        })
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        # NOTE(jdg): This *should* still be there, but in case
        # it's not we don't care, so just ignore it if it was
        # somehow deleted between entry to this method
        # and here
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        else:
            LOG.debug(
                'Volume path %s not found at end '
                'of remove_iscsi_target.', volume_path)
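
The silent-failure check above hinges on self._get_target(), which is not part of this excerpt. A minimal sketch of such a helper, assuming 'tgt-admin --show' prints each target as a line of the form 'Target <tid>: <iqn>' (a sketch only, not the verbatim upstream implementation):

    def _get_target(self, iqn):
        # Scan `tgt-admin --show` output for the line naming this IQN
        # and pull the numeric target id out of the 'Target <tid>:'
        # prefix. Returns None when the target is not present.
        (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
        for line in out.split('\n'):
            if iqn in line and line.startswith('Target'):
                # e.g. "Target 1: iqn.2010-10.org.openstack:<volume name>"
                return line.split()[1][:-1]  # strip the trailing ':'
        return None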
Example #33
    def create_iscsi_target(self,
                            name,
                            tid,
                            lun,
                            path,
                            chap_auth=None,
                            **kwargs):

        # Note(jdg) tid and lun aren't used by TgtAdm but remain for
        # compatibility

        # NOTE(jdg): Remove this when we get to the bottom of bug #1398078.
        # For now, since we intermittently hit "target already exists",
        # we're adding some debug info to try to pinpoint what's going on.
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        fileutils.ensure_tree(self.volumes_dir)

        vol_id = name.split(':')[1]
        write_cache = self.configuration.get('iscsi_write_cache', 'on')
        driver = self.iscsi_protocol

        if chap_auth is None:
            volume_conf = self.VOLUME_CONF % (name, path, driver, write_cache)
        else:
            chap_str = 'incominguser %s %s' % chap_auth
            volume_conf = self.VOLUME_CONF_WITH_CHAP_AUTH % (
                name, path, driver, chap_str, write_cache)
        LOG.debug('Creating iscsi_target for Volume ID: %s', vol_id)
        volumes_dir = self.volumes_dir
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(
                _LW('Persistence file already exists for volume, '
                    'found file at: %s'), volume_path)
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)
        LOG.debug(('Created volume path %(vp)s,\n'
                   'content: %(vc)s'), {
                       'vp': volume_path,
                       'vc': volume_conf
                   })

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name is not None:
            LOG.debug(
                'Detected old persistence file for volume '
                '%(vol)s at %(old_name)s', {
                    'vol': vol_id,
                    'old_name': old_name
                })
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.

            self._do_tgt_update(name)
        except putils.ProcessExecutionError as e:
            if "target already exists" in e.stderr:
                # Adding the additional Warning message below for a clear
                # ER marker (Ref bug: #1398078).
                LOG.warning(
                    _LW('Could not create target because '
                        'it already exists for volume: %s'), vol_id)
                LOG.debug('Exception was: %s', e)

            else:
                LOG.error(
                    _LE("Failed to create iscsi target for Volume "
                        "ID: %(vol_id)s: %(e)s"), {
                            'vol_id': vol_id,
                            'e': e
                        })

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        # Grab targets list for debug
        # Consider adding a check for lun 0 and 1 for tgtadm
        # before considering this as valid
        (out, err) = utils.execute('tgtadm',
                                   '--lld',
                                   'iscsi',
                                   '--op',
                                   'show',
                                   '--mode',
                                   'target',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(
                _LE("Failed to create iscsi target for Volume "
                    "ID: %(vol_id)s. Please ensure your tgtd config "
                    "file contains 'include %(volumes_dir)s/*'"), {
                        'vol_id': vol_id,
                        'volumes_dir': volumes_dir,
                    })
            raise exception.NotFound()

        # NOTE(jdg): Sometimes we have some issues with the backing lun
        # not being created, believe this is due to a device busy
        # or something related, so we're going to add some code
        # here that verifies the backing lun (lun 1) was created
        # and we'll try and recreate it if it's not there
        if not self._verify_backing_lun(iqn, tid):
            try:
                self._recreate_backing_lun(iqn, tid, name, path)
            except putils.ProcessExecutionError:
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

            # Finally check once more and if no go, fail and punt
            if not self._verify_backing_lun(iqn, tid):
                os.unlink(volume_path)
                raise exception.ISCSITargetCreateFailed(volume_id=vol_id)

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
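
Both _verify_backing_lun and _recreate_backing_lun are referenced above but not included in this excerpt. A rough sketch of the verification half, assuming 'tgt-admin --show' lists a 'LUN: 1' line inside the section belonging to each target (sketch only, not the verbatim upstream code):

    def _verify_backing_lun(self, iqn, tid):
        # Capture the block of `tgt-admin --show` output belonging to
        # this target, then check that LUN 1 (the backing LUN, as
        # opposed to the controller LUN 0) is listed in it.
        (out, err) = utils.execute('tgt-admin', '--show', run_as_root=True)
        capture = False
        target_info = []
        for line in out.split('\n'):
            if iqn in line and 'Target %s' % tid in line:
                capture = True
            elif 'Target ' in line:
                capture = False
            if capture:
                target_info.append(line)
        return any('LUN: 1' in line for line in target_info)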
Example #34
    def initialize_connection(self, volume, connector):
        """Perform the necessary work so that an iSCSI/FC connection can
        be made.

        To be able to create an iSCSI/FC connection from a given host to a
        volume, we must:
        1. Translate the given iSCSI name or WWNN to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)

        """

        LOG.debug(
            'enter: initialize_connection: volume %(vol)s with connector'
            ' %(conn)s', {
                'vol': volume['id'],
                'conn': connector
            })

        vol_opts = self._get_vdisk_params(volume['volume_type_id'])
        volume_name = volume['name']

        # Delete irrelevant connection information that later could result
        # in unwanted behaviour. For example, if FC is used yet the hosts
        # return iSCSI data, the driver will try to create the iSCSI connection
        # which can result in a nice error about reaching the per-host maximum
        # iSCSI initiator limit.
        # First make a copy so we don't mess with a caller's connector.
        connector = connector.copy()
        if vol_opts['protocol'] == 'FC':
            connector.pop('initiator', None)
        elif vol_opts['protocol'] == 'iSCSI':
            connector.pop('wwnns', None)
            connector.pop('wwpns', None)

        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)

        if vol_opts['protocol'] == 'iSCSI':
            chap_secret = self._helpers.get_chap_secret_for_host(host_name)
            chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
            if chap_enabled and chap_secret is None:
                chap_secret = self._helpers.add_chap_secret_to_host(host_name)
            elif not chap_enabled and chap_secret:
                LOG.warning(
                    _LW('CHAP secret exists for host but CHAP is '
                        'disabled'))

        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)
        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error(
                _LE('Did not find expected column name in '
                    'lsvdisk: %s'), e)
            msg = (_('initialize_connection: Missing volume '
                     'attribute for volume %s') % volume_name)
            raise exception.VolumeBackendAPIException(data=msg)

        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].values():
                if vol_opts['protocol'] not in node['enabled_protocols']:
                    continue
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)

            if not len(io_group_nodes):
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s') % {
                             'gid': IO_group,
                             'vol': volume_name
                         })
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if not preferred_node_entry and not vol_opts['multipath']:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warning(
                    _LW('initialize_connection: Did not find a preferred '
                        'node for volume %s'), volume_name)

            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']
            if vol_opts['protocol'] == 'iSCSI':
                type_str = 'iscsi'
                if len(preferred_node_entry['ipv4']):
                    ipaddr = preferred_node_entry['ipv4'][0]
                else:
                    ipaddr = preferred_node_entry['ipv6'][0]
                properties['target_portal'] = '%s:%s' % (ipaddr, '3260')
                properties['target_iqn'] = preferred_node_entry['iscsi_name']
                if chap_secret:
                    properties['auth_method'] = 'CHAP'
                    properties['auth_username'] = connector['initiator']
                    properties['auth_password'] = chap_secret
                    properties['discovery_auth_method'] = 'CHAP'
                    properties['discovery_auth_username'] = (
                        connector['initiator'])
                    properties['discovery_auth_password'] = chap_secret
            else:
                type_str = 'fibre_channel'
                conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)

                # If conn_wwpns is empty, then that means that there were
                # no target ports with visibility to any of the initiators.
                # We will either fail the attach, or return all target
                # ports, depending on the value of the
                # storwize_svc_npiv_compatibility_mode flag.
                if len(conn_wwpns) == 0:
                    npiv_compat = self.configuration.\
                        storwize_svc_npiv_compatibility_mode
                    if not npiv_compat:
                        msg = (_('Could not get FC connection information for '
                                 'the host-volume connection. Is the host '
                                 'configured properly for FC connections?'))
                        LOG.error(msg)
                        raise exception.VolumeBackendAPIException(data=msg)
                    else:
                        for node in self._state['storage_nodes'].values():
                            conn_wwpns.extend(node['WWPN'])

                if not vol_opts['multipath']:
                    # preferred_node_entry can have a list of WWPNs while only
                    # one WWPN may be available on the storage host.  Here we
                    # walk through the nodes until we find one that works,
                    # default to the first WWPN otherwise.
                    for WWPN in preferred_node_entry['WWPN']:
                        if WWPN in conn_wwpns:
                            properties['target_wwn'] = WWPN
                            break
                    else:
                        LOG.warning(
                            _LW('Unable to find a preferred node match'
                                ' for node %(node)s in the list of '
                                'available WWPNs on %(host)s. '
                                'Using first available.'), {
                                    'node': preferred_node,
                                    'host': host_name
                                })
                        properties['target_wwn'] = conn_wwpns[0]
                else:
                    properties['target_wwn'] = conn_wwpns

                i_t_map = self._make_initiator_target_map(
                    connector['wwpns'], conn_wwpns)
                properties['initiator_target_map'] = i_t_map

                # specific for z/VM, refer to cinder bug 1323993
                if "zvm_fcp" in connector:
                    properties['zvm_fcp'] = connector['zvm_fcp']
        except Exception:
            with excutils.save_and_reraise_exception():
                self.terminate_connection(volume, connector)
                LOG.error(
                    _LE('initialize_connection: Failed '
                        'to collect return '
                        'properties for volume %(vol)s and connector '
                        '%(conn)s.\n'), {
                            'vol': volume,
                            'conn': connector
                        })

        LOG.debug(
            'leave: initialize_connection:\n volume: %(vol)s\n '
            'connector %(conn)s\n properties: %(prop)s', {
                'vol': volume['id'],
                'conn': connector,
                'prop': properties
            })

        return {
            'driver_volume_type': type_str,
            'data': properties,
        }
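
The FC branch above leans on _make_initiator_target_map, which is not shown here. The conventional shape for such a helper in Cinder drivers is to map every initiator WWPN to the full list of target WWPNs; a minimal sketch under that assumption (not the verbatim driver method):

    def _make_initiator_target_map(self, initiator_wwpns, target_wwpns):
        # Sketch: zone every initiator port to all returned target
        # ports, e.g. {'10000090fa0d6754': ['500507680140a2f0']}.
        i_t_map = {}
        for i_wwpn in initiator_wwpns:
            i_t_map[str(i_wwpn)] = list(target_wwpns)
        return i_t_map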
Example #35
    def _do_initialize_connection(self, volume, connector):
        """Perform necessary work to make a FC connection.

        To be able to create an FC connection from a given host to a
        volume, we must:
        1. Translate the given WWNN to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
           proper I/O group)

        """
        LOG.debug(
            'enter: initialize_connection: volume %(vol)s with connector'
            ' %(conn)s', {
                'vol': volume['id'],
                'conn': connector
            })

        volume_name = volume['name']

        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)

        volume_attributes = self._helpers.get_vdisk_attributes(volume_name)
        if volume_attributes is None:
            msg = (_('initialize_connection: Failed to get attributes'
                     ' for volume %s.') % volume_name)
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)

        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)
        try:
            preferred_node = volume_attributes['preferred_node_id']
            IO_group = volume_attributes['IO_group_id']
        except KeyError as e:
            LOG.error(
                _LE('Did not find expected column name in '
                    'lsvdisk: %s.'), e)
            raise exception.VolumeBackendAPIException(
                data=_('initialize_connection: Missing volume attribute for '
                       'volume %s.') % volume_name)

        try:
            # Get preferred node and other nodes in I/O group
            preferred_node_entry = None
            io_group_nodes = []
            for node in self._state['storage_nodes'].values():
                if node['id'] == preferred_node:
                    preferred_node_entry = node
                if node['IO_group'] == IO_group:
                    io_group_nodes.append(node)

            if not len(io_group_nodes):
                msg = (_('initialize_connection: No node found in '
                         'I/O group %(gid)s for volume %(vol)s.') % {
                             'gid': IO_group,
                             'vol': volume_name
                         })
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)

            if not preferred_node_entry:
                # Get 1st node in I/O group
                preferred_node_entry = io_group_nodes[0]
                LOG.warning(
                    _LW('initialize_connection: Did not find a '
                        'preferred node for volume %s.'), volume_name)

            properties = {}
            properties['target_discovered'] = False
            properties['target_lun'] = lun_id
            properties['volume_id'] = volume['id']

            conn_wwpns = self._helpers.get_conn_fc_wwpns(host_name)

            # If conn_wwpns is empty, then that means that there were
            # no target ports with visibility to any of the initiators
            # so we return all target ports.
            if len(conn_wwpns) == 0:
                for node in self._state['storage_nodes'].values():
                    conn_wwpns.extend(node['WWPN'])

            properties['target_wwn'] = conn_wwpns

            i_t_map = self._make_initiator_target_map(connector['wwpns'],
                                                      conn_wwpns)
            properties['initiator_target_map'] = i_t_map

            # specific for z/VM, refer to cinder bug 1323993
            if "zvm_fcp" in connector:
                properties['zvm_fcp'] = connector['zvm_fcp']
        except Exception:
            with excutils.save_and_reraise_exception():
                self._do_terminate_connection(volume, connector)
                LOG.error(
                    _LE('initialize_connection: Failed '
                        'to collect return '
                        'properties for volume %(vol)s and connector '
                        '%(conn)s.\n'), {
                            'vol': volume,
                            'conn': connector
                        })

        LOG.debug(
            'leave: initialize_connection:\n volume: %(vol)s\n '
            'connector %(conn)s\n properties: %(prop)s', {
                'vol': volume['id'],
                'conn': connector,
                'prop': properties
            })

        return {
            'driver_volume_type': 'fibre_channel',
            'data': properties,
        }
Example #36
    def _get_service_target(self, volume):
        """Get the available service parameters

           Get the available service parameters for a given volume using
           its type.
           :param volume: dictionary volume reference
        """

        hdp = self._get_service(volume)
        info = _loc_info(volume['provider_location'])
        (arid, lun_name) = info['id_lu']

        evsid = self.bend.get_evs(self.config['hnas_cmd'],
                                  self.config['mgmt_ip0'],
                                  self.config['username'],
                                  self.config['password'], hdp)
        svc_label = utils.extract_host(volume['host'], level='pool')
        svc = self.config['services'][svc_label]

        LOG.info(_LI("_get_service_target hdp: %s."), hdp)
        LOG.info(_LI("config[services]: %s."), self.config['services'])

        mapped, lunid, tgt = self.bend.check_lu(self.config['hnas_cmd'],
                                                self.config['mgmt_ip0'],
                                                self.config['username'],
                                                self.config['password'],
                                                lun_name, hdp)

        LOG.info(_LI("Target is %(map)s! Targetlist = %(tgtl)s."), {
            'map': "mapped" if mapped else "not mapped",
            'tgtl': tgt
        })

        # The volume is already mapped to a LUN, so no need to create any
        # targets
        if mapped:
            service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'],
                       svc['port'], hdp, tgt['alias'], tgt['secret'])
            return service

        # Each EVS can have up to 32 targets. Each target can have up to 32
        # LUNs attached and has the name format 'evs<id>-tgt<0-N>'. We run
        # from the first 'evs1-tgt0' until we find a target that is not yet
        # created in the backend, or one that exists but still has free
        # LUN slots.
        found_tgt = False
        for i in range(0, MAX_HNAS_ISCSI_TARGETS):
            tgt_alias = 'evs' + evsid + '-tgt' + six.text_type(i)
            # TODO(erlon): we need to go to the BE 32 times here
            tgt_exist, tgt = self.bend.check_target(self.config['hnas_cmd'],
                                                    self.config['mgmt_ip0'],
                                                    self.config['username'],
                                                    self.config['password'],
                                                    hdp, tgt_alias)
            if (tgt_exist and len(tgt['luns']) < 32) or not tgt_exist:
                # Target exists and has free slots, or target does not
                # exist yet. Proceed and use the target, or create a
                # target using this name.
                found_tgt = True
                break

        # If we get here and found_tgt is not True, we ran out of targets;
        # raise and bail out.
        if not found_tgt:
            LOG.error(_LE("No more targets avaliable."))
            raise exception.NoMoreTargets(param=tgt_alias)

        LOG.info(_LI("Using target label: %s."), tgt_alias)

        # Check if we have a secret stored for this target so we don't have to
        # go to BE on every query
        if 'targets' not in self.config.keys():
            self.config['targets'] = {}

        if tgt_alias not in self.config['targets'].keys():
            self.config['targets'][tgt_alias] = {}

        tgt_info = self.config['targets'][tgt_alias]

        # HNAS - one time lookup
        # see if the client supports CHAP authentication and if
        # iscsi_secret has already been set, retrieve the secret if
        # available, otherwise generate and store
        if self.config['chap_enabled'] == 'True':
            # It may not exist, create and set secret.
            if 'iscsi_secret' not in tgt_info.keys():
                LOG.info(_LI("Retrieving secret for service: %s."), tgt_alias)

                out = self.bend.get_targetsecret(self.config['hnas_cmd'],
                                                 self.config['mgmt_ip0'],
                                                 self.config['username'],
                                                 self.config['password'],
                                                 tgt_alias, hdp)
                tgt_info['iscsi_secret'] = out
                if tgt_info['iscsi_secret'] == "":
                    random_secret = utils.generate_password()[0:15]
                    tgt_info['iscsi_secret'] = random_secret
                    self.bend.set_targetsecret(self.config['hnas_cmd'],
                                               self.config['mgmt_ip0'],
                                               self.config['username'],
                                               self.config['password'],
                                               tgt_alias, hdp,
                                               tgt_info['iscsi_secret'])

                    LOG.info(_LI("Set tgt CHAP secret for service: %s."),
                             tgt_alias)
        else:
            # We set blank password when the client does not
            # support CHAP. Later on, if the client tries to create a new
            # target that does not exist in the backend, we check for this
            # value and use a temporary dummy password.
            if 'iscsi_secret' not in tgt_info.keys():
                # Warn only the first time.
                LOG.info(_LI("CHAP authentication disabled."))

            tgt_info['iscsi_secret'] = ""

        if 'tgt_iqn' not in tgt_info:
            LOG.info(_LI("Retrieving target for service: %s."), tgt_alias)

            out = self.bend.get_targetiqn(self.config['hnas_cmd'],
                                          self.config['mgmt_ip0'],
                                          self.config['username'],
                                          self.config['password'], tgt_alias,
                                          hdp, tgt_info['iscsi_secret'])
            tgt_info['tgt_iqn'] = out

        self.config['targets'][tgt_alias] = tgt_info

        service = (svc['iscsi_ip'], svc['iscsi_port'], svc['ctl'], svc['port'],
                   hdp, tgt_alias, tgt_info['iscsi_secret'])

        return service
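
A caller is expected to unpack the seven-element service tuple assembled above; a hypothetical call site (the local names are assumed, the tuple layout matches the 'service = (...)' assignments in the method):

        # Hypothetical call site.
        (iscsi_ip, iscsi_port, ctl, port, hdp,
         tgt_alias, tgt_secret) = self._get_service_target(volume)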
Example #37
    def initialize_connection(self, volume, connector):
        '''Initializes the connection and returns connection info.

        Assign any created volume to a compute node/host so that it can be
        used from that host.

        The driver returns a driver_volume_type of 'fibre_channel'.
        The target_wwn can be a single entry or a list of wwns that
        correspond to the list of remote wwn(s) that will export the volume.
        '''

        # We use the volume id as the volume name, since it is a
        # known unique name.
        volume_name = volume.get('id')
        LOG.debug('Initialize connection: %s', volume_name)
        with self._client.open_connection() as api:
            try:
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                # Find our server.
                wwpns = connector.get('wwpns')
                scserver = None
                for wwn in wwpns:
                    scserver = api.find_server(ssn, wwn)
                    if scserver is not None:
                        break

                # No? Create it.
                if scserver is None:
                    server_folder = self.configuration.dell_sc_server_folder
                    scserver = api.create_server_multiple_hbas(
                        ssn, server_folder, wwpns)
                # Find the volume on the storage center.
                scvolume = api.find_volume(ssn, volume_name)
                if scserver is not None and scvolume is not None:
                    mapping = api.map_volume(scvolume, scserver)
                    if mapping is not None:
                        # Since we just mapped our volume we had best update
                        # our sc volume object.
                        scvolume = api.find_volume(ssn, volume_name)
                        lun, targets, init_targ_map = api.find_wwns(
                            scvolume, scserver)
                        if lun is not None and len(targets) > 0:
                            data = {
                                'driver_volume_type': 'fibre_channel',
                                'data': {
                                    'target_lun': lun,
                                    'target_discovered': True,
                                    'target_wwn': targets,
                                    'initiator_target_map': init_targ_map
                                }
                            }
                            LOG.debug('Return FC data: %s', data)
                            return data
                        LOG.error(_LE('Lun mapping returned null!'))

            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Failed to initialize connection.'))

        # We get here because our mapping is None, so blow up.
        raise exception.VolumeBackendAPIException(_('unable to map volume'))
Example #38
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        LOG.info(_LI('Removing iscsi_target for: %s'), vol_id)
        vol_uuid_file = vol_name
        volume_path = os.path.join(self._get_volumes_dir(), vol_uuid_file)
        if not os.path.exists(volume_path):
            LOG.warning(
                _LW('Volume path %s does not exist, '
                    'nothing to remove.'), volume_path)
            return

        if os.path.isfile(volume_path):
            iqn = '%s%s' % (self.iscsi_target_prefix, vol_uuid_file)
        else:
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        target_exists = False
        try:
            (out, err) = utils.execute('iscsictl',
                                       '-c',
                                       'target=%s' % iqn,
                                       run_as_root=True)
            LOG.debug("StdOut from iscsictl -c: %s", out)
            LOG.debug("StdErr from iscsictl -c: %s", err)
        except putils.ProcessExecutionError as e:
            if "NOT found" in e.stdout:
                LOG.info(
                    _LI("No iscsi target present for volume "
                        "id:%(vol_id)s: %(e)s"), {
                            'vol_id': vol_id,
                            'e': e
                        })
                return
            else:
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        else:
            target_exists = True

        try:
            utils.execute('iscsictl',
                          '-s',
                          'target=%s' % iqn,
                          run_as_root=True)
        except putils.ProcessExecutionError as e:
            # There exists a race condition where multiple calls to
            # remove_iscsi_target come in simultaneously. If we can poll
            # for a target successfully but it is gone before we can remove
            # it, fail silently
            if "is not found" in e.stderr and target_exists:
                LOG.info(
                    _LI("No iscsi target present for volume "
                        "id:%(vol_id)s: %(e)s"), {
                            'vol_id': vol_id,
                            'e': e
                        })
                return
            else:
                LOG.error(
                    _LE("Failed to remove iscsi target for volume "
                        "id:%(vol_id)s: %(e)s"), {
                            'vol_id': vol_id,
                            'e': e
                        })
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        # Carried over from tgt
        # NOTE(jdg): This *should* still be there, but in case
        # it's not we don't care, so just ignore it if it was
        # somehow deleted between entry to this method
        # and here
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        else:
            LOG.debug(
                'Volume path %s not found at end '
                'of remove_iscsi_target.', volume_path)
Example #39
    def request(self, path, request, body=None, **kwargs):
        """Make an HTTP request and return the results

        :param path: Path used with the initialized URL to make a request
        :param request: HTTP request type (GET, POST, PUT, DELETE)
        :param body: HTTP body of request
        :key accept: Set HTTP 'Accept' header with this value
        :key base_path: Override the base_path for this request
        :key content: Set HTTP 'Content-Type' header with this value
        """
        out_hdrs = self.headers.copy()
        if kwargs.get("accept"):
            out_hdrs['accept'] = kwargs.get("accept")

        if body:
            if isinstance(body, dict):
                body = str(json.dumps(body))
            out_hdrs['content-length'] = len(body)

        zfssaurl = self._path(path, kwargs.get("base_path"))
        req = urllib.request.Request(zfssaurl, body, out_hdrs)
        req.get_method = lambda: request
        maxreqretries = kwargs.get("maxreqretries", 10)
        retry = 0
        response = None

        LOG.debug('Request: %(request)s %(url)s', {
            'request': request,
            'url': zfssaurl
        })
        LOG.debug('Out headers: %s', out_hdrs)
        if body:
            LOG.debug('Body: %s', body)

        context = None
        if hasattr(ssl, '_create_unverified_context'):
            context = ssl._create_unverified_context()

        while retry < maxreqretries:
            try:
                if context:
                    response = urllib.request.urlopen(req,
                                                      timeout=self.timeout,
                                                      context=context)
                else:
                    response = urllib.request.urlopen(req,
                                                      timeout=self.timeout)
            except urllib.error.HTTPError as err:
                if err.code == http_client.NOT_FOUND:
                    LOG.debug('REST Not Found: %s', err.code)
                else:
                    LOG.error(_LE('REST Not Available: %s'), err.code)

                if err.code == http_client.SERVICE_UNAVAILABLE and \
                   retry < maxreqretries:
                    retry += 1
                    time.sleep(1)
                    LOG.error(_LE('Server busy, retrying request: %s'), retry)
                    continue
                if (err.code == http_client.UNAUTHORIZED or
                    err.code == http_client.INTERNAL_SERVER_ERROR) and \
                   '/access/v1' not in zfssaurl:
                    try:
                        LOG.error(
                            _LE('Authorizing request: %(zfssaurl)s '
                                'retry: %(retry)d.'), {
                                    'zfssaurl': zfssaurl,
                                    'retry': retry
                                })
                        self._authorize()
                        req.add_header('x-auth-session',
                                       self.headers['x-auth-session'])
                    except RestClientError:
                        pass
                    retry += 1
                    time.sleep(1)
                    continue

                return RestResult(err=err)

            except urllib.error.URLError as err:
                LOG.error(_LE('URLError: %s'), err.reason)
                raise RestClientError(-1,
                                      name="ERR_URLError",
                                      message=err.reason)

            break

        if (response and (response.getcode() == http_client.SERVICE_UNAVAILABLE
                          and retry >= maxreqretries)):
            raise RestClientError(response.getcode(),
                                  name="ERR_HTTPError",
                                  message="REST Not Available: Disabled")

        return RestResult(response=response)
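
A minimal usage sketch for this client follows; the request path and the 'status' attribute read off RestResult are assumptions for illustration, not verbatim API:

        # Hypothetical usage -- path and RestResult attributes assumed.
        result = client.request('/api/storage/v1/pools', 'GET',
                                accept='application/json')
        if result.status == http_client.OK:
            LOG.debug('Pool listing succeeded.')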
Example #40
    def get_san_context(self, target_wwn_list):
        """Lookup SAN context for visible end devices.

        Look up each configured SAN and return a map of fabric name to the
        list of target WWNs visible to that fabric.
        """
        formatted_target_list = []
        fabric_map = {}
        fabrics = [x.strip() for x in
                   self.configuration.fc_fabric_names.split(',')]
        LOG.debug("Fabric List: %s", fabrics)
        LOG.debug("Target wwn List: %s", target_wwn_list)
        if len(fabrics) > 0:
            for t in target_wwn_list:
                formatted_target_list.append(
                    zm_utils.get_formatted_wwn(t.lower()))
            LOG.debug("Formatted Target wwn List: %s", formatted_target_list)
            for fabric_name in fabrics:
                fabric_ip = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_address')
                fabric_user = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_user')
                fabric_pwd = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_password')
                fabric_port = self.fabric_configs[fabric_name].safe_get(
                    'cisco_fc_fabric_port')
                zoning_vsan = self.fabric_configs[fabric_name].safe_get(
                    'cisco_zoning_vsan')

                # Get name server data from fabric and get the targets
                # logged in.
                nsinfo = None
                try:
                    conn = importutils.import_object(
                        self.configuration.cisco_sb_connector,
                        ipaddress=fabric_ip,
                        username=fabric_user,
                        password=fabric_pwd, port=fabric_port,
                        vsan=zoning_vsan)
                    nsinfo = conn.get_nameserver_info()
                    LOG.debug("show fcns database info from fabric: %s",
                              nsinfo)
                    conn.cleanup()
                except exception.CiscoZoningCliException:
                    with excutils.save_and_reraise_exception():
                        LOG.exception(_LE("Error getting show fcns database "
                                          "info."))
                except Exception:
                    msg = _("Failed to get show fcns database info.")
                    LOG.exception(msg)
                    raise exception.FCZoneDriverException(msg)
                # list() is needed so the per-item assignment below
                # also works on Python 3, where filter() is lazy.
                visible_targets = list(filter(
                    lambda x: x in formatted_target_list, nsinfo))

                if visible_targets:
                    LOG.info(_LI("Filtered targets for SAN is: %s"),
                             {fabric_name: visible_targets})
                    # getting rid of the ':' before returning
                    for idx, elem in enumerate(visible_targets):
                        visible_targets[idx] = six.text_type(
                            visible_targets[idx]).replace(':', '')
                    fabric_map[fabric_name] = visible_targets
                else:
                    LOG.debug("No targets are in the fcns info for SAN %s",
                              fabric_name)
        LOG.debug("Return SAN context output: %s", fabric_map)
        return fabric_map
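
Illustratively, the returned fabric_map keys each fabric name to its visible target WWNs with the ':' separators removed; a hypothetical value (names and WWNs made up):

        # Hypothetical return value (fabrics with no visible targets
        # are simply omitted from the map):
        # {'CISCO_FAB_1': ['50060b0000c26604', '201900051ee8e329']}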
Example #41
    def migrate_volume(self, ctxt, volume, host, thin=False, mirror_count=0):
        """Optimize the migration if the destination is on the same server.

        If the specified host is another back-end on the same server, and
        the volume is not attached, we can do the migration locally without
        going through iSCSI.
        """

        false_ret = (False, None)
        if volume['status'] != 'available':
            return false_ret
        if 'location_info' not in host['capabilities']:
            return false_ret
        info = host['capabilities']['location_info']
        try:
            (dest_type, dest_hostname, dest_vg, lvm_type, lvm_mirrors) =\
                info.split(':')
            lvm_mirrors = int(lvm_mirrors)
        except ValueError:
            return false_ret
        if (dest_type != 'LVMVolumeDriver' or dest_hostname != self.hostname):
            return false_ret

        if dest_vg != self.vg.vg_name:
            vg_list = volutils.get_all_volume_groups()
            try:
                next(vg for vg in vg_list if vg['name'] == dest_vg)
            except StopIteration:
                message = (_LE("Destination Volume Group %s does not exist") %
                           dest_vg)
                LOG.error(message)
                return false_ret

            helper = utils.get_root_helper()

            lvm_conf_file = self.configuration.lvm_conf_file
            if lvm_conf_file.lower() == 'none':
                lvm_conf_file = None

            dest_vg_ref = lvm.LVM(dest_vg, helper,
                                  lvm_type=lvm_type,
                                  executor=self._execute,
                                  lvm_conf=lvm_conf_file)

            self.remove_export(ctxt, volume)
            self._create_volume(volume['name'],
                                self._sizestr(volume['size']),
                                lvm_type,
                                lvm_mirrors,
                                dest_vg_ref)

            volutils.copy_volume(self.local_path(volume),
                                 self.local_path(volume, vg=dest_vg),
                                 volume['size'],
                                 self.configuration.volume_dd_blocksize,
                                 execute=self._execute)
            self._delete_volume(volume)
            model_update = self.create_export(ctxt, volume, vg=dest_vg)

            return (True, model_update)
        else:
            message = (_("Refusing to migrate volume ID: %(id)s. Please "
                         "check your configuration because source and "
                         "destination are the same Volume Group: %(name)s."),
                       {'id': volume['id'], 'name': self.vg.vg_name})
            LOG.exception(message)
            raise exception.VolumeBackendAPIException(data=message)
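
The location_info capability parsed above is a colon-separated five-tuple; a hypothetical value and how this method unpacks it:

        # Hypothetical capability string:
        info = 'LVMVolumeDriver:cinder-host-1:cinder-volumes:default:0'
        (dest_type, dest_hostname, dest_vg,
         lvm_type, lvm_mirrors) = info.split(':')
        # -> ('LVMVolumeDriver', 'cinder-host-1', 'cinder-volumes',
        #     'default', '0'); lvm_mirrors is then cast to int.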
Example #42
    def create_iscsi_target(self,
                            name,
                            tid,
                            lun,
                            path,
                            chap_auth=None,
                            **kwargs):

        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets prior to update: %s", out)
        volumes_dir = self._get_volumes_dir()
        fileutils.ensure_tree(volumes_dir)

        vol_id = name.split(':')[1]

        cfg_port = kwargs.get('portals_port')
        cfg_ips = kwargs.get('portals_ips')

        portals = ','.join(
            map(lambda ip: self._get_portal(ip, cfg_port), cfg_ips))

        if chap_auth is None:
            volume_conf = self.TARGET_FMT % (name, path, portals)
        else:
            volume_conf = self.TARGET_FMT_WITH_CHAP % (name, path, portals,
                                                       '"%s":"%s"' % chap_auth)
        LOG.debug('Creating iscsi_target for: %s', vol_id)
        volume_path = os.path.join(volumes_dir, vol_id)

        if os.path.exists(volume_path):
            LOG.warning(
                _LW('Persistence file already exists for volume, '
                    'found file at: %s'), volume_path)
        with open(volume_path, 'w+') as f:
            f.write(volume_conf)
        LOG.debug('Created volume path %(vp)s,\n'
                  'content: %(vc)s', {
                      'vp': volume_path,
                      'vc': volume_conf
                  })

        old_persist_file = None
        old_name = kwargs.get('old_name', None)
        if old_name:
            LOG.debug(
                'Detected old persistence file for volume '
                '%(vol)s at %(old_name)s', {
                    'vol': vol_id,
                    'old_name': old_name
                })
            old_persist_file = os.path.join(volumes_dir, old_name)

        try:
            # With the persistent tgts we create them
            # by creating the entry in the persist file
            # and then doing an update to get the target
            # created.
            (out, err) = utils.execute('iscsictl',
                                       '-S',
                                       'target=%s' % name,
                                       '-f',
                                       volume_path,
                                       '-x',
                                       self.config,
                                       run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(
                _LE("Failed to create iscsi target for volume "
                    "id:%(vol_id)s: %(e)s"), {
                        'vol_id': vol_id,
                        'e': e
                    })

            # Don't forget to remove the persistent file we created
            os.unlink(volume_path)
            raise exception.ISCSITargetCreateFailed(volume_id=vol_id)
        finally:
            LOG.debug("StdOut from iscsictl -S: %s", out)
            LOG.debug("StdErr from iscsictl -S: %s", err)

        # Grab targets list for debug
        (out, err) = utils.execute('iscsictl',
                                   '-c',
                                   'target=ALL',
                                   run_as_root=True)
        LOG.debug("Targets after update: %s", out)

        iqn = '%s%s' % (self.iscsi_target_prefix, vol_id)
        tid = self._get_target(iqn)
        if tid is None:
            LOG.error(
                _LE("Failed to create iscsi target for volume "
                    "id:%(vol_id)s. Please verify your configuration "
                    "in %(volumes_dir)'"), {
                        'vol_id': vol_id,
                        'volumes_dir': volumes_dir,
                    })
            raise exception.NotFound()

        if old_persist_file is not None and os.path.exists(old_persist_file):
            os.unlink(old_persist_file)

        return tid
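
The TARGET_FMT templates interpolated above are not part of this excerpt. A plausible shape, assuming an iscsictl-style config stanza keyed by target name, backing device, and portal list (this is an assumption, not the verbatim class attribute):

    # Assumed template shape -- not the verbatim class attribute.
    TARGET_FMT = """
              target:
                    TargetName=%s
                    TargetDevice=%s
                    PortalGroup=1@%s
              """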
Example #43
    def add_zones(self, zones, activate, active_zone_set=None):
        """Add zone configuration.

        This method will add the zone configuration passed by the user.
            input params:
            zones - zone names mapped to members.
            zone members are colon separated but case-insensitive
            {   zonename1:[zonemember1,zonemember2,...],
                zonename2:[zonemember1, zonemember2,...]...}
            e.g: {'openstack50060b0000c26604201900051ee8e329':
                    ['50:06:0b:00:00:c2:66:04', '20:19:00:05:1e:e8:e3:29']
                }
            activate - True/False
            active_zone_set - active zone set dict retrieved from
                              get_active_zone_set method
        """
        LOG.debug("Add Zones - Zones passed: %s", zones)
        cfg_name = None
        iterator_count = 0
        zone_with_sep = ''
        if not active_zone_set:
            active_zone_set = self.get_active_zone_set()
            LOG.debug("Active zone set: %s", active_zone_set)
        zone_list = active_zone_set[zone_constant.CFG_ZONES]
        LOG.debug("zone list: %s", zone_list)
        for zone in zones.keys():
            # If zone exists, its an update. Delete & insert
            # TODO(skolathur): This still need to be optimized
            # to an update call later. Now we just handled the
            # same zone name with same zone members.
            if (zone in zone_list):
                if set(zones[zone]) == set(zone_list[zone]):
                    break
                try:
                    self.delete_zones(zone, activate, active_zone_set)
                except exception.BrocadeZoningCliException:
                    with excutils.save_and_reraise_exception():
                        LOG.error(_LE("Deleting zone failed %s"), zone)
                LOG.debug("Deleted Zone before insert : %s", zone)
            zone_members_with_sep = ';'.join(
                str(member) for member in zones[zone])
            LOG.debug("Forming command for add zone")
            cmd = 'zonecreate "%(zone)s", "%(zone_members_with_sep)s"' % {
                'zone': zone,
                'zone_members_with_sep': zone_members_with_sep
            }
            LOG.debug("Adding zone, cmd to run %s", cmd)
            self.apply_zone_change(cmd.split())
            LOG.debug("Created zones on the switch")
            if (iterator_count > 0):
                zone_with_sep += ';'
            iterator_count += 1
            zone_with_sep += zone
        if not zone_with_sep:
            return
        try:
            # Get active zone set from device, as some of the zones
            # could be deleted.
            active_zone_set = self.get_active_zone_set()
            cfg_name = active_zone_set[zone_constant.ACTIVE_ZONE_CONFIG]
            cmd = None
            if not cfg_name:
                cfg_name = zone_constant.OPENSTACK_CFG_NAME
                cmd = 'cfgcreate "%(zoneset)s", "%(zones)s"' \
                    % {'zoneset': cfg_name, 'zones': zone_with_sep}
            else:
                cmd = 'cfgadd "%(zoneset)s", "%(zones)s"' \
                    % {'zoneset': cfg_name, 'zones': zone_with_sep}
            LOG.debug("New zone %s", cmd)
            self.apply_zone_change(cmd.split())
            if activate:
                self.activate_zoneset(cfg_name)
            else:
                self._cfg_save()
        except Exception as e:
            self._cfg_trans_abort()
            msg = _("Creating and activating zone set failed: "
                    "(Zone set=%(cfg_name)s error=%(err)s).") % {
                        'cfg_name': cfg_name,
                        'err': six.text_type(e)
                    }
            LOG.error(msg)
            raise exception.BrocadeZoningCliException(reason=msg)
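
Worked through with the docstring's sample input, the loop above ends up issuing switch commands along these lines (zone members joined with ';'):

        # zonecreate "openstack50060b0000c26604201900051ee8e329",
        #            "50:06:0b:00:00:c2:66:04;20:19:00:05:1e:e8:e3:29"
        # then, depending on whether a zone config is already active:
        # cfgadd "<active cfg name>",
        #        "openstack50060b0000c26604201900051ee8e329"
        # cfgcreate "<zone_constant.OPENSTACK_CFG_NAME>",
        #           "openstack50060b0000c26604201900051ee8e329"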
Example #44
    def create_volume_from_snapshot(self, volume, snapshot):
        """Create volume from snapshot.

        - search for snapshot and retention_policy
        - create a view from snapshot and attach view
        - create a volume and attach volume
        - copy data from attached view to attached volume
        - detach volume and view and finally delete view
        """
        snap_name = self.get_snap_name(snapshot.id)
        view_name = self.get_view_name(volume.id)
        vol_name = self.get_volume_name(volume.id)
        cview = src_attach_info = dest_attach_info = None
        rpolicy = self.get_policy()
        properties = utils.brick_get_connector_properties()
        LOG.debug("Searching for snapshot: %s in K2.", snap_name)
        snap_rs = self.client.search("snapshots", short_name=snap_name)
        if hasattr(snap_rs, 'hits') and snap_rs.total != 0:
            snap = snap_rs.hits[0]
            LOG.debug("Creating a view: %(view)s from snapshot: %(snap)s", {
                'view': view_name,
                'snap': snap_name
            })
            try:
                cview = self.client.new("snapshots",
                                        short_name=view_name,
                                        source=snap,
                                        retention_policy=rpolicy,
                                        is_exposable=True).save()
            except Exception as ex:
                LOG.exception(
                    _LE("Creating a view: %(view)s from snapshot: "
                        "%(snap)s failed."), {
                            "view": view_name,
                            "snap": snap_name
                        })
                raise exception.KaminarioCinderDriverException(
                    reason=six.text_type(ex))

        else:
            msg = _("Snapshot: %s search failed in K2.") % snap_name
            LOG.error(msg)
            raise exception.KaminarioCinderDriverException(reason=msg)

        try:
            conn = self.initialize_connection(cview, properties)
            src_attach_info = self._connect_device(conn)
            self.create_volume(volume)
            conn = self.initialize_connection(volume, properties)
            dest_attach_info = self._connect_device(conn)
            vol_utils.copy_volume(src_attach_info['device']['path'],
                                  dest_attach_info['device']['path'],
                                  snapshot.volume.size * units.Ki,
                                  self.configuration.volume_dd_blocksize,
                                  sparse=True)
            self.terminate_connection(volume, properties)
            self.terminate_connection(cview, properties)
        except Exception as ex:
            self.terminate_connection(cview, properties)
            self.terminate_connection(volume, properties)
            cview.delete()
            self.delete_volume(volume)
            LOG.exception(
                _LE("Copy to volume: %(vol)s from view: %(view)s "
                    "failed."), {
                        "vol": vol_name,
                        "view": view_name
                    })
            raise exception.KaminarioCinderDriverException(
                reason=six.text_type(ex))
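
_connect_device is referenced above but not shown. A sketch under the assumption that it wraps the standard os-brick connector flow; the attach-info dict shape is an assumption chosen to match the ['device']['path'] lookups in the copy step:

    def _connect_device(self, conn):
        # Sketch: build a connector for the protocol returned by
        # initialize_connection and attach the exported device locally.
        protocol = conn['driver_volume_type']
        connector = utils.brick_get_connector(protocol)
        device = connector.connect_volume(conn['data'])
        return {'conn': conn, 'connector': connector, 'device': device}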
Example #45
    def _cast_create_consistencygroup(self, context, group, request_spec_list,
                                      filter_properties_list):

        try:
            for request_spec in request_spec_list:
                volume_type = request_spec.get('volume_type', None)
                volume_type_id = None
                if volume_type:
                    volume_type_id = volume_type.get('id', None)

                specs = {}
                if volume_type_id:
                    qos_specs = volume_types.get_volume_type_qos_specs(
                        volume_type_id)
                    specs = qos_specs['qos_specs']
                if not specs:
                    # Make sure we don't pass an empty dict.
                    specs = None

                volume_properties = {
                    # Need to populate size for the scheduler
                    'size': 0,
                    'user_id': context.user_id,
                    'project_id': context.project_id,
                    'status': 'creating',
                    'attach_status': 'detached',
                    'encryption_key_id': request_spec.get('encryption_key_id',
                                                          None),
                    'display_description': request_spec.get('description',
                                                            None),
                    'display_name': request_spec.get('name', None),
                    'volume_type_id': volume_type_id,
                }

                request_spec['volume_properties'] = volume_properties
                request_spec['qos_specs'] = specs

        except Exception:
            with excutils.save_and_reraise_exception():
                try:
                    group.destroy()
                finally:
                    LOG.error(
                        _LE("Error occurred when building "
                            "request spec list for consistency group "
                            "%s."), group.id)

        # Cast to the scheduler and let it handle whatever is needed
        # to select the target host for this group.
        self.scheduler_rpcapi.create_consistencygroup(
            context,
            CONF.volume_topic,
            group,
            request_spec_list=request_spec_list,
            filter_properties_list=filter_properties_list)
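For illustration, a hedged sketch of what one request_spec entry looks like
after this method fills it in; the field values below are placeholders, not
taken from the source:

request_spec = {
    'volume_type': {'id': 'a-volume-type-uuid'},  # hypothetical uuid
    'qos_specs': None,           # qos specs from the type lookup, or None
    'volume_properties': {
        'size': 0,               # scheduler only needs a size field
        'status': 'creating',
        'attach_status': 'detached',
        'volume_type_id': 'a-volume-type-uuid',
    },
}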
Example #46
    def _ssh_execute(self, cmd_list, check_exit_code=True, attempts=1):
        """Execute cli with status update.

        Executes CLI commands such as cfgsave where status return is expected.
        """
        utils.check_ssh_injection(cmd_list)
        command = ' '.join(cmd_list)

        if not self.sshpool:
            self.sshpool = ssh_utils.SSHPool(self.switch_ip,
                                             self.switch_port,
                                             None,
                                             self.switch_user,
                                             self.switch_pwd,
                                             self.switch_key,
                                             min_size=1,
                                             max_size=5)
        stdin, stdout, stderr = None, None, None
        LOG.debug("Executing command via ssh: %s", command)
        last_exception = None
        try:
            with self.sshpool.item() as ssh:
                while attempts > 0:
                    attempts -= 1
                    try:
                        stdin, stdout, stderr = ssh.exec_command(command)
                        stdin.write("%s\n" % zone_constant.YES)
                        channel = stdout.channel
                        exit_status = channel.recv_exit_status()
                        LOG.debug("Exit Status from ssh: %s", exit_status)
                        # exit_status == -1 if no exit code was returned
                        if exit_status != -1:
                            LOG.debug('Result was %s', exit_status)
                            if check_exit_code and exit_status != 0:
                                raise processutils.ProcessExecutionError(
                                    exit_code=exit_status,
                                    stdout=stdout,
                                    stderr=stderr,
                                    cmd=command)
                            else:
                                return True
                        else:
                            return True
                    except Exception as e:
                        LOG.exception(_LE('Error executing SSH command.'))
                        last_exception = e
                        greenthread.sleep(random.randint(20, 500) / 100.0)
                LOG.debug("Handling error case after "
                          "SSH: %s", last_exception)
                try:
                    raise processutils.ProcessExecutionError(
                        exit_code=last_exception.exit_code,
                        stdout=last_exception.stdout,
                        stderr=last_exception.stderr,
                        cmd=last_exception.cmd)
                except AttributeError:
                    raise processutils.ProcessExecutionError(
                        exit_code=-1,
                        stdout="",
                        stderr="Error running SSH command",
                        cmd=command)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Error executing command via ssh: %s"), e)
        finally:
            if stdin:
                stdin.flush()
                stdin.close()
            if stdout:
                stdout.close()
            if stderr:
                stderr.close()
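The loop above follows a common retry pattern: decrement an attempt counter,
sleep a short random interval between tries, and re-raise the last failure
once attempts run out. A self-contained sketch of the same pattern,
independent of SSH:

import random
import time

def run_with_retries(run_once, attempts=3):
    # Call run_once() up to 'attempts' times, sleeping 0.2-5.0 seconds
    # between tries (the same interval the SSH loop uses), and re-raise
    # the last failure when attempts are exhausted.
    assert attempts > 0
    last_exc = None
    while attempts > 0:
        attempts -= 1
        try:
            return run_once()
        except Exception as exc:
            last_exc = exc
            time.sleep(random.randint(20, 500) / 100.0)
    raise last_exc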
Example #47
    def reset_status(self, context, backup, status):
        """Reset volume backup status.

        :param context: running context
        :param backup: The backup object for reset status operation
        :param status: The status to be set
        :raises: InvalidBackup
        :raises: BackupVerifyUnsupportedDriver
        :raises: AttributeError
        """
        LOG.info(_LI('Reset backup status started, backup_id: '
                     '%(backup_id)s, status: %(status)s.'),
                 {'backup_id': backup.id,
                  'status': status})

        backup_service = self._map_service_to_driver(backup.service)
        LOG.info(_LI('Backup service: %s.'), backup_service)
        if backup_service is not None:
            configured_service = self.driver_name
            if backup_service != configured_service:
                err = _('Reset backup status aborted, the backup service'
                        ' currently configured [%(configured_service)s] '
                        'is not the backup service that was used to create'
                        ' this backup [%(backup_service)s].') % \
                    {'configured_service': configured_service,
                     'backup_service': backup_service}
                raise exception.InvalidBackup(reason=err)
            # Verify backup
            try:
                # check whether the backup is ok or not
                if (status == fields.BackupStatus.AVAILABLE
                        and backup['status'] != fields.BackupStatus.RESTORING):
                    # check whether we could verify the backup is ok or not
                    if isinstance(backup_service,
                                  driver.BackupDriverWithVerify):
                        backup_service.verify(backup.id)
                        backup.status = status
                        backup.save()
                    # driver does not support verify function
                    else:
                        msg = (_('Backup service %(configured_service)s '
                                 'does not support verify. Backup id'
                                 ' %(id)s is not verified. '
                                 'Skipping verify.') %
                               {'configured_service': self.driver_name,
                                'id': backup.id})
                        raise exception.BackupVerifyUnsupportedDriver(
                            reason=msg)
                # reset status to error or from restoring to available
                else:
                    if (status == fields.BackupStatus.ERROR or
                        (status == fields.BackupStatus.AVAILABLE and
                            backup.status == fields.BackupStatus.RESTORING)):
                        backup.status = status
                        backup.save()
            except exception.InvalidBackup:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Backup id %s is not invalid. "
                                  "Skipping reset."), backup.id)
            except exception.BackupVerifyUnsupportedDriver:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Backup service %(configured_service)s '
                                  'does not support verify. Backup id '
                                  '%(id)s is not verified. '
                                  'Skipping verify.'),
                              {'configured_service': self.driver_name,
                               'id': backup.id})
            except AttributeError:
                msg = (_('Backup service %(service)s does not support '
                         'verify. Backup id %(id)s is not verified. '
                         'Skipping reset.') %
                       {'service': self.driver_name,
                        'id': backup.id})
                LOG.error(msg)
                raise exception.BackupVerifyUnsupportedDriver(
                    reason=msg)

            # Clean up temporary volumes and snapshots.
            try:
                self._cleanup_temp_volumes_snapshots_for_one_backup(
                    context, backup)
            except Exception:
                LOG.exception(_LE("Problem cleaning temp volumes and "
                                  "snapshots for backup %(bkup)s."),
                              {'bkup': backup.id})

            # send notification to ceilometer
            notifier_info = {'id': backup.id, 'update': {'status': status}}
            notifier = rpc.get_notifier('backupStatusUpdate')
            notifier.info(context, "backups.reset_status.end",
                          notifier_info)
Example #48
File: hnas_iscsi.py Project: Jchuan/cinder
    def _get_service_target(self, volume):
        """Gets the available service parameters

        Gets the available service parameters for a given volume using its
        type.
        :param volume: dictionary volume reference
        :returns: service target information or raises error
        :raises: NoMoreTargets
        """
        fs_label = self._get_service(volume)
        evs_id = self.backend.get_evs(fs_label)

        svc_label = utils.extract_host(volume.host, level='pool')
        svc = self.config['services'][svc_label]

        lu_info = self.backend.check_lu(volume.name, fs_label)

        # The volume is already mapped to a LU, so no need to create any
        # targets
        if lu_info['mapped']:
            service = (svc['iscsi_ip'], svc['iscsi_port'], svc['evs'],
                       svc['port'], fs_label, lu_info['tgt']['alias'],
                       lu_info['tgt']['secret'])
            LOG.info(
                _LI("Volume %(vol_name)s already mapped on target "
                    "%(tgt)s to LUN %(lunid)s."), {
                        'vol_name': volume.name,
                        'tgt': lu_info['tgt']['alias'],
                        'lunid': lu_info['id']
                    })
            return service

        # Each EVS can have up to 32 targets. Each target can have up to 32
        # LUs attached and has the name format 'evs<id>-tgt<0-N>'. We scan
        # from 'evs<id>-tgt0' until we find a target that either does not yet
        # exist on the backend or still has free slots for new LUs.
        tgt_alias = ''
        for i in range(0, MAX_HNAS_ISCSI_TARGETS):
            tgt_alias = 'evs' + evs_id + '-tgt' + six.text_type(i)
            tgt = self.backend.check_target(fs_label, tgt_alias)

            if (tgt['found']
                    and len(tgt['tgt']['lus']) < MAX_HNAS_LUS_PER_TARGET
                    or not tgt['found']):
                # Either the target exists and has free slots, or it does not
                # exist yet. Use it, or create a target with this name.
                break
        else:
            # If we get here, we have run out of targets: raise and bail out.
            LOG.error(_LE("No more targets available."))
            raise exception.NoMoreTargets(param=tgt_alias)

        LOG.info(_LI("Using target label: %(tgt)s."), {'tgt': tgt_alias})

        # Check if we have a secret stored for this target so we don't have to
        # go to BE on every query
        if 'targets' not in self.config.keys():
            self.config['targets'] = {}

        if tgt_alias not in self.config['targets'].keys():
            self.config['targets'][tgt_alias] = {}

        tgt_info = self.config['targets'][tgt_alias]

        # HNAS - one-time lookup: see if the client supports CHAP
        # authentication. If iscsi_secret has already been set, retrieve
        # the secret if available; otherwise generate and store one.
        if self.config['chap_enabled']:
            # CHAP support is enabled. Try to get the target secret.
            if 'iscsi_secret' not in tgt_info.keys():
                LOG.info(_LI("Retrieving secret for service: %(tgt)s."),
                         {'tgt': tgt_alias})
                out = self.backend.get_target_secret(tgt_alias, fs_label)
                tgt_info['iscsi_secret'] = out

                # CHAP is supported and the target has no secret yet, so
                # create one for it.
                if tgt_info['iscsi_secret'] == "":
                    random_secret = utils.generate_password()[0:15]
                    tgt_info['iscsi_secret'] = random_secret

                    LOG.info(_LI("Set tgt CHAP secret for service: %(tgt)s."),
                             {'tgt': tgt_alias})
        else:
            # We set a blank password when the client does not support
            # CHAP. Later on, if the client tries to create a new target
            # that does not exist in the backend, we check for this value
            # and use a temporary dummy password.
            if 'iscsi_secret' not in tgt_info.keys():
                # Warn only the first time.
                LOG.info(_LI("CHAP authentication disabled."))

            tgt_info['iscsi_secret'] = "''"

        # If the target does not exist, it should be created
        if not tgt['found']:
            self.backend.create_target(tgt_alias, fs_label,
                                       tgt_info['iscsi_secret'])
        elif (tgt['tgt']['secret'] == "" and self.config['chap_enabled']):
            # The target exists, has no secret and chap is enabled
            self.backend.set_target_secret(tgt_alias, fs_label,
                                           tgt_info['iscsi_secret'])

        if 'tgt_iqn' not in tgt_info:
            LOG.info(_LI("Retrieving IQN for service: %(tgt)s."),
                     {'tgt': tgt_alias})

            out = self.backend.get_target_iqn(tgt_alias, fs_label)
            tgt_info['tgt_iqn'] = out

        self.config['targets'][tgt_alias] = tgt_info

        service = (svc['iscsi_ip'], svc['iscsi_port'], svc['evs'], svc['port'],
                   fs_label, tgt_alias, tgt_info['iscsi_secret'])

        return service
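The target scan above relies on Python's for/else: the else branch runs only
when the loop completes without a break, i.e. when every target is full. A
standalone sketch of the idiom with hypothetical names:

MAX_TARGETS = 32
MAX_LUS_PER_TARGET = 32

def pick_target(lu_counts):
    # lu_counts is a hypothetical dict mapping target alias -> LUs in use;
    # pick the first alias that is unused or still has free slots.
    for i in range(MAX_TARGETS):
        alias = 'evs1-tgt%d' % i
        if lu_counts.get(alias, 0) < MAX_LUS_PER_TARGET:
            break
    else:
        # Reached only when no target had a free slot.
        raise RuntimeError('No more targets available.')
    return alias

assert pick_target({'evs1-tgt0': 32}) == 'evs1-tgt1'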
Example #49
    def migrate_volume_to_storage_pool(self, conn,
                                       storageRelocationServiceInstanceName,
                                       volumeInstanceName,
                                       targetPoolInstanceName, extraSpecs):
        """Given the storage system name, get the storage relocation service.

        :param conn: the connection to the ecom server
        :param storageRelocationServiceInstanceName: the storage relocation
            service
        :param volumeInstanceName: the volume to be migrated
        :param targetPoolInstanceName: the target pool to migrate the
            volume to.
        :param extraSpecs: additional info
        :returns: int -- rc, return code
        :raises: VolumeBackendAPIException
        """
        LOG.debug(
            "Volume instance name is %(volumeInstanceName)s. "
            "Pool instance name is : %(targetPoolInstanceName)s. ", {
                'volumeInstanceName': volumeInstanceName,
                'targetPoolInstanceName': targetPoolInstanceName
            })
        rc = -1
        try:
            rc = self._migrate_volume(conn,
                                      storageRelocationServiceInstanceName,
                                      volumeInstanceName,
                                      targetPoolInstanceName, extraSpecs)
        except Exception as ex:
            if 'source of a migration session' in six.text_type(ex):
                try:
                    rc = self._terminate_migrate_session(
                        conn, volumeInstanceName, extraSpecs)
                except Exception as ex:
                    LOG.error(_LE('Exception: %s.'), ex)
                    exceptionMessage = (
                        _("Failed to terminate migrate session."))
                    LOG.error(exceptionMessage)
                    raise exception.VolumeBackendAPIException(
                        data=exceptionMessage)
                try:
                    rc = self._migrate_volume(
                        conn, storageRelocationServiceInstanceName,
                        volumeInstanceName, targetPoolInstanceName, extraSpecs)
                except Exception as ex:
                    LOG.error(_LE('Exception: %s'), ex)
                    exceptionMessage = (
                        _("Failed to migrate volume for the second time."))
                    LOG.error(exceptionMessage)
                    raise exception.VolumeBackendAPIException(
                        data=exceptionMessage)

            else:
                LOG.error(_LE('Exception: %s'), ex)
                exceptionMessage = (
                    _("Failed to migrate volume for the first time."))
                LOG.error(exceptionMessage)
                raise exception.VolumeBackendAPIException(
                    data=exceptionMessage)

        return rc
Example #50
    def create_from_src(self,
                        context,
                        name,
                        description=None,
                        cgsnapshot_id=None,
                        source_cgid=None):
        check_policy(context, 'create')
        cgsnapshot = None
        orig_cg = None
        if cgsnapshot_id:
            try:
                cgsnapshot = self.db.cgsnapshot_get(context, cgsnapshot_id)
            except exception.CgSnapshotNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("CG snapshot %(cgsnap)s not found when "
                            "creating consistency group %(cg)s from "
                            "source."), {
                                'cg': name,
                                'cgsnap': cgsnapshot_id
                            })
            orig_cg = objects.ConsistencyGroup.get_by_id(
                context, cgsnapshot['consistencygroup_id'])

        source_cg = None
        if source_cgid:
            try:
                source_cg = objects.ConsistencyGroup.get_by_id(
                    context, source_cgid)
            except exception.ConsistencyGroupNotFound:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE("Source CG %(source_cg)s not found when "
                            "creating consistency group %(cg)s from "
                            "source."), {
                                'cg': name,
                                'source_cg': source_cgid
                            })

        kwargs = {
            'user_id': context.user_id,
            'project_id': context.project_id,
            'status': "creating",
            'name': name,
            'description': description,
            'cgsnapshot_id': cgsnapshot_id,
            'source_cgid': source_cgid,
        }

        if orig_cg:
            kwargs['volume_type_id'] = orig_cg.volume_type_id
            kwargs['availability_zone'] = orig_cg.availability_zone
            kwargs['host'] = orig_cg.host

        if source_cg:
            kwargs['volume_type_id'] = source_cg.volume_type_id
            kwargs['availability_zone'] = source_cg.availability_zone
            kwargs['host'] = source_cg.host

        group = None
        try:
            group = objects.ConsistencyGroup(context=context, **kwargs)
            group.create()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(
                    _LE("Error occurred when creating consistency group"
                        " %(cg)s from cgsnapshot %(cgsnap)s."), {
                            'cg': name,
                            'cgsnap': cgsnapshot_id
                        })

        # Update quota for consistencygroups
        self.update_quota(context, group, 1)

        if not group.host:
            msg = _("No host to create consistency group %s.") % group.id
            LOG.error(msg)
            raise exception.InvalidConsistencyGroup(reason=msg)

        if cgsnapshot:
            self._create_cg_from_cgsnapshot(context, group, cgsnapshot)
        elif source_cg:
            self._create_cg_from_source_cg(context, group, source_cg)

        return group
Example #51
    def _do_initialize_connection(self, volume, connector):
        """Perform necessary work to make an iSCSI connection.

        To be able to create an iSCSI connection from a given host to a
        volume, we must:
        1. Translate the given iSCSI name to a host name
        2. Create new host on the storage system if it does not yet exist
        3. Map the volume to the host if it is not already done
        4. Return the connection information for relevant nodes (in the
        proper I/O group)
        """
        LOG.debug(
            'enter: initialize_connection: volume %(vol)s with connector'
            ' %(conn)s', {
                'vol': volume['id'],
                'conn': connector
            })

        volume_name = volume['name']

        # Check if a host object is defined for this host name
        host_name = self._helpers.get_host_from_connector(connector)
        if host_name is None:
            # Host does not exist - add a new host to Storwize/SVC
            host_name = self._helpers.create_host(connector)

        chap_secret = self._helpers.get_chap_secret_for_host(host_name)
        chap_enabled = self.configuration.storwize_svc_iscsi_chap_enabled
        if chap_enabled and chap_secret is None:
            chap_secret = self._helpers.add_chap_secret_to_host(host_name)
        elif not chap_enabled and chap_secret:
            LOG.warning(
                _LW('CHAP secret exists for host but CHAP is '
                    'disabled.'))

        multihostmap = self.configuration.storwize_svc_multihostmap_enabled
        lun_id = self._helpers.map_vol_to_host(volume_name, host_name,
                                               multihostmap)

        try:
            properties = self._get_single_iscsi_data(volume, connector, lun_id,
                                                     chap_secret)
            multipath = connector.get('multipath', False)
            if multipath:
                properties = self._get_multi_iscsi_data(
                    volume, connector, lun_id, properties)
        except Exception:
            with excutils.save_and_reraise_exception():
                self._do_terminate_connection(volume, connector)
                LOG.error(
                    _LE('initialize_connection: Failed '
                        'to collect return '
                        'properties for volume %(vol)s and connector '
                        '%(conn)s.\n'), {
                            'vol': volume,
                            'conn': connector
                        })

        LOG.debug(
            'leave: initialize_connection:\n volume: %(vol)s\n '
            'connector: %(conn)s\n properties: %(prop)s', {
                'vol': volume['id'],
                'conn': connector,
                'prop': properties
            })

        return {
            'driver_volume_type': 'iscsi',
            'data': properties,
        }
Example #52
    def delete_backup(self, context, backup):
        """Delete volume backup from configured backup service."""
        LOG.info(_LI('Delete backup started, backup: %s.'), backup.id)

        self._notify_about_backup_usage(context, backup, "delete.start")
        backup.host = self.host
        backup.save()

        expected_status = fields.BackupStatus.DELETING
        actual_status = backup.status
        if actual_status != expected_status:
            err = _('Delete_backup aborted, expected backup status '
                    '%(expected_status)s but got %(actual_status)s.') \
                % {'expected_status': expected_status,
                   'actual_status': actual_status}
            self._update_backup_error(backup, context, err)
            raise exception.InvalidBackup(reason=err)

        backup_service = self._map_service_to_driver(backup['service'])
        if backup_service is not None:
            configured_service = self.driver_name
            if backup_service != configured_service:
                err = _('Delete backup aborted, the backup service currently'
                        ' configured [%(configured_service)s] is not the'
                        ' backup service that was used to create this'
                        ' backup [%(backup_service)s].')\
                    % {'configured_service': configured_service,
                       'backup_service': backup_service}
                self._update_backup_error(backup, context, err)
                raise exception.InvalidBackup(reason=err)

            try:
                backup_service = self.service.get_backup_driver(context)
                backup_service.delete(backup)
            except Exception as err:
                with excutils.save_and_reraise_exception():
                    self._update_backup_error(backup, context,
                                              six.text_type(err))

        # Get reservations
        try:
            reserve_opts = {
                'backups': -1,
                'backup_gigabytes': -backup.size,
            }
            reservations = QUOTAS.reserve(context,
                                          project_id=backup.project_id,
                                          **reserve_opts)
        except Exception:
            reservations = None
            LOG.exception(_LE("Failed to update usages deleting backup"))

        backup.destroy()
        # If this backup is incremental backup, handle the
        # num_dependent_backups of parent backup
        if backup.parent_id:
            parent_backup = objects.Backup.get_by_id(context,
                                                     backup.parent_id)
            if parent_backup.has_dependent_backups:
                parent_backup.num_dependent_backups -= 1
                parent_backup.save()
        # Commit the reservations
        if reservations:
            QUOTAS.commit(context, reservations,
                          project_id=backup.project_id)

        LOG.info(_LI('Delete backup finished, backup %s deleted.'), backup.id)
        self._notify_about_backup_usage(context, backup, "delete.end")
Example #53
    def initialize_connection(self, volume, connector):
        # We use the volume id as the volume name, since it is a
        # known unique name.
        volume_name = volume.get('id')
        initiator_name = connector.get('initiator')
        multipath = connector.get('multipath', False)
        LOG.debug('initialize_connection: %(n)s:%(i)s', {
            'n': volume_name,
            'i': initiator_name
        })

        with self._client.open_connection() as api:
            try:
                ssn = api.find_sc(self.configuration.dell_sc_ssn)
                # Find our server.
                server = api.find_server(ssn, initiator_name)
                # No? Create it.
                if server is None:
                    server_folder = self.configuration.dell_sc_server_folder
                    server = api.create_server(ssn, server_folder,
                                               initiator_name)
                # Find the volume on the storage center.
                scvolume = api.find_volume(ssn, volume_name)

                # If we have a server and a volume, let's bring them together.
                if server is not None and scvolume is not None:
                    mapping = api.map_volume(scvolume, server)
                    if mapping is not None:
                        # Since we just mapped our volume we had best update
                        # our sc volume object.
                        scvolume = api.find_volume(ssn, volume_name)

                        if multipath:
                            # Just return our properties with all the mappings
                            idx, iscsiproperties = (api.find_iscsi_properties(
                                scvolume, None, None))
                            return {
                                'driver_volume_type': 'iscsi',
                                'data': iscsiproperties
                            }
                        else:
                            # Only return the iqn for the user specified port.
                            ip = self.configuration.iscsi_ip_address
                            port = self.configuration.iscsi_port
                            idx, iscsiproperties = (api.find_iscsi_properties(
                                scvolume, ip, port))
                            properties = {}
                            properties['target_discovered'] = False
                            portals = iscsiproperties['target_portals']
                            # We'll key off of target_portals.  If we have
                            # one listed we can assume that we found what
                            # we are looking for.  Otherwise error.
                            if len(portals) > 0:
                                properties['target_portal'] = portals[idx]
                                properties['target_iqn'] = (
                                    iscsiproperties['target_iqns'][idx])
                                properties['target_lun'] = (
                                    iscsiproperties['target_luns'][idx])
                                properties['access_mode'] = (
                                    iscsiproperties['access_mode'])
                                LOG.debug(properties)
                                return {
                                    'driver_volume_type': 'iscsi',
                                    'data': properties
                                }
                            else:
                                LOG.error(
                                    _LE('Volume mapped to invalid path.'))
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(
                        _LE('Failed to initialize connection '
                            ' %(i)s %(n)s'), {
                                'i': initiator_name,
                                'n': volume_name
                            })

        # We get here because our mapping is None or we have no valid iqn
        # to return, so blow up.
        raise exception.VolumeBackendAPIException(_('Unable to map volume'))
Example #54
    def _get_device_number(self, path):
        try:
            return utils.get_blkdev_major_minor(path)
        except exception.Error as e:
            LOG.error(_LE('Failed to get device number for throttling: '
                          '%(error)s'), {'error': e})
Example #55
    def retype(self, context, volume, new_type, diff, host):
        """Convert the volume to be of the new type.

        :param context: Context
        :param volume: A dictionary describing the volume to migrate
        :param new_type: A dictionary describing the volume type to convert to
        :param diff: A dictionary with the difference between the two types
        :param host: A dictionary describing the host to migrate to, where
                     host['host'] is its name, and host['capabilities'] is a
                     dictionary of its reported capabilities.
        """
        LOG.debug(
            'Retype volume request %(vol)s to be %(type)s '
            '(host: %(host)s), diff %(diff)s.', {
                'vol': volume['name'],
                'type': new_type,
                'host': host,
                'diff': diff
            })

        options = dict(compression='compression',
                       dedup='dedup',
                       description='nms:description')

        retyped = False
        migrated = False
        model_update = None

        src_backend = self.__class__.__name__
        dst_backend = host['capabilities']['location_info'].split(':')[0]
        if src_backend != dst_backend:
            LOG.warning(
                _LW('Cannot retype from %(src_backend)s to '
                    '%(dst_backend)s.'), {
                        'src_backend': src_backend,
                        'dst_backend': dst_backend
                    })
            return False

        hosts = (volume['host'], host['host'])
        old, new = hosts
        if old != new:
            migrated, provider_location = self.migrate_volume(
                context, volume, host)

        if not migrated:
            provider_location = volume['provider_location']
            nms = self.share2nms[provider_location]
        else:
            nms_url = host['capabilities']['nms_url']
            nms = self._get_nms_for_url(nms_url)
            model_update = provider_location
            provider_location = provider_location['provider_location']

        share = provider_location.split(':')[1].split('volumes/')[1]
        folder = '%(share)s/%(volume)s' % {
            'share': share,
            'volume': volume['name']
        }

        for opt in options:
            old, new = diff.get('extra_specs').get(opt, (False, False))
            if old != new:
                LOG.debug('Changing %(opt)s from %(old)s to %(new)s.', {
                    'opt': opt,
                    'old': old,
                    'new': new
                })
                try:
                    nms.folder.set_child_prop(folder, options[opt], new)
                    retyped = True
                except exception.NexentaException:
                    LOG.error(
                        _LE('Error trying to change %(opt)s'
                            ' from %(old)s to %(new)s'), {
                                'opt': opt,
                                'old': old,
                                'new': new
                            })
                    return False, None
        return retyped or migrated, model_update
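The retype loop above reads diff['extra_specs'] as a mapping from spec name
to an (old, new) pair. A hedged sketch of that structure with placeholder
values:

diff = {'extra_specs': {'compression': ('off', 'on')}}

old, new = diff['extra_specs'].get('compression', (False, False))
assert (old, new) == ('off', 'on')   # changed -> the property is retyped
assert diff['extra_specs'].get('dedup', (False, False)) == (False, False)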
Example #56
    def _copy_vdisk_data(self, src_vdisk_name, src_vdisk_id, dest_vdisk_name,
                         dest_vdisk_id):
        """Copy data from src vdisk to dest vdisk.

        To be able to copy data between vdisks, we must ensure that both
        vdisks have been mapped to the host. If a vdisk has not been mapped,
        it must be mapped first. When the data copy completes, each vdisk
        should be restored to its previous mapped or non-mapped status.
        """

        LOG.debug('enter: _copy_vdisk_data: %(src)s -> %(dest)s.', {
            'src': src_vdisk_name,
            'dest': dest_vdisk_name
        })

        connector = utils.brick_get_connector_properties()
        (src_map, src_lun_id) = self._is_vdisk_map(src_vdisk_name, connector)
        (dest_map, dest_lun_id) = self._is_vdisk_map(dest_vdisk_name,
                                                     connector)

        src_map_device = None
        src_properties = None
        dest_map_device = None
        dest_properties = None

        try:
            if not src_map:
                src_lun_id = self._map_vdisk_to_host(src_vdisk_name, connector)
            if not dest_map:
                dest_lun_id = self._map_vdisk_to_host(dest_vdisk_name,
                                                      connector)
            src_properties = self._get_vdisk_map_properties(
                connector, src_lun_id, src_vdisk_name, src_vdisk_id,
                self._get_vdisk_params(None))
            src_map_device = self._scan_device(src_properties)

            dest_properties = self._get_vdisk_map_properties(
                connector, dest_lun_id, dest_vdisk_name, dest_vdisk_id,
                self._get_vdisk_params(None))
            dest_map_device = self._scan_device(dest_properties)

            src_vdisk_attr = self._get_vdisk_attributes(src_vdisk_name)

            # vdisk capacity is in bytes; translate into MiB. Use floor
            # division so copy_volume receives an integer size.
            size_in_mb = int(src_vdisk_attr['capacity']) // units.Mi
            volume_utils.copy_volume(src_map_device['path'],
                                     dest_map_device['path'], size_in_mb,
                                     self.configuration.volume_dd_blocksize)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to copy %(src)s to %(dest)s.'), {
                    'src': src_vdisk_name,
                    'dest': dest_vdisk_name
                })
        finally:
            if not dest_map:
                self._unmap_vdisk_from_host(dest_vdisk_name, connector)
                self._remove_device(dest_properties, dest_map_device)
            if not src_map:
                self._unmap_vdisk_from_host(src_vdisk_name, connector)
                self._remove_device(src_properties, src_map_device)

        LOG.debug('leave: _copy_vdisk_data: %(src)s -> %(dest)s.', {
            'src': src_vdisk_name,
            'dest': dest_vdisk_name
        })
Example #57
    def initialize_connection(self, volume, connector):
        """Allow connection to connector and return connection info."""
        """
            connector = {'ip': CONF.my_ip,
                         'host': CONF.host,
                         'initiator': self._initiator,
                         'wwnns': self._fc_wwnns,
                         'wwpns': self._fc_wwpns}

        """
        dc_fc = {}
        dc_target = {}
        lsTargetWwpn = []
        output = None
        properties = {}
        preferTargets = {}
        ret = 0
        targetIdentifier = []
        szwwpns = []
        LOG.info(
            _LI('initialize_connection volume: %(volume)s, connector:'
                ' %(connector)s'), {
                    "volume": volume,
                    "connector": connector
                })
        # Get the storage fibre channel controllers.
        dc_fc = self._get_fc_channel()

        # Get the existing FC target list to decide the target wwpn.
        dc_target = self._get_targets()
        if len(dc_target) == 0:
            msg = _('Backend storage did not configure fiber channel '
                    'target.')
            raise exception.VolumeBackendAPIException(data=msg)

        for keyFc in dc_fc.keys():
            for targetuuid in dc_target.keys():
                if dc_fc[keyFc]['hardware_address'] == \
                        dc_target[targetuuid]['targetAddr']:
                    preferTargets[targetuuid] = dc_target[targetuuid]
                    break
        # Confirm the client wwpns exist in the SNS table.
        # Convert wwpns to 'xx:xx:xx:xx:xx:xx:xx:xx' format.
        for dwwpn in connector['wwpns']:
            szwwpn = self._convertHex2String(dwwpn)
            if len(szwwpn) == 0:
                msg = _('Invalid wwpns format %(wwpns)s') % \
                    {'wwpns': connector['wwpns']}
                raise exception.VolumeBackendAPIException(data=msg)
            szwwpns.append(szwwpn)

        if len(szwwpns):
            for targetUuid in preferTargets.keys():
                targetWwpn = ''
                targetWwpn = preferTargets.get(targetUuid,
                                               {}).get('targetAddr', '')
                lsTargetWwpn.append(targetWwpn)
        # Use wwpns to assign volume.
        LOG.info(_LI('Prefer use target wwpn %(wwpn)s'),
                 {'wwpn': lsTargetWwpn})
        # Start creating exports on all FC target nodes.
        assignedTarget = []
        for pTarget in lsTargetWwpn:
            try:
                ret = self._export_fc(volume['id'], str(pTarget), szwwpns,
                                      volume['name'])
                if ret:
                    break
                else:
                    assignedTarget.append(pTarget)
            except Exception as e:
                LOG.error(
                    _LE('Failed to export fiber channel target '
                        'due to %s'), e)
                ret = errno.EFAULT
                break
        if ret == 0:
            ret, output = self.dpl.get_vdev(self._conver_uuid2hex(
                volume['id']))
        nLun = -1
        if ret == 0:
            try:
                for p in output['exports']['Network/FC']:
                    # Check whether an initiator wwpn exists in the
                    # target's initiator list.
                    for initI in p.get('permissions', []):
                        for szwpn in szwwpns:
                            if initI.get(szwpn, None):
                                nLun = initI[szwpn]
                                break
                        if nLun != -1:
                            break

                    if nLun != -1:
                        targetIdentifier.append(
                            str(p['target_identifier']).replace(':', ''))

            except Exception:
                msg = _('Invalid connection initialization response of '
                        'volume %(name)s: '
                        '%(output)s') % {
                            'name': volume['name'],
                            'output': output
                        }
                raise exception.VolumeBackendAPIException(data=msg)

        if nLun != -1:
            init_targ_map = self._build_initiator_target_map(
                connector, targetIdentifier)
            properties['target_discovered'] = True
            properties['target_wwn'] = targetIdentifier
            properties['target_lun'] = int(nLun)
            properties['volume_id'] = volume['id']
            properties['initiator_target_map'] = init_targ_map
            LOG.info(
                _LI('%(volume)s assign type fibre_channel, properties '
                    '%(properties)s'), {
                        'volume': volume['id'],
                        'properties': properties
                    })
        else:
            msg = _('Invalid connection initialization response of '
                    'volume %(name)s') % {
                        'name': volume['name']
                    }
            raise exception.VolumeBackendAPIException(data=msg)
        LOG.info(
            _LI('Connect initialization info: '
                '{driver_volume_type: fibre_channel, '
                'data: %(properties)s}'), {'properties': properties})
        return {'driver_volume_type': 'fibre_channel', 'data': properties}
Example #58
def output_err(msg_id, **kwargs):
    msg = HBSD_ERR_MSG.get(msg_id) % kwargs

    LOG.error(_LE("MSGID%(id)04d-E: %(msg)s"), {'id': msg_id, 'msg': msg})

    return msg
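For illustration, a hedged sketch of how output_err is driven. HBSD_ERR_MSG
is assumed to map numeric message IDs to %-style templates; the entry below
is a placeholder, not a real HBSD message:

HBSD_ERR_MSG = {600: 'The command %(cmd)s failed.'}

msg = HBSD_ERR_MSG.get(600) % {'cmd': 'ls'}
assert msg == 'The command ls failed.'
# output_err(600, cmd='ls') would log it as 'MSGID0600-E: The command ls
# failed.' and return the formatted string.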
Example #59
    def request(self,
                src_file="",
                dst_file="",
                method="",
                maxretries=10,
                data=""):
        retry = 0
        src_url = self.https_path + "/" + src_file
        dst_url = self.https_path + "/" + dst_file
        request = urllib.request.Request(url=src_url, data=data)

        if dst_file != "":
            request.add_header('Destination', dst_url)
        if method == "PROPPATCH":
            request.add_header('Translate', 'F')

        request.add_header("Authorization", "Basic %s" % self.auth_str)

        request.get_method = lambda: method

        LOG.debug('Sending WebDAV request:%(method)s %(src)s %(des)s', {
            'method': method,
            'src': src_url,
            'des': dst_url
        })

        while retry < maxretries:
            try:
                response = urllib.request.urlopen(request, timeout=None)
            except urllib.error.HTTPError as err:
                LOG.error(
                    _LE('WebDAV returned with %(code)s error during '
                        '%(method)s call.'), {
                            'code': err.code,
                            'method': method
                        })

                if err.code == http_client.INTERNAL_SERVER_ERROR:
                    LOG.error(
                        _LE('WebDAV operation failed with error code: '
                            '%(code)s reason: %(reason)s Retry attempt '
                            '%(retry)s in progress.'), {
                                'code': err.code,
                                'reason': err.reason,
                                'retry': retry
                            })
                    if retry < maxretries:
                        retry += 1
                        time.sleep(1)
                        continue

                msg = self._lookup_error(err.code)
                raise exception.WebDAVClientError(msg=msg,
                                                  code=err.code,
                                                  src=src_file,
                                                  dst=dst_file,
                                                  method=method)

            except http_client.BadStatusLine as err:
                msg = self._lookup_error('BadStatusLine')
                code = 'http_client.BadStatusLine'
                raise exception.WebDAVClientError(msg=msg,
                                                  code=code,
                                                  src=src_file,
                                                  dst=dst_file,
                                                  method=method)

            except urllib.error.URLError as err:
                reason = ''
                if getattr(err, 'reason', None):
                    reason = err.reason

                msg = self._lookup_error('Bad_Gateway')
                raise exception.WebDAVClientError(msg=msg,
                                                  code=reason,
                                                  src=src_file,
                                                  dst=dst_file,
                                                  method=method)

            break
        return response
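A hedged usage sketch of the method above: a WebDAV COPY puts the source in
the request URL and the destination in the Destination header, which is what
request() assembles. The client instance and file names are placeholders:

# 'client' is assumed to be an instance of the class above.
response = client.request(src_file='vol-a.img',
                          dst_file='vol-b.img',
                          method='COPY')
# An HTTP 500 is retried up to maxretries times with a one-second pause;
# other HTTP errors raise WebDAVClientError immediately.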
Example #60
    def create_replica(self, local_lun_info, replica_model):
        """Create remote LUN and replication pair.

        Purpose:
            1. create remote lun
            2. create replication pair
            3. enable replication pair
        """
        LOG.debug(('Create replication, local lun info: %(info)s, '
                   'replication model: %(model)s.'), {
                       'info': local_lun_info,
                       'model': replica_model
                   })

        local_lun_id = local_lun_info['ID']
        self.wait_volume_online(self.local_client, local_lun_info)

        # step1, create remote lun
        rmt_lun_info = self.create_rmt_lun(local_lun_info)
        rmt_lun_id = rmt_lun_info['ID']

        # step2, get remote device info
        rmt_dev_id, rmt_dev_name, rmt_dev_sn = self.get_rmt_dev_info()
        if not rmt_lun_id or not rmt_dev_name:
            self._delete_rmt_lun(rmt_lun_id)
            msg = _('Get remote device info failed.')
            LOG.error(msg)
            raise exception.VolumeBackendAPIException(data=msg)

        # step3, create replication pair
        try:
            pair_info = self.local_op.create(
                local_lun_id,
                rmt_lun_id,
                rmt_dev_id,
                rmt_dev_name,
                replica_model,
                self.conf.replica_sync_speed,
            )
            pair_id = pair_info['ID']
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Create pair failed. Error: %s.'), err)
                self._delete_rmt_lun(rmt_lun_id)

        # step4, start sync manually. If replication type is sync,
        # then wait for sync complete.
        wait_complete = (replica_model == constants.REPLICA_SYNC_MODEL)
        try:
            self.local_driver.sync(pair_id, wait_complete)
        except Exception as err:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Start synchronization failed. Error: %s.'), err)
                self._delete_pair(pair_id)
                self._delete_rmt_lun(rmt_lun_id)

        model_update = {}
        driver_data = {
            'pair_id': pair_id,
            'huawei_sn': rmt_dev_sn,
            'rmt_lun_id': rmt_lun_id,
            'rmt_lun_wwn': rmt_lun_info['WWN']
        }
        model_update['replication_driver_data'] = to_string(driver_data)
        model_update['replication_status'] = 'available'
        LOG.debug('Create replication, return info: %s.', model_update)
        return model_update
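replication_driver_data must be stored as a string, so to_string() above
presumably serializes the dict. A minimal sketch assuming JSON encoding (the
real helper in the driver may differ):

import json

def to_string(data):
    # Assumed JSON serialization of the replication driver data.
    return json.dumps(data) if data else ''

print(to_string({'pair_id': '1'}))  # -> {"pair_id": "1"}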