Example #1
    def remove_iscsi_target(self, tid, lun, vol_id, vol_name, **kwargs):
        LOG.info(_LI('Removing iscsi_target for: %s'), vol_id)
        vol_uuid_file = vol_name
        volume_path = os.path.join(self.volumes_dir, vol_uuid_file)
        if not os.path.exists(volume_path):
            LOG.warning(_LW('Volume path %s does not exist, '
                            'nothing to remove.'), volume_path)
            return

        if os.path.isfile(volume_path):
            iqn = '%s%s' % (self.iscsi_target_prefix,
                            vol_uuid_file)
        else:
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        try:
            # NOTE(vish): --force is a workaround for bug:
            #             https://bugs.launchpad.net/cinder/+bug/1159948
            self._execute('tgt-admin',
                          '--force',
                          '--delete',
                          iqn,
                          run_as_root=True)
        except putils.ProcessExecutionError as e:
            LOG.error(_LE("Failed to remove iscsi target for volume "
                          "id:%(vol_id)s: %(e)s")
                      % {'vol_id': vol_id, 'e': e})
            raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
        # NOTE(jdg): There's a bug in some versions of tgt that
        # will sometimes fail silently when using the force flag
        #    https://bugs.launchpad.net/ubuntu/+source/tgt/+bug/1305343
        # For now work-around by checking if the target was deleted,
        # if it wasn't, try again without the force.

        # This will NOT do any good for the case of multiple sessions
        # which the force was added for, but it will however address
        # the cases pointed out in bug:
        #    https://bugs.launchpad.net/cinder/+bug/1304122
        if self._get_target(iqn):
            try:
                LOG.warning(_LW('Silent failure of target removal '
                                'detected, retrying...'))
                self._execute('tgt-admin',
                              '--delete',
                              iqn,
                              run_as_root=True)
            except putils.ProcessExecutionError as e:
                LOG.error(_LE("Failed to remove iscsi target for volume "
                              "id:%(vol_id)s: %(e)s")
                          % {'vol_id': vol_id, 'e': e})
                raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)

        # NOTE(jdg): This *should* still be there, but in case
        # it's not we don't care, so just ignore it if it was
        # somehow deleted between entry of this method
        # and here
        if os.path.exists(volume_path):
            os.unlink(volume_path)
        else:
            LOG.debug('Volume path %s not found at end '
                      'of remove_iscsi_target.', volume_path)
Example #2
    def delete_snapshot(self, snapshot):
        snapshotname = huawei_utils.encode_name(snapshot['id'])
        volume_name = huawei_utils.encode_name(snapshot['volume_id'])

        LOG.info(_LI(
            'delete_snapshot: snapshot name: %(snapshot)s, '
            'volume name: %(volume)s.'),
            {'snapshot': snapshotname,
             'volume': volume_name})

        snapshot_id = snapshot.get('provider_location', None)
        if snapshot_id is None:
            snapshot_id = self.restclient.get_snapshotid_by_name(snapshotname)

        if snapshot_id is not None:
            if self.restclient.check_snapshot_exist(snapshot_id):
                self.restclient.stop_snapshot(snapshot_id)
                self.restclient.delete_snapshot(snapshot_id)
            else:
                LOG.warning(_LW("Can't find snapshot on the array."))
        else:
            LOG.warning(_LW("Can't find snapshot on the array."))
            return False

        return True
Example #3
    def delete(self, backup):
        """Delete the given backup from swift."""
        container = backup['container']
        LOG.debug('delete started, backup: %s, container: %s, prefix: %s',
                  backup['id'], container, backup['service_metadata'])

        if container is not None:
            swift_object_names = []
            try:
                swift_object_names = self._generate_object_names(backup)
            except Exception:
                LOG.warning(_LW('swift error while listing objects, '
                                'continuing with delete'))

            for swift_object_name in swift_object_names:
                try:
                    self.conn.delete_object(container, swift_object_name)
                except socket.error as err:
                    raise exception.SwiftConnectionFailed(reason=err)
                except Exception:
                    LOG.warning(_LW('swift error while deleting object %s, '
                                    'continuing with delete'),
                                swift_object_name)
                else:
                    LOG.debug('deleted swift object: %(swift_object_name)s'
                              ' in container: %(container)s',
                              {
                                  'swift_object_name': swift_object_name,
                                  'container': container
                              })
                # Deleting a backup's objects from swift can take some time.
                # Yield so other threads can run
                eventlet.sleep(0)

        LOG.debug('delete %s finished', backup['id'])
Example #4
    def set_nas_security_options(self, is_new_cinder_install):
        """Determine the setting to use for Secure NAS options.

        Value of each NAS Security option is checked and updated. If the
        option is currently 'auto', then it is set to either true or false
        based upon if this is a new Cinder installation. The RemoteFS variable
        '_execute_as_root' will be updated for this driver.

        :param is_new_cinder_install: bool indication of new Cinder install
        """
        doc_html = "http://docs.openstack.org/admin-guide-cloud/content/nfs_backend.html"

        self._ensure_shares_mounted()
        if not self._mounted_shares:
            raise exception.NfsNoSharesMounted()

        nfs_mount = self._get_mount_point_for_share(self._mounted_shares[0])

        self.configuration.nas_secure_file_permissions = self._determine_nas_security_option_setting(
            self.configuration.nas_secure_file_permissions, nfs_mount, is_new_cinder_install
        )

        LOG.debug(
            "NAS variable secure_file_permissions setting is: %s", self.configuration.nas_secure_file_permissions
        )

        if self.configuration.nas_secure_file_permissions == "false":
            LOG.warning(
                _LW(
                    "The NAS file permissions mode will be 666 (allowing "
                    "other/world read & write access). "
                    "This is considered an insecure NAS environment. "
                    "Please see %s for information on a secure "
                    "NFS configuration."
                )
                % doc_html
            )

        self.configuration.nas_secure_file_operations = self._determine_nas_security_option_setting(
            self.configuration.nas_secure_file_operations, nfs_mount, is_new_cinder_install
        )

        # If secure NAS, update the '_execute_as_root' flag to not
        # run as the root user; run as process' user ID.
        if self.configuration.nas_secure_file_operations == "true":
            self._execute_as_root = False

        LOG.debug("NAS variable secure_file_operations setting is: %s" % self.configuration.nas_secure_file_operations)

        if self.configuration.nas_secure_file_operations == "false":
            LOG.warning(
                _LW(
                    "The NAS file operations will be run as "
                    "root: allowing root level access at the storage "
                    "backend. This is considered an insecure NAS "
                    "environment. Please see %s "
                    "for information on a secure NAS configuration."
                )
                % doc_html
            )
Example #5
    def _create_server(self, connector, client):
        server_info = None
        chap_enabled = self.configuration.hplefthand_iscsi_chap_enabled
        try:
            server_info = client.getServerByName(connector['host'])
            chap_secret = server_info['chapTargetSecret']
            if not chap_enabled and chap_secret:
                LOG.warning(_LW('CHAP secret exists for host %s but CHAP is '
                                'disabled'), connector['host'])
            if chap_enabled and chap_secret is None:
                LOG.warning(_LW('CHAP is enabled, but server secret not '
                                'configured on server %s'), connector['host'])
            return server_info
        except hpexceptions.HTTPNotFound:
            # server does not exist, so create one
            pass

        optional = None
        if chap_enabled:
            chap_secret = utils.generate_password()
            optional = {'chapName': connector['initiator'],
                        'chapTargetSecret': chap_secret,
                        'chapAuthenticationRequired': True
                        }

        server_info = client.createServer(connector['host'],
                                          connector['initiator'],
                                          optional)
        return server_info
Example #6
def refresh_cluster_ssc(backend, na_server, vserver, synchronous=False):
    """Refresh cluster ssc for backend."""
    if not isinstance(na_server, netapp_api.NaServer):
        raise exception.InvalidInput(reason=_("Backend server not NaServer."))
    delta_secs = getattr(backend, 'ssc_run_delta_secs', 1800)
    if getattr(backend, 'ssc_job_running', None):
        LOG.warning(_LW('ssc job in progress. Returning... '))
        return
    elif (getattr(backend, 'ssc_run_time', None) is None or
          (backend.ssc_run_time and
           timeutils.is_older_than(backend.ssc_run_time, delta_secs))):
        if synchronous:
            get_cluster_latest_ssc(backend, na_server, vserver)
        else:
            t = threading.Timer(0, get_cluster_latest_ssc,
                                args=[backend, na_server, vserver])
            t.start()
    elif getattr(backend, 'refresh_stale_running', None):
        LOG.warning(_LW('refresh stale ssc job in progress. Returning... '))
        return
    else:
        if backend.stale_vols:
            if synchronous:
                refresh_cluster_stale_ssc(backend, na_server, vserver)
            else:
                t = threading.Timer(0, refresh_cluster_stale_ssc,
                                    args=[backend, na_server, vserver])
                t.start()
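A standalone sketch of the asynchronous branch above: a threading.Timer with a zero delay fires its callback on a background thread almost immediately, so the caller returns without waiting for the SSC refresh to finish. The names below are made up for illustration.

import threading

def refresh(backend, na_server, vserver):
    # Stand-in for get_cluster_latest_ssc / refresh_cluster_stale_ssc.
    print('refreshing ssc for', backend, na_server, vserver)

t = threading.Timer(0, refresh, args=['backend-1', 'na-server-1', 'vs0'])
t.start()
t.join()  # joined here only so the demo exits after the callback runs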
Example #7
 def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
     """Copies src volume to dest volume."""
     LOG.info(_LI("Copying src vol %(src)s to dest vol %(dst)s.")
              % {'src': src_vol['label'], 'dst': dst_vol['label']})
     try:
         job = None
         job = self._client.create_volume_copy_job(src_vol['id'],
                                                   dst_vol['volumeRef'])
         while True:
             j_st = self._client.list_vol_copy_job(job['volcopyRef'])
             if (j_st['status'] == 'inProgress' or j_st['status'] ==
                     'pending' or j_st['status'] == 'unknown'):
                 time.sleep(self.SLEEP_SECS)
                 continue
             if (j_st['status'] == 'failed' or j_st['status'] == 'halted'):
                 LOG.error(_LE("Vol copy job status %s."), j_st['status'])
                 msg = _("Vol copy job for dest %s failed.")\
                     % dst_vol['label']
                 raise exception.NetAppDriverException(msg)
             LOG.info(_LI("Vol copy job completed for dest %s.")
                      % dst_vol['label'])
             break
     finally:
         if job:
             try:
                 self._client.delete_vol_copy_job(job['volcopyRef'])
             except exception.NetAppDriverException:
                 LOG.warning(_LW("Failure deleting "
                                 "job %s."), job['volcopyRef'])
         else:
             LOG.warning(_LW('Volume copy job for src vol %s not found.'),
                         src_vol['id'])
     LOG.info(_LI('Copy job to dest vol %s completed.'), dst_vol['label'])
Example #8
    def __init__(self, *args, **kwargs):
        super(DellEQLSanISCSIDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(eqlx_opts)
        self._group_ip = None
        self.sshpool = None

        if self.configuration.eqlx_use_chap is True:
            LOG.warning(_LW(
                'Configuration options eqlx_use_chap, '
                'eqlx_chap_login and eqlx_chap_password are deprecated. Use '
                'use_chap_auth, chap_username and chap_password '
                'respectively for the same.'))

            self.configuration.use_chap_auth = (
                self.configuration.eqlx_use_chap)
            self.configuration.chap_username = (
                self.configuration.eqlx_chap_login)
            self.configuration.chap_password = (
                self.configuration.eqlx_chap_password)

        if self.configuration.eqlx_cli_timeout:
            msg = _LW('Configuration option eqlx_cli_timeout '
                      'is deprecated and will be removed in M release. '
                      'Use ssh_conn_timeout instead.')
            self.configuration.ssh_conn_timeout = (
                self.configuration.eqlx_cli_timeout)
            versionutils.report_deprecated_feature(LOG, msg)
Example #9
    def _check_goodness_function(self, stats):
        """Gets a host's goodness rating based on its goodness function."""

        goodness_rating = 0

        if stats['goodness_function'] is None:
            LOG.warning(_LW("Goodness function not set :: defaulting to "
                            "minimal goodness rating of 0"))
        else:
            try:
                goodness_result = self._run_evaluator(
                    stats['goodness_function'],
                    stats)
            except Exception as ex:
                LOG.warning(_LW("Error in goodness_function function "
                                "'%(function)s' : '%(error)s' :: Defaulting "
                                "to a goodness of 0"),
                            {'function': stats['goodness_function'],
                             'error': ex, })
                return goodness_rating

            if isinstance(goodness_result, bool):
                if goodness_result:
                    goodness_rating = 100
            elif goodness_result < 0 or goodness_result > 100:
                LOG.warning(_LW("Invalid goodness result.  Result must be "
                                "between 0 and 100.  Result generated: '%s' "
                                ":: Defaulting to a goodness of 0"),
                            goodness_result)
            else:
                goodness_rating = goodness_result

        return goodness_rating
Example #10
 def _copy_volume_high_prior_readonly(self, src_vol, dst_vol):
     """Copies src volume to dest volume."""
     LOG.info(
         _LI("Copying src vol %(src)s to dest vol %(dst)s."), {"src": src_vol["label"], "dst": dst_vol["label"]}
     )
     try:
         job = None
         job = self._client.create_volume_copy_job(src_vol["id"], dst_vol["volumeRef"])
         while True:
             j_st = self._client.list_vol_copy_job(job["volcopyRef"])
             if j_st["status"] == "inProgress" or j_st["status"] == "pending" or j_st["status"] == "unknown":
                 time.sleep(self.SLEEP_SECS)
                 continue
             if j_st["status"] == "failed" or j_st["status"] == "halted":
                 LOG.error(_LE("Vol copy job status %s."), j_st["status"])
                 raise exception.NetAppDriverException(_("Vol copy job for dest %s failed.") % dst_vol["label"])
             LOG.info(_LI("Vol copy job completed for dest %s."), dst_vol["label"])
             break
     finally:
         if job:
             try:
                 self._client.delete_vol_copy_job(job["volcopyRef"])
             except exception.NetAppDriverException:
                 LOG.warning(_LW("Failure deleting " "job %s."), job["volcopyRef"])
         else:
             LOG.warning(_LW("Volume copy job for src vol %s not found."), src_vol["id"])
     LOG.info(_LI("Copy job to dest vol %s completed."), dst_vol["label"])
Example #11
 def delete_snapshot(self, vdiname, snapname):
     try:
         (_stdout, _stderr) = self._run_dog('vdi', 'delete', '-s',
                                            snapname, vdiname)
         if _stderr.rstrip().endswith(self.DOG_RESP_SNAPSHOT_NOT_FOUND):
             LOG.warning(_LW('Snapshot "%s" not found.'), snapname)
         elif _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND):
             LOG.warning(_LW('Volume "%s" not found.'), vdiname)
         elif _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
             # NOTE(tishizaki)
             # Dog command does not return error_code although
             # dog command cannot connect to sheep process.
             # That is a Sheepdog's bug.
             # To avoid a Sheepdog's bug, now we need to check stderr.
             # Even after Sheepdog is fixed, this check is still
             # needed for users running old Sheepdog versions.
             reason = (_('Failed to connect to sheep daemon. '
                         'addr: %(addr)s, port: %(port)s')
                       % {'addr': self.addr, 'port': self.port})
             raise exception.SheepdogError(reason=reason)
     except exception.SheepdogCmdError as e:
         cmd = e.kwargs['cmd']
         _stderr = e.kwargs['stderr']
         with excutils.save_and_reraise_exception():
             if _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
                 msg = _LE('Failed to connect to sheep daemon. '
                           'addr: %(addr)s, port: %(port)s')
                 LOG.error(msg, {'addr': self.addr, 'port': self.port})
             else:
                 LOG.error(_LE('Failed to delete snapshot. (command: %s)'),
                           cmd)
Example #12
    def _create_server(self, connector, client):
        server_info = None
        chap_enabled = self.configuration.hplefthand_iscsi_chap_enabled
        try:
            server_info = client.getServerByName(connector["host"])
            chap_secret = server_info["chapTargetSecret"]
            if not chap_enabled and chap_secret:
                LOG.warning(_LW("CHAP secret exists for host %s but CHAP is " "disabled"), connector["host"])
            if chap_enabled and chap_secret is None:
                LOG.warning(_LW("CHAP is enabled, but server secret not " "configured on server %s"), connector["host"])
            return server_info
        except hpexceptions.HTTPNotFound:
            # server does not exist, so create one
            pass

        optional = None
        if chap_enabled:
            chap_secret = utils.generate_password()
            optional = {
                "chapName": connector["initiator"],
                "chapTargetSecret": chap_secret,
                "chapAuthenticationRequired": True,
            }

        server_info = client.createServer(connector["host"], connector["initiator"], optional)
        return server_info
Example #13
    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""

        LOG.debug('Delete snapshot: %s', snapshot['name'])

        # Retrieve the CG name for the base volume
        volume_name = self.configuration.zadara_vol_name_template\
            % snapshot['volume_name']
        cg_name = self._get_volume_cg_name(volume_name)
        if not cg_name:
            # If the volume isn't present, then don't attempt to delete
            LOG.warning(_LW("snapshot: original volume %s not found, "
                            "skipping delete operation")
                        % snapshot['volume_name'])
            return True

        snap_id = self._get_snap_id(cg_name, snapshot['name'])
        if not snap_id:
            # If the snapshot isn't present, then don't attempt to delete
            LOG.warning(_LW("snapshot: snapshot %s not found, "
                            "skipping delete operation")
                        % snapshot['name'])
            return True

        self.vpsa.send_cmd('delete_snapshot',
                           snap_id=snap_id)
Example #14
File: iscsi.py Project: Qeas/cinder
    def remove_export(self, _ctx, volume):
        """Destroy all resources created to export zvol.

        :param volume: reference of volume to be unexported
        """
        zvol_name = self._get_zvol_name(volume['name'])
        target_name = self._get_target_name(volume['name'])
        target_group_name = self._get_target_group_name(volume['name'])
        self.nms.scsidisk.delete_lu(zvol_name)

        try:
            self.nms.stmf.destroy_targetgroup(target_group_name)
        except nexenta.NexentaException as exc:
            # We assume that target group is already gone
            LOG.warning(_LW('Got error trying to destroy target group'
                            ' %(target_group)s, assuming it is '
                            'already gone: %(exc)s'),
                        {'target_group': target_group_name, 'exc': exc})
        try:
            self.nms.iscsitarget.delete_target(target_name)
        except nexenta.NexentaException as exc:
            # We assume that target is gone as well
            LOG.warning(_LW('Got error trying to delete target %(target)s,'
                            ' assuming it is already gone: %(exc)s'),
                        {'target': target_name, 'exc': exc})
Example #15
def validate_storage_migration(volume, target_host, src_serial, src_protocol):
    if 'location_info' not in target_host['capabilities']:
        LOG.warning(_LW("Failed to get pool name and "
                        "serial number. 'location_info' "
                        "from %s."), target_host['host'])
        return False
    info = target_host['capabilities']['location_info']
    LOG.debug("Host for migration is %s.", info)
    try:
        serial_number = info.split('|')[1]
    except (AttributeError, IndexError):
        LOG.warning(_LW('Error on getting serial number '
                        'from %s.'), target_host['host'])
        return False
    if serial_number != src_serial:
        LOG.debug('Skip storage-assisted migration because '
                  'target and source backend are not managing '
                  'the same array.')
        return False
    if (target_host['capabilities']['storage_protocol'] != src_protocol
            and get_original_status(volume) == 'in-use'):
        LOG.debug('Skip storage-assisted migration because '
                  'in-use volumes cannot be '
                  'migrated between different protocols.')
        return False
    return True
Example #16
    def initialize_iscsi_ports(self, common):
        # map iscsi_ip-> ip_port
        #             -> iqn
        #             -> nsp
        self.iscsi_ips = {}
        temp_iscsi_ip = {}

        # use the 3PAR ip_addr list for iSCSI configuration
        if len(self.configuration.hp3par_iscsi_ips) > 0:
            # add port values to ip_addr, if necessary
            for ip_addr in self.configuration.hp3par_iscsi_ips:
                ip = ip_addr.split(':')
                if len(ip) == 1:
                    temp_iscsi_ip[ip_addr] = {'ip_port': DEFAULT_ISCSI_PORT}
                elif len(ip) == 2:
                    temp_iscsi_ip[ip[0]] = {'ip_port': ip[1]}
                else:
                    LOG.warning(_LW("Invalid IP address format '%s'"), ip_addr)

        # add the single value iscsi_ip_address option to the IP dictionary.
        # This way we can see if it's a valid iSCSI IP. If it's not valid,
        # we won't use it and won't bother to report it, see below
        if (self.configuration.iscsi_ip_address not in temp_iscsi_ip):
            ip = self.configuration.iscsi_ip_address
            ip_port = self.configuration.iscsi_port
            temp_iscsi_ip[ip] = {'ip_port': ip_port}

        # get all the valid iSCSI ports from 3PAR
        # when found, add the valid iSCSI ip, ip port, iqn and nsp
        # to the iSCSI IP dictionary
        iscsi_ports = common.get_active_iscsi_target_ports()

        for port in iscsi_ports:
            ip = port['IPAddr']
            if ip in temp_iscsi_ip:
                ip_port = temp_iscsi_ip[ip]['ip_port']
                self.iscsi_ips[ip] = {'ip_port': ip_port,
                                      'nsp': port['nsp'],
                                      'iqn': port['iSCSIName']
                                      }
                del temp_iscsi_ip[ip]

        # if the single value iscsi_ip_address option is still in the
        # temp dictionary it's because it defaults to $my_ip which doesn't
        # make sense in this context. So, if present, remove it and move on.
        if (self.configuration.iscsi_ip_address in temp_iscsi_ip):
            del temp_iscsi_ip[self.configuration.iscsi_ip_address]

        # lets see if there are invalid iSCSI IPs left in the temp dict
        if len(temp_iscsi_ip) > 0:
            LOG.warning(_LW("Found invalid iSCSI IP address(s) in "
                            "configuration option(s) hp3par_iscsi_ips or "
                            "iscsi_ip_address '%s.'"),
                        (", ".join(temp_iscsi_ip)))

        if not self.iscsi_ips:
            msg = _('At least one valid iSCSI IP address must be set.')
            LOG.error(msg)
            raise exception.InvalidInput(reason=msg)
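A minimal illustration of the address parsing above, assuming DEFAULT_ISCSI_PORT is 3260 (the standard iSCSI port); the helper name is hypothetical. A bare IPv6 literal would split into more than two parts and land in the invalid-format branch, matching the warning in the driver code.

DEFAULT_ISCSI_PORT = 3260

def parse_iscsi_ip(ip_addr):
    # Mirror of the split logic in initialize_iscsi_ports.
    parts = ip_addr.split(':')
    if len(parts) == 1:
        return parts[0], DEFAULT_ISCSI_PORT
    if len(parts) == 2:
        return parts[0], parts[1]
    raise ValueError("Invalid IP address format '%s'" % ip_addr)

print(parse_iscsi_ip('10.0.0.2'))       # ('10.0.0.2', 3260)
print(parse_iscsi_ip('10.0.0.2:3261'))  # ('10.0.0.2', '3261')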
Example #17
    def _mount_quobyte(self, quobyte_volume, mount_path, ensure=False):
        """Mount Quobyte volume to mount path."""
        mounted = False
        for line in QuobyteDriver.read_proc_mount():
            if line.split()[1] == mount_path:
                mounted = True
                break

        if mounted:
            try:
                os.stat(mount_path)
            except OSError as exc:
                if exc.errno == errno.ENOTCONN:
                    mounted = False
                    try:
                        LOG.info(_LI('Fixing previous mount %s which was not'
                                     ' unmounted correctly.'), mount_path)
                        self._execute('umount.quobyte', mount_path,
                                      run_as_root=False)
                    except processutils.ProcessExecutionError as exc:
                        LOG.warn(_LW("Failed to unmount previous mount: %s"),
                                 exc)
                else:
                    # TODO(quobyte): Extend exc analysis in here?
                    LOG.warn(_LW("Unknown error occurred while checking mount"
                                 " point: %s Trying to continue."), exc)

        if not mounted:
            if not os.path.isdir(mount_path):
                self._execute('mkdir', '-p', mount_path)

            command = ['mount.quobyte', quobyte_volume, mount_path]
            if self.configuration.quobyte_client_cfg:
                command.extend(['-c', self.configuration.quobyte_client_cfg])

            try:
                LOG.info(_LI('Mounting volume: %s ...'), quobyte_volume)
                self._execute(*command, run_as_root=False)
                LOG.info(_LI('Mounting volume: %s succeeded'), quobyte_volume)
                mounted = True
            except processutils.ProcessExecutionError as exc:
                if ensure and 'already mounted' in exc.stderr:
                    LOG.warn(_LW("%s is already mounted"), quobyte_volume)
                else:
                    raise

        if mounted:
            try:
                xattr.getxattr(mount_path, 'quobyte.info')
            except Exception as exc:
                msg = (_("The mount %(mount_path)s is not a valid"
                         " Quobyte USP volume. Error: %(exc)s")
                       % {'mount_path': mount_path, 'exc': exc})
                raise exception.VolumeDriverException(msg)
            if not os.access(mount_path, os.W_OK | os.X_OK):
                LOG.warn(_LW("Volume is not writable. Please broaden the file"
                             " permissions. Mount: %s"), mount_path)
Example #18
def get_volume_type_reservation(ctxt, volume, type_id,
                                reserve_vol_type_only=False):
    from cinder import quota
    QUOTAS = quota.QUOTAS
    # Reserve quotas for the given volume type
    try:
        reserve_opts = {'volumes': 1, 'gigabytes': volume['size']}
        QUOTAS.add_volume_type_opts(ctxt,
                                    reserve_opts,
                                    type_id)
        # If reserve_vol_type_only is True, just reserve volume_type quota,
        # not volume quota.
        if reserve_vol_type_only:
            reserve_opts.pop('volumes')
            reserve_opts.pop('gigabytes')
        # Note that usually the project_id on the volume will be the same as
        # the project_id in the context. But, if they are different then the
        # reservations must be recorded against the project_id that owns the
        # volume.
        project_id = volume['project_id']
        reservations = QUOTAS.reserve(ctxt,
                                      project_id=project_id,
                                      **reserve_opts)
    except exception.OverQuota as e:
        overs = e.kwargs['overs']
        usages = e.kwargs['usages']
        quotas = e.kwargs['quotas']

        def _consumed(name):
            return (usages[name]['reserved'] + usages[name]['in_use'])

        for over in overs:
            if 'gigabytes' in over:
                s_size = volume['size']
                d_quota = quotas[over]
                d_consumed = _consumed(over)
                LOG.warning(
                    _LW("Quota exceeded for %(s_pid)s, tried to create "
                        "%(s_size)sG volume - (%(d_consumed)dG of "
                        "%(d_quota)dG already consumed)"),
                    {'s_pid': ctxt.project_id,
                     's_size': s_size,
                     'd_consumed': d_consumed,
                     'd_quota': d_quota})
                raise exception.VolumeSizeExceedsAvailableQuota(
                    requested=s_size, quota=d_quota, consumed=d_consumed)
            elif 'volumes' in over:
                LOG.warning(
                    _LW("Quota exceeded for %(s_pid)s, tried to create "
                        "volume (%(d_consumed)d volumes "
                        "already consumed)"),
                    {'s_pid': ctxt.project_id,
                     'd_consumed': _consumed(over)})
                raise exception.VolumeLimitExceeded(
                    allowed=quotas[over])
    return reservations
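A worked example of the over-quota arithmetic above, with made-up numbers: consumed quota is what is already in use plus what is currently reserved, and a request fails once consumed plus requested exceeds the limit.

usages = {'gigabytes': {'in_use': 800, 'reserved': 150}}
quotas = {'gigabytes': 1000}

def _consumed(name):
    return usages[name]['reserved'] + usages[name]['in_use']

requested = 100
print(_consumed('gigabytes'))  # 950
# 950 consumed + 100 requested > 1000 quota, so the reservation would fail:
print(_consumed('gigabytes') + requested > quotas['gigabytes'])  # True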
Example #19
    def failback(self, volumes):
        """Failover volumes back to primary backend.

        The main steps:
        1. Switch the role of replication pairs.
        2. Copy the second LUN data back to primary LUN.
        3. Split replication pairs.
        4. Switch the role of replication pairs.
        5. Enable replications.
        """
        volumes_update = []
        for v in volumes:
            v_update = {}
            v_update['volume_id'] = v['id']
            drv_data = get_replication_driver_data(v)
            pair_id = drv_data.get('pair_id')
            if not pair_id:
                LOG.warning(_LW("No pair id in volume %s."), v['id'])
                v_update['updates'] = {'replication_status': 'error'}
                volumes_update.append(v_update)
                continue

            rmt_lun_id = drv_data.get('rmt_lun_id')
            if not rmt_lun_id:
                LOG.warning(_LW("No remote lun id in volume %s."), v['id'])
                v_update['updates'] = {'replication_status': 'error'}
                volumes_update.append(v_update)
                continue

            # Switch replication pair role, and start synchronize.
            self.local_driver.enable(pair_id)

            # Wait for synchronize complete.
            self.local_driver.wait_replica_ready(pair_id)

            # Split replication pair again
            self.rmt_driver.failover(pair_id)

            # Switch replication pair role, and start synchronize.
            self.rmt_driver.enable(pair_id)

            lun_info = self.rmt_client.get_lun_info(rmt_lun_id)
            lun_wwn = lun_info.get('WWN')
            metadata = huawei_utils.get_volume_metadata(v)
            metadata.update({'lun_wwn': lun_wwn})
            new_drv_data = {'pair_id': pair_id,
                            'rmt_lun_id': v['provider_location']}
            new_drv_data = to_string(new_drv_data)
            v_update['updates'] = {'provider_location': rmt_lun_id,
                                   'replication_status': 'available',
                                   'replication_driver_data': new_drv_data,
                                   'metadata': metadata}
            volumes_update.append(v_update)

        return volumes_update
Example #20
def process_reserve_over_quota(context, over_quota_exception,
                               resource, size=None):
    """Handle OverQuota exception.

    Analyze OverQuota exception, and raise new exception related to
    resource type. If there are unexpected items in overs,
    UnexpectedOverQuota is raised.

    :param context: security context
    :param over_quota_exception: OverQuota exception
    :param resource: can be backups, snapshots, and volumes
    :param size: requested size in reservation
    """
    def _consumed(name):
        return (usages[name]['reserved'] + usages[name]['in_use'])

    overs = over_quota_exception.kwargs['overs']
    usages = over_quota_exception.kwargs['usages']
    quotas = over_quota_exception.kwargs['quotas']
    invalid_overs = []

    for over in overs:
        if 'gigabytes' in over:
            msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                      "%(s_size)dG %(s_resource)s (%(d_consumed)dG of "
                      "%(d_quota)dG already consumed).")
            LOG.warning(msg, {'s_pid': context.project_id,
                              's_size': size,
                              's_resource': resource[:-1],
                              'd_consumed': _consumed(over),
                              'd_quota': quotas[over]})
            if resource == 'backups':
                exc = exception.VolumeBackupSizeExceedsAvailableQuota
            else:
                exc = exception.VolumeSizeExceedsAvailableQuota
            raise exc(
                name=over,
                requested=size,
                consumed=_consumed(over),
                quota=quotas[over])
        if (resource in OVER_QUOTA_RESOURCE_EXCEPTIONS.keys() and
                resource in over):
            msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                      "%(s_resource)s (%(d_consumed)d %(s_resource)ss "
                      "already consumed).")
            LOG.warning(msg, {'s_pid': context.project_id,
                              'd_consumed': _consumed(over),
                              's_resource': resource[:-1]})
            raise OVER_QUOTA_RESOURCE_EXCEPTIONS[resource](
                allowed=quotas[over],
                name=over)
        invalid_overs.append(over)

    if invalid_overs:
        raise exception.UnexpectedOverQuota(name=', '.join(invalid_overs))
Example #21
    def execute(self, context, size, volume_type_id, optional_args):
        try:
            values = {'per_volume_gigabytes': size}
            QUOTAS.limit_check(context, project_id=context.project_id,
                               **values)
        except exception.OverQuota as e:
            quotas = e.kwargs['quotas']
            raise exception.VolumeSizeExceedsLimit(
                size=size, limit=quotas['per_volume_gigabytes'])

        try:
            reserve_opts = {'volumes': 1, 'gigabytes': size}
            QUOTAS.add_volume_type_opts(context, reserve_opts, volume_type_id)
            reservations = QUOTAS.reserve(context, **reserve_opts)
            return {
                'reservations': reservations,
            }
        except exception.OverQuota as e:
            overs = e.kwargs['overs']
            quotas = e.kwargs['quotas']
            usages = e.kwargs['usages']

            def _consumed(name):
                return usages[name]['reserved'] + usages[name]['in_use']

            def _get_over(name):
                for over in overs:
                    if name in over:
                        return over
                return None

            over_name = _get_over('gigabytes')
            if over_name:
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "%(s_size)sG volume (%(d_consumed)dG "
                          "of %(d_quota)dG already consumed)")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  's_size': size,
                                  'd_consumed': _consumed(over_name),
                                  'd_quota': quotas[over_name]})
                raise exception.VolumeSizeExceedsAvailableQuota(
                    name=over_name,
                    requested=size,
                    consumed=_consumed(over_name),
                    quota=quotas[over_name])
            elif _get_over('volumes'):
                msg = _LW("Quota exceeded for %(s_pid)s, tried to create "
                          "volume (%(d_consumed)d volumes "
                          "already consumed)")
                LOG.warning(msg, {'s_pid': context.project_id,
                                  'd_consumed': _consumed('volumes')})
                raise exception.VolumeLimitExceeded(allowed=quotas['volumes'])
            else:
                # If nothing was reraised, ensure we reraise the initial error
                raise
Example #22
def log_extra_spec_warnings(extra_specs):
    for spec in set(extra_specs.keys() if extra_specs else []) & set(OBSOLETE_SSC_SPECS.keys()):
        LOG.warning(
            _LW("Extra spec %(old)s is obsolete.  Use %(new)s " "instead."),
            {"old": spec, "new": OBSOLETE_SSC_SPECS[spec]},
        )
    for spec in set(extra_specs.keys() if extra_specs else []) & set(DEPRECATED_SSC_SPECS.keys()):
        LOG.warning(
            _LW("Extra spec %(old)s is deprecated.  Use %(new)s " "instead."),
            {"old": spec, "new": DEPRECATED_SSC_SPECS[spec]},
        )
Example #23
def log_extra_spec_warnings(extra_specs):
    for spec in (set(extra_specs.keys() if extra_specs else []) &
                 set(OBSOLETE_SSC_SPECS.keys())):
        LOG.warning(_LW('Extra spec %(old)s is obsolete.  Use %(new)s '
                        'instead.'), {'old': spec,
                                      'new': OBSOLETE_SSC_SPECS[spec]})
    for spec in (set(extra_specs.keys() if extra_specs else []) &
                 set(DEPRECATED_SSC_SPECS.keys())):
        LOG.warning(_LW('Extra spec %(old)s is deprecated.  Use %(new)s '
                        'instead.'), {'old': spec,
                                      'new': DEPRECATED_SSC_SPECS[spec]})
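A tiny demonstration of the set intersection both variants rely on; the spec names and mappings here are invented, not the real NetApp tables.

OBSOLETE_SSC_SPECS = {'netapp:raid_type': 'netapp_raid_type'}
extra_specs = {'netapp:raid_type': 'raid4', 'qos': 'high'}

# Only keys present in both dicts survive the intersection.
overlap = set(extra_specs) & set(OBSOLETE_SSC_SPECS)
print(overlap)  # {'netapp:raid_type'}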
Example #24
 def delete_snapshot(self, vdiname, snapname):
     try:
         (_stdout, _stderr) = self._run_dog("vdi", "delete", "-s", snapname, vdiname)
         if _stderr.rstrip().endswith(self.DOG_RESP_SNAPSHOT_NOT_FOUND):
             LOG.warning(_LW('Snapshot "%s" not found.'), snapname)
         elif _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND):
             LOG.warning(_LW('Volume "%s" not found.'), vdiname)
     except exception.SheepdogCmdError as e:
         cmd = e.kwargs["cmd"]
         _stderr = e.kwargs["stderr"]
         with excutils.save_and_reraise_exception():
             LOG.error(_LE("Failed to delete snapshot. (command: %s)"), cmd)
Example #25
    def set_nas_security_options(self, is_new_cinder_install):
        self._execute_as_root = False

        LOG.debug(
            "nas_secure_file_* settings are %(ops)s and %(perm)s",
            {
                "ops": self.configuration.nas_secure_file_operations,
                "perm": self.configuration.nas_secure_file_permissions,
            },
        )

        if self.configuration.nas_secure_file_operations == "auto":
            """Note (kaisers): All previous Quobyte driver versions ran with
            secure settings hardcoded to 'True'. Therefore the default 'auto'
            setting can safely be mapped to the same, secure, setting.
            """
            LOG.debug("Mapping 'auto' value to 'true' for" " nas_secure_file_operations.")
            self.configuration.nas_secure_file_operations = "true"

        if self.configuration.nas_secure_file_permissions == "auto":
            """Note (kaisers): All previous Quobyte driver versions ran with
            secure settings hardcoded to 'True'. Therefore the default 'auto'
            setting can safely be mapped to the same, secure, setting.
            """
            LOG.debug("Mapping 'auto' value to 'true' for" " nas_secure_file_permissions.")
            self.configuration.nas_secure_file_permissions = "true"

        if self.configuration.nas_secure_file_operations == "false":
            LOG.warning(
                _LW(
                    "The NAS file operations will be run as "
                    "root, allowing root level access at the storage "
                    "backend."
                )
            )
            self._execute_as_root = True
        else:
            LOG.info(
                _LI(
                    "The NAS file operations will be run as"
                    " non-privileged user in secure mode. Please"
                    " ensure your libvirtd settings have been configured"
                    " accordingly (see section 'OpenStack' in the Quobyte"
                    " Manual)."
                )
            )

        if self.configuration.nas_secure_file_permissions == "false":
            LOG.warning(_LW("The NAS file permissions mode will be 666 " "(allowing other/world read & write access)."))
Example #26
    def _unmap_vdisk_from_host(self, vdisk_name, connector):
        if "host" in connector:
            host_name = self._get_host_from_connector(connector)
            self._driver_assert(
                host_name is not None,
                _("_get_host_from_connector failed to return the host name for connector."),
            )
        else:
            host_name = None

        # Check if vdisk-host mapping exists, remove if it does. If no host
        # name was given, but only one mapping exists, we can use that.
        mapping_data = self._get_vdiskhost_mappings(vdisk_name)
        if not mapping_data:
            LOG.warning(
                _LW("_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to any host found."),
                {"vol_name": vdisk_name},
            )
            return host_name
        if host_name is None:
            if len(mapping_data) > 1:
                LOG.warning(
                    _LW(
                        "_unmap_vdisk_from_host: Multiple mappings of "
                        "volume %(vdisk_name)s found, no host "
                        "specified."
                    ),
                    {"vdisk_name": vdisk_name},
                )
                return
            else:
                host_name = list(mapping_data.keys())[0]
        else:
            if host_name not in mapping_data:
                LOG.error(
                    _LE("_unmap_vdisk_from_host: No mapping of volume %(vol_name)s to host %(host_name)s found."),
                    {"vol_name": vdisk_name, "host_name": host_name},
                )
                return host_name

        # We have a valid host_name now
        ssh_cmd = ["svctask", "rmvdiskhostmap", "-host", host_name, vdisk_name]
        out, err = self._ssh(ssh_cmd)
        # Verify CLI behaviour - no output is returned from rmvdiskhostmap
        self._assert_ssh_return((not out.strip()), "_unmap_vdisk_from_host", ssh_cmd, out, err)

        # If this host has no more mappings, delete it
        mapping_data = self._get_hostvdisk_mappings(host_name)
        if not mapping_data:
            self._delete_host(host_name)
Example #27
    def _mount_quobyte(self, quobyte_volume, mount_path, ensure=False):
        """Mount Quobyte volume to mount path."""
        mounted = False
        for line in QuobyteDriver.read_proc_mount():
            if line.split()[1] == mount_path:
                mounted = True
                break

        if mounted:
            try:
                os.stat(mount_path)
            except OSError as exc:
                if exc.errno == errno.ENOTCONN:
                    mounted = False
                    try:
                        LOG.info(_LI('Fixing previous mount %s which was not'
                                     ' unmounted correctly.'), mount_path)
                        self._execute('umount.quobyte', mount_path,
                                      run_as_root=self._execute_as_root)
                    except processutils.ProcessExecutionError as exc:
                        LOG.warning(_LW("Failed to unmount previous mount: "
                                        "%s"), exc)
                else:
                    # TODO(quobyte): Extend exc analysis in here?
                    LOG.warning(_LW("Unknown error occurred while checking "
                                    "mount point: %s Trying to continue."),
                                exc)

        if not mounted:
            if not os.path.isdir(mount_path):
                self._execute('mkdir', '-p', mount_path)

            command = ['mount.quobyte', quobyte_volume, mount_path]
            if self.configuration.quobyte_client_cfg:
                command.extend(['-c', self.configuration.quobyte_client_cfg])

            try:
                LOG.info(_LI('Mounting volume: %s ...'), quobyte_volume)
                self._execute(*command, run_as_root=self._execute_as_root)
                LOG.info(_LI('Mounting volume: %s succeeded'), quobyte_volume)
                mounted = True
            except processutils.ProcessExecutionError as exc:
                if ensure and 'already mounted' in exc.stderr:
                    LOG.warning(_LW("%s is already mounted"), quobyte_volume)
                else:
                    raise

        if mounted:
            self._validate_volume(mount_path)
Example #28
    def host_passes(self, host_state, filter_properties):
        context = filter_properties['context']
        host = volume_utils.extract_host(host_state.host, 'host')

        scheduler_hints = filter_properties.get('scheduler_hints') or {}
        instance_uuid = scheduler_hints.get(HINT_KEYWORD, None)

        # Without 'local_to_instance' hint
        if not instance_uuid:
            return True

        if not uuidutils.is_uuid_like(instance_uuid):
            raise exception.InvalidUUID(uuid=instance_uuid)

        # TODO(adrienverge): Currently it is not recommended to allow instance
        # migrations for hypervisors where this hint will be used. In case of
        # instance migration, a previously locally-created volume will not be
        # automatically migrated. Also in case of instance migration during the
        # volume's scheduling, the result is unpredictable. A future
        # enhancement would be to subscribe to Nova migration events (e.g. via
        # Ceilometer).

        # First, look up already-known information in the local cache
        if instance_uuid in self._cache:
            return self._cache[instance_uuid] == host

        if not self._nova_has_extended_server_attributes(context):
            LOG.warning(_LW('Hint "%s" dropped because '
                            'ExtendedServerAttributes not active in Nova.'),
                        HINT_KEYWORD)
            raise exception.CinderException(_('Hint "%s" not supported.') %
                                            HINT_KEYWORD)

        server = nova.API().get_server(context, instance_uuid,
                                       privileged_user=True,
                                       timeout=REQUESTS_TIMEOUT)

        if not hasattr(server, INSTANCE_HOST_PROP):
            LOG.warning(_LW('Hint "%s" dropped because Nova did not return '
                            'enough information. Either Nova policy needs to '
                            'be changed or a privileged account for Nova '
                            'should be specified in conf.'), HINT_KEYWORD)
            raise exception.CinderException(_('Hint "%s" not supported.') %
                                            HINT_KEYWORD)

        self._cache[instance_uuid] = getattr(server, INSTANCE_HOST_PROP)

        # Match if given instance is hosted on host
        return self._cache[instance_uuid] == host
Example #29
    def delete_snapshot(self, snapshot_name):
        """Deletes a snapshot."""

        snap = self.vnx.get_snap(name=snapshot_name)
        try:
            snap.delete()
        except storops_ex.VNXSnapNotExistsError as ex:
            LOG.warning(_LW("Snapshot %(name)s may be deleted already. "
                            "Message: %(msg)s"),
                        {'name': snapshot_name, 'msg': ex.message})
        except storops_ex.VNXDeleteAttachedSnapError as ex:
            with excutils.save_and_reraise_exception():
                LOG.warning(_LW("Failed to delete snapshot %(name)s "
                                "which is in use. Message: %(msg)s"),
                            {'name': snapshot_name, 'msg': ex.message})
Example #30
File: hgst.py Project: dims/cinder
 def delete_volume(self, volume):
     """Delete a Volume's underlying space."""
     volname = self._get_space_name(volume)
     if volname:
         params = [self.VGCCLUSTER, "space-delete"]
         params += ["-n", six.text_type(volname)]
         # This can fail benignly when we are deleting a snapshot
         try:
             self._execute(*params, run_as_root=True)
         except processutils.ProcessExecutionError as err:
             LOG.warning(_LW("Unable to delete space %(space)s"), {"space": volname})
             self._log_cli_err(err)
     else:
         # This can be benign when we are deleting a snapshot
         LOG.warning(_LW("Attempted to delete a space that's not there."))
Example #31
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate a connection to a volume.

        :param volume: dictionary volume reference
        :param connector: dictionary connector reference
        """

        info = _loc_info(volume['provider_location'])
        if 'tgt' not in info:  # spurious disconnection
            LOG.warning(_LW("terminate_conn: provider location empty."))
            return
        (arid, lun) = info['id_lu']
        (_portal, iqn, loc, ctl, port, hlun) = info['tgt']
        LOG.info(_LI("terminate: connection %s"), volume['provider_location'])
        self.bend.del_iscsi_conn(self.config['hnas_cmd'],
                                 self.config['mgmt_ip0'],
                                 self.config['username'],
                                 self.config['password'],
                                 ctl, iqn, hlun)
        self._update_vol_location(volume['id'], loc)

        return {'provider_location': loc}
Example #32
 def delete_snapshot(self, snapshot):
     snapshot_name = self._get_name_from_id(SNAPSHOT_PREFIX, snapshot['id'])
     LOG.debug('Calling Delete Snapshot: %s in Azure.', snapshot_name)
     try:
         async_action = self.snapshots.delete(
             CONF.azure.resource_group,
             snapshot_name,
         )
         async_action.result()
     except AzureMissingResourceHttpError:
         # If the snapshot isn't present, then don't attempt to delete
         LOG.warning(
             _LW("snapshot: %s not found, "
                 "skipping delete operations"), snapshot['name'])
         LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
     except Exception as e:
         message = (_("Create Snapshop %(snapshop)s in Azure failed. "
                      "reason: %(reason)s") %
                    dict(snapshop=snapshot_name, reason=six.text_type(e)))
         LOG.exception(message)
         raise exception.VolumeBackendAPIException(data=message)
     LOG.info(_LI('Deleted Snapshot: %s in Azure.'), snapshot_name)
Example #33
    def _update_host_state_map(self, context):

        # Get resource usage across the available volume nodes:
        topic = CONF.volume_topic
        volume_services = objects.ServiceList.get_all_by_topic(context,
                                                               topic,
                                                               disabled=False)
        active_hosts = set()
        no_capabilities_hosts = set()
        for service in volume_services.objects:
            host = service.host
            if not utils.service_is_up(service):
                LOG.warning(_LW("volume service is down. (host: %s)"), host)
                continue
            capabilities = self.service_states.get(host, None)
            if capabilities is None:
                no_capabilities_hosts.add(host)
                continue

            host_state = self.host_state_map.get(host)
            if not host_state:
                host_state = self.host_state_cls(host,
                                                 capabilities=capabilities,
                                                 service=dict(service))
                self.host_state_map[host] = host_state
            # update capabilities and attributes in host_state
            host_state.update_from_volume_capability(capabilities,
                                                     service=dict(service))
            active_hosts.add(host)

        self._no_capabilities_hosts = no_capabilities_hosts

        # remove non-active hosts from host_state_map
        nonactive_hosts = set(self.host_state_map.keys()) - active_hosts
        for host in nonactive_hosts:
            LOG.info(
                _LI("Removing non-active host: %(host)s from "
                    "scheduler cache."), {'host': host})
            del self.host_state_map[host]
Example #34
 def map_lun(self, path, igroup_name, lun_id=None):
     """Maps LUN to the initiator and returns LUN id assigned."""
     lun_map = netapp_api.NaElement.create_node_with_children(
         'lun-map', **{
             'path': path,
             'initiator-group': igroup_name
         })
     if lun_id:
         lun_map.add_new_child('lun-id', lun_id)
     try:
         result = self.connection.invoke_successfully(lun_map, True)
         return result.get_child_content('lun-id-assigned')
     except netapp_api.NaApiError as e:
         code = e.code
         message = e.message
         LOG.warning(
              _LW('Error mapping LUN. Code: %(code)s, Message: '
                 '%(message)s'), {
                     'code': code,
                     'message': message
                 })
         raise
Example #35
 def _find_host_exhaustive(self, connector, hosts):
     hname = connector['host']
     hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts]
     if hname in hnames:
         host = hosts[hnames.index(hname)]
         ssh_cmd = ['svcinfo', 'lshost', '-delim', '!', host]
         out, err = self._ssh(ssh_cmd)
         self._assert_ssh_return(out.strip(), '_find_host_exhaustive',
                                 ssh_cmd, out, err)
          attr_lines = out.split('\n')
         attr_parm = {}
         for attr_line in attr_lines:
              attr_name, _sep, attr_val = attr_line.partition('!')
             attr_parm[attr_name] = attr_val
         if ('WWPN' in attr_parm.keys() and 'wwpns' in connector
                 and attr_parm['WWPN'].lower() in map(
                     str.lower, map(str, connector['wwpns']))):
             return host
     else:
         LOG.warning(_LW('Host %(host)s was not found on backend storage.'),
                     {'host': hname})
     return None
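The slicing in _find_host_exhaustive drops everything from the last '-' onward, which is how backend host entries such as 'myhost-1' are matched against a connector host 'myhost'. A quick sketch (note that a name without any '-' would lose its final character, since rfind returns -1):

hosts = ['myhost-1', 'otherhost-2']
hnames = [ihost[0:ihost.rfind('-')] for ihost in hosts]
print(hnames)  # ['myhost', 'otherhost']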
Example #36
    def delete_volume(self, volume):
        """Destroy a zfs volume on appliance.

        :param volume: volume reference
        """
        pool, group, name = self._get_volume_path(volume).split('/')
        url = ('storage/pools/%(pool)s/volumeGroups/%(group)s'
               '/volumes/%(name)s') % {
                   'pool': pool,
                   'group': group,
                   'name': name
               }
        try:
            self.nef.delete(url)
        except exception.NexentaException as exc:
            # We assume that volume is gone
            LOG.warning(
                _LW('Got error trying to delete volume %(volume)s,'
                    ' assuming it is already gone: %(exc)s'), {
                        'volume': volume,
                        'exc': exc
                    })
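For concreteness, with an assumed volume path of 'tank/cinder/volume-1', the split yields pool='tank', group='cinder', name='volume-1', and the URL template above expands as follows:

# Hypothetical volume path; shows how the URL template above expands.
pool, group, name = 'tank/cinder/volume-1'.split('/')
url = ('storage/pools/%(pool)s/volumeGroups/%(group)s'
       '/volumes/%(name)s') % {'pool': pool, 'group': group, 'name': name}
print(url)  # storage/pools/tank/volumeGroups/cinder/volumes/volume-1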
Example #37
0
def convert_config_string_to_dict(config_string):
    """Convert config file replication string to a dict.

    The only supported form is as follows:
    "{'key-1'='val-1' 'key-2'='val-2'...}"

    :param config_string: Properly formatted string to convert to dict.
    :returns: dict of string values
    """

    resultant_dict = {}

    try:
        st = config_string.replace("=", ":")
        st = st.replace(" ", ", ")
        resultant_dict = ast.literal_eval(st)
    except Exception:
        LOG.warning(_LW("Error encountered translating config_string: "
                        "%(config_string)s to dict"),
                    {'config_string': config_string})

    return resultant_dict
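To illustrate the two substitutions above: a string in the documented form becomes a valid Python dict literal before ast.literal_eval parses it (this only holds if keys and values contain no spaces or '=' characters themselves):

import ast

config_string = "{'key-1'='val-1' 'key-2'='val-2'}"
st = config_string.replace("=", ":")  # "{'key-1':'val-1' 'key-2':'val-2'}"
st = st.replace(" ", ", ")            # "{'key-1':'val-1', 'key-2':'val-2'}"
print(ast.literal_eval(st))           # {'key-1': 'val-1', 'key-2': 'val-2'}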
Example #38
0
    def _get_reserved_percentage(self):
        legacy_used_ratio = self.configuration.nfs_used_ratio
        legacy_reserved_ratio = 1 - legacy_used_ratio
        legacy_percentage = legacy_reserved_ratio * 100
        if legacy_used_ratio == NFS_USED_RATIO_DEFAULT:
            return self.configuration.reserved_percentage

        # Honor legacy option if its value is not the default.
        msg = _LW("The option 'nfs_used_ratio' is deprecated and will "
                  "be removed in the Mitaka release.  Please set "
                  "'reserved_percentage = %d' instead.") % (
                      legacy_percentage)
        versionutils.report_deprecated_feature(LOG, msg)

        if not ((self.configuration.nfs_used_ratio > 0) and
                (self.configuration.nfs_used_ratio <= 1)):
            msg = _("NFS config 'nfs_used_ratio' invalid.  Must be > 0 "
                    "and <= 1.0: %s.") % self.configuration.nfs_used_ratio
            LOG.error(msg)
            raise exception.InvalidConfigurationValue(msg)

        return legacy_percentage
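A worked instance of the legacy conversion above (value assumed, no Cinder config objects): with nfs_used_ratio = 0.75, the method reports (1 - 0.75) * 100 = 25.0 as the reserved percentage.

nfs_used_ratio = 0.75                           # assumed legacy setting
legacy_percentage = (1 - nfs_used_ratio) * 100
print(legacy_percentage)                        # 25.0 -> reserved_percentage = 25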
Example #39
0
    def unmap_lun(self, path, igroup_name):
        """Unmaps a LUN from the given initiator."""
        lun_unmap = netapp_api.NaElement.create_node_with_children(
            'lun-unmap', **{
                'path': path,
                'initiator-group': igroup_name
            })
        try:
            self.connection.invoke_successfully(lun_unmap, True)
        except netapp_api.NaApiError as e:
            exc_info = sys.exc_info()
            LOG.warning(
                _LW("Error unmapping LUN. Code: %(code)s, Message: "
                    "%(message)s"), {
                        'code': e.code,
                        'message': e.message
                    })
            # Codes 13115 and 9016 mean the LUN is already unmapped,
            # which is the desired end state, so swallow those errors.
            if e.code not in ('13115', '9016'):
                six.reraise(*exc_info)
Example #40
0
    def terminate_connection(self, volume, connector, **kwargs):
        """Terminate the map."""
        # Check the connector: we cannot get the initiator name during
        # local_delete, nor the full connector info during force-delete.
        if ('host' not in connector) or ('initiator' not in connector):
            LOG.info(_LI("terminate_connection: delete or force delete."))
            return

        host_name = connector['host']
        iqn = connector['initiator']
        LOG.debug('terminate_connection: volume: %(vol)s, host: %(host)s, '
                  'connector: %(initiator)s' % {
                      'vol': volume['name'],
                      'host': host_name,
                      'initiator': iqn
                  })

        self.sshclient.update_login_info()
        lun_id = self.sshclient.check_volume_exist_on_array(volume)
        iscsi_conf = self._get_iscsi_conf(self.configuration)
        chapinfo = self.sshclient.find_chap_info(iscsi_conf,
                                                 connector['initiator'])

        if not lun_id:
            LOG.warning(_LW("Volume %s not exists on the array."),
                        volume['id'])
        host_id = self.sshclient.get_host_id(host_name, iqn)
        self.sshclient.remove_map(lun_id, host_id)

        if (host_id is not None
                and not self.sshclient.get_host_map_info(host_id)):
            if (chapinfo and self.sshclient._chapuser_added_to_initiator(
                    connector['initiator'], chapinfo[0])):
                self.sshclient._remove_chap(connector['initiator'], chapinfo)

        info = {'driver_volume_type': 'iSCSI', 'data': {'iqn': iqn}}
        LOG.info(_LI('terminate_connection, return data is: %s.'), info)
        return info
Example #41
0
    def _execute_command_and_parse_attributes(self, ssh_cmd):
        """Execute command on the FlashSystem and parse attributes.

        Exception is raised if the information from the system
        can not be obtained.

        """

        LOG.debug(
            'enter: _execute_command_and_parse_attributes: '
            'command: %s.', six.text_type(ssh_cmd))

        try:
            out, err = self._ssh(ssh_cmd)
        except processutils.ProcessExecutionError:
            LOG.warning(_LW('Failed to run command: %s.'), ssh_cmd)
            # Do not raise an exception when the command fails; return
            # None and let the caller decide what to do.
            return None

        self._assert_ssh_return(out, '_execute_command_and_parse_attributes',
                                ssh_cmd, out, err)

        attributes = {}
        for attrib_line in out.split('\n'):
            # If '!' is not found, partition returns the whole string
            # plus two empty strings.
            attrib_name, foo, attrib_value = attrib_line.partition('!')
            if attrib_name is not None and attrib_name.strip():
                self._append_dict(attributes, attrib_name, attrib_value)

        LOG.debug(
            'leave: _execute_command_and_parse_attributes: '
            'command: %(cmd)s attributes: %(attr)s.', {
                'cmd': six.text_type(ssh_cmd),
                'attr': six.text_type(attributes)
            })

        return attributes
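The '!'-delimited parsing above hinges on str.partition, which always returns a 3-tuple even when the separator is absent. A small sketch with made-up attribute lines; setdefault stands in for the driver's _append_dict helper, whose exact behavior is an assumption here:

# Made-up 'name!value' attribute lines of the kind parsed above.
out = 'id!42\nname!vdisk0\n'
attributes = {}
for attrib_line in out.split('\n'):
    attrib_name, _sep, attrib_value = attrib_line.partition('!')
    if attrib_name.strip():
        attributes.setdefault(attrib_name, []).append(attrib_value)
print(attributes)  # {'id': ['42'], 'name': ['vdisk0']}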
Example #42
0
    def get_all_volume_groups(root_helper, vg_name=None):
        """Static method to get all VGs on a system.

        :param root_helper: root_helper to use for execute
        :param vg_name: optional, gathers info for only the specified VG
        :returns: List of Dictionaries with VG info

        """
        cmd = LVM.LVM_CMD_PREFIX + [
            'vgs', '--noheadings', '--unit=g', '-o',
            'name,size,free,lv_count,uuid', '--separator', ':', '--nosuffix'
        ]
        if vg_name is not None:
            cmd.append(vg_name)

        start_vgs = time.time()
        (out, _err) = putils.execute(*cmd,
                                     root_helper=root_helper,
                                     run_as_root=True)
        total_time = time.time() - start_vgs
        if total_time > 60:
            LOG.warning(_LW('Took %s seconds to get '
                            'volume groups.'), total_time)

        vg_list = []
        if out is not None:
            vgs = out.split()
            for vg in vgs:
                fields = vg.split(':')
                vg_list.append({
                    'name': fields[0],
                    'size': float(fields[1]),
                    'available': float(fields[2]),
                    'lv_count': int(fields[3]),
                    'uuid': fields[4]
                })

        return vg_list
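For reference, a hedged sketch of the parsing step: given one ':'-separated line of the kind the vgs options above request (sample values invented), the fields map onto the dict as follows:

# Invented sample line in the 'name:size:free:lv_count:uuid' order
# requested by the -o/--separator options above (GiB, --nosuffix).
vg = 'cinder-volumes:100.00:40.00:3:Ab3kx1-uuid'
fields = vg.split(':')
vg_info = {
    'name': fields[0],              # 'cinder-volumes'
    'size': float(fields[1]),       # 100.0 GiB total
    'available': float(fields[2]),  # 40.0 GiB free
    'lv_count': int(fields[3]),     # 3 logical volumes
    'uuid': fields[4],
}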
Example #43
0
    def initialize_secondary(self, api, sclivevolume, initiatorname):
        """Initialize the secondary connection of a live volume pair.

        :param api: Dell SC api.
        :param sclivevolume: Dell SC live volume object.
        :param initiatorname: Cinder iscsi initiator from the connector.
        :return: ISCSI properties.
        """

        # Find our server.
        secondary = api.find_server(initiatorname,
                                    sclivevolume['secondaryScSerialNumber'])
        # No? Create it.
        if secondary is None:
            secondary = api.create_server(
                [initiatorname], self.configuration.dell_server_os,
                sclivevolume['secondaryScSerialNumber'])
        if secondary:
            if api.map_secondary_volume(sclivevolume, secondary):
                # Get our volume and get our properties.
                secondaryvol = api.get_volume(
                    sclivevolume['secondaryVolume']['instanceId'])
                if secondaryvol:
                    return api.find_iscsi_properties(secondaryvol)
        # Dummy return on failure.
        data = {'target_discovered': False,
                'target_iqn': None,
                'target_iqns': [],
                'target_portal': None,
                'target_portals': [],
                'target_lun': None,
                'target_luns': [],
                }
        LOG.warning(_LW('Unable to map live volume secondary volume'
                        ' %(vol)s to secondary server initiator: %(init)r'),
                    {'vol': sclivevolume['secondaryVolume']['instanceName'],
                     'init': initiatorname})
        return data
Example #44
0
def _calculate_count(size_in_m, blocksize):

    # Check if volume_dd_blocksize is valid
    try:
        # Rule out zero-sized/negative/float dd blocksize which
        # cannot be caught by strutils
        if blocksize.startswith(('-', '0')) or '.' in blocksize:
            raise ValueError
        bs = strutils.string_to_bytes('%sB' % blocksize)
    except ValueError:
        LOG.warning(
            _LW("Invalid value %(blocksize)s: 'volume_dd_blocksize' "
                "may be configured incorrectly. Falling back to the "
                "default."),
            {'blocksize': blocksize})
        # Fall back to default blocksize
        CONF.clear_override('volume_dd_blocksize')
        blocksize = CONF.volume_dd_blocksize
        bs = strutils.string_to_bytes('%sB' % blocksize)

    count = math.ceil(size_in_m * units.Mi / bs)

    return blocksize, int(count)
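A worked instance of the count arithmetic above (values assumed): copying 100 MiB with a '4M' blocksize gives bs = 4,194,304 bytes and count = ceil(100 * 1,048,576 / 4,194,304) = 25.

import math

size_in_m = 100               # assumed volume size in MiB
bs = 4 * 1024 * 1024          # what '4M' resolves to via string_to_bytes
count = int(math.ceil(size_in_m * 1024 * 1024 / bs))
print(count)                  # 25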
Example #45
0
    def delete(self, vdiname):
        try:
            (_stdout, _stderr) = self._run_dog('vdi', 'delete', vdiname)
            if _stderr.rstrip().endswith(self.DOG_RESP_VDI_NOT_FOUND):
                LOG.warning(_LW('Volume not found. %s'), vdiname)
            elif _stderr.startswith(self.DOG_RESP_CONNECTION_ERROR):
                # NOTE(tishizaki)
                # The dog command does not return an error code even
                # when it cannot connect to the sheep process; that is
                # a Sheepdog bug, so we have to check stderr instead.
                # Even after Sheepdog is fixed, this check is still
                # needed for users running older Sheepdog releases.
                reason = (_('Failed to connect to sheep daemon. '
                            'addr: %(addr)s, port: %(port)s') %
                          {'addr': self.addr, 'port': self.port})
                raise exception.SheepdogError(reason=reason)
        except exception.SheepdogCmdError as e:
            _stderr = e.kwargs['stderr']
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Failed to delete volume. %s'), vdiname)
Example #46
0
    def get_evs(self, cmd, ip0, user, pw, fsid):
        """Gets the EVSID for the named filesystem.

        :param cmd: ssc command name
        :param ip0: string IP address of controller
        :param user: string user authentication for array
        :param pw: string password authentication for array
        :returns: EVS id of the file system
        """

        out, err = self.run_cmd(cmd, ip0, user, pw, "evsfs", "list",
                                check_exit_code=True)
        LOG.debug('get_evs: out %s.', out)

        lines = out.split('\n')
        for line in lines:
            inf = line.split()
            if fsid in line and (fsid == inf[0] or fsid == inf[1]):
                return inf[3]

        LOG.warning(_LW('get_evs: %(out)s -- no match found for %(fsid)s'),
                    {'out': out, 'fsid': fsid})
        return 0
Example #47
0
    def _delete_vdisk(self, name, force):
        """Deletes existing vdisks."""

        LOG.debug('enter: _delete_vdisk: vdisk %s.', name)

        # Try to delete volume only if found on the storage
        vdisk_defined = self._is_vdisk_defined(name)
        if not vdisk_defined:
            LOG.warning(_LW('Tried to delete vdisk %s but '
                            'it does not exist.'), name)
            return

        ssh_cmd = ['svctask', 'rmvdisk', '-force', name]
        if not force:
            ssh_cmd.remove('-force')
        out, err = self._ssh(ssh_cmd)
        # No output should be returned from rmvdisk
        self._assert_ssh_return(
            (not out.strip()),
            ('_delete_vdisk %(name)s') % {'name': name},
            ssh_cmd, out, err)

        LOG.debug('leave: _delete_vdisk: vdisk %s.', name)
Example #48
0
    def configure_scsi_device(self, device_number, target_wwn, lun):
        """Write the LUN to the port's unit_add attribute.

        If auto-discovery of LUNs is disabled on s390 platforms
        luns need to be added to the configuration through the
        unit_add interface
        """
        LOG.debug(
            "Configure lun for s390: device_number=(%(device_num)s) "
            "target_wwn=(%(target_wwn)s) target_lun=(%(target_lun)s)", {
                'device_num': device_number,
                'target_wwn': target_wwn,
                'target_lun': lun
            })
        zfcp_device_command = ("/sys/bus/ccw/drivers/zfcp/%s/%s/unit_add" %
                               (device_number, target_wwn))
        LOG.debug("unit_add call for s390 execute: %s", zfcp_device_command)
        try:
            self.echo_scsi_command(zfcp_device_command, lun)
        except putils.ProcessExecutionError as exc:
            msg = _LW("unit_add call for s390 failed exit (%(code)s), "
                      "stderr (%(stderr)s)")
            LOG.warning(msg, {'code': exc.exit_code, 'stderr': exc.stderr})
Example #49
0
    def _schedule(self, context, request_spec, filter_properties=None):
        weighed_hosts = self._get_weighted_candidates(context, request_spec,
                                                      filter_properties)
        # Once we have the weighed hosts, drop any host whose backend is
        # not the same as the consistency group's backend.
        if request_spec.get('CG_backend'):
            group_backend = request_spec.get('CG_backend')
        else:
            group_backend = request_spec.get('group_backend')
        if weighed_hosts and group_backend:
            # Get host name including host@backend#pool info from
            # weighed_hosts.
            for host in weighed_hosts[::-1]:
                backend = utils.extract_host(host.obj.host)
                if backend != group_backend:
                    weighed_hosts.remove(host)
        if not weighed_hosts:
            LOG.warning(
                _LW('No weighed hosts found for volume '
                    'with properties: %s'),
                filter_properties['request_spec'].get('volume_type'))
            return None
        return self._choose_top_host(weighed_hosts, request_spec)
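The backend comparison above reduces each weighed host to its host@backend prefix. A hedged sketch of that filtering step, with hypothetical host names and a stand-in for utils.extract_host that assumes it strips the '#pool' suffix:

# Hypothetical weighed-host names; extract_host behavior is assumed.
def extract_host(host):
    return host.split('#')[0]

weighed = ['host1@lvm#pool1', 'host2@ceph#pool1', 'host3@lvm#pool2']
group_backend = 'host1@lvm'
survivors = [h for h in weighed if extract_host(h) == group_backend]
print(survivors)  # ['host1@lvm#pool1']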
Example #50
0
    def cleanup_migration(self, src_id, dst_id=None):
        """Invoke when migration meets error.

        :param src_id:  source LUN id
        :param dst_id:  destination LUN id
        """
        # if migration session is still there
        # we need to cancel the session
        session = self.vnx.get_migration_session(src_id)
        src_lun = self.vnx.get_lun(lun_id=src_id)
        if session.existed:
            LOG.warning(
                _LW('Cancelling migration session: '
                    '%(src_id)s -> %(dst_id)s.'), {
                        'src_id': src_id,
                        'dst_id': dst_id
                    })
            try:
                src_lun.cancel_migrate()
            except storops_ex.VNXLunNotMigratingError:
                LOG.info(
                    _LI('The LUN is not migrating; this message can be'
                        ' safely ignored.'))
Example #51
0
    def _map_lun(self, name, initiator_list, initiator_type, lun_id=None):
        """Maps LUN to the initiator(s) and returns LUN ID assigned."""
        metadata = self._get_lun_attr(name, 'metadata')
        path = metadata['Path']
        igroup_name, ig_host_os, ig_type = self._get_or_create_igroup(
            initiator_list, initiator_type, self.host_type)
        if ig_host_os != self.host_type:
            LOG.warning(_LW("LUN misalignment may occur for current"
                            " initiator group %(ig_nm)s with host OS type"
                            " %(ig_os)s. Please configure initiator group"
                            " manually according to the type of the"
                            " host OS."),
                        {'ig_nm': igroup_name, 'ig_os': ig_host_os})
        try:
            return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
        except na_api.NaApiError:
            exc_info = sys.exc_info()
            (_igroup, lun_id) = self._find_mapped_lun_igroup(path,
                                                             initiator_list)
            if lun_id is not None:
                return lun_id
            else:
                six.reraise(*exc_info)
Example #52
0
    def add_used_hlun(self, command, port, gid, used_list, ldev):
        unit = self.unit_name
        ret, stdout, stderr = self.exec_hsnm(command,
                                             '-unit %s -refer' % unit)
        if ret:
            msg = basic_lib.output_err(
                600, cmd=command, ret=ret, out=stdout, err=stderr)
            raise exception.HBSDCmdError(message=msg, ret=ret, err=stderr)
        lines = stdout.splitlines()
        for line in lines[2:]:
            line = shlex.split(line)
            if not line:
                continue
            if line[0] == port and int(line[1][0:3]) == gid:
                if int(line[2]) not in used_list:
                    used_list.append(int(line[2]))
                if int(line[3]) == ldev:
                    hlu = int(line[2])
                    LOG.warning(_LW('ldev(%(ldev)d) is already mapped '
                                    '(hlun: %(hlu)d)'),
                                {'ldev': ldev, 'hlu': hlu})
                    return hlu
        return None
Example #53
0
    def create_snapshot(self, snapshot):
        """Create snapshot."""
        # our volume name is the volume id
        volume_name = snapshot.get('volume_id')
        snapshot_id = snapshot.get('id')
        LOG.debug('Creating snapshot %(snap)s on volume %(vol)s', {
            'snap': snapshot_id,
            'vol': volume_name
        })
        with self._client.open_connection() as api:
            ssn = api.find_sc(self.configuration.dell_sc_ssn)
            if ssn is not None:
                scvolume = api.find_volume(ssn, volume_name)
                if scvolume is not None:
                    if api.create_replay(scvolume, snapshot_id, 0) is not None:
                        snapshot['status'] = 'available'
                        return
                else:
                    LOG.warning(_LW('Unable to locate volume: %s'),
                                volume_name)

        snapshot['status'] = 'error_creating'
        raise exception.VolumeBackendAPIException(
            _('Failed to create snapshot %s') % snapshot_id)
Example #54
0
    def register_initiator(self, storage_group, host, initiator_port_map):
        """Registers the initiators of `host` to the `storage_group`.

        :param storage_group: the storage group object.
        :param host: the ip and name information of the initiator.
        :param initiator_port_map: the dict specifying which initiators are
                                   bound to which ports.
        """
        for (initiator_id, ports_to_bind) in initiator_port_map.items():
            for port in ports_to_bind:
                try:
                    storage_group.connect_hba(port, initiator_id, host.name,
                                              host_ip=host.ip)
                except storops_ex.VNXStorageGroupError as ex:
                    LOG.warning(_LW('Failed to set path to port %(port)s for '
                                    'initiator %(hba_id)s. Message: %(msg)s'),
                                {'port': port, 'hba_id': initiator_id,
                                 'msg': ex.message})

        if any(initiator_port_map.values()):
            LOG.debug('New path set for initiator %(hba_id)s, so update '
                      'storage group with poll.', {'hba_id': initiator_id})
            utils.update_res_with_poll(storage_group)
Example #55
0
    def unmanage(self, volume):
        """Removes the specified volume from Cinder management.

        Does not delete the underlying backend storage object.

        The volume will be renamed with "-unmanaged" as a suffix.
        """
        vol_name = _get_vol_name(volume)
        unmanaged_vol_name = vol_name + "-unmanaged"
        LOG.info(_LI("Renaming existing volume %(ref_name)s to %(new_name)s"),
                 {
                     "ref_name": vol_name,
                     "new_name": unmanaged_vol_name
                 })
        try:
            self._array.rename_volume(vol_name, unmanaged_vol_name)
        except purestorage.PureHTTPError as err:
            with excutils.save_and_reraise_exception() as ctxt:
                if (err.code == 400 and ERR_MSG_NOT_EXIST in err.text):
                    ctxt.reraise = False
                    LOG.warning(
                        _LW("Volume unmanage was unable to rename "
                            "the volume, error message: %s"), err.text)
Example #56
0
    def _get_evs(self, cmd, ip0, user, pw, fsid):
        """Gets the EVSID for the named filesystem."""

        out, err = self.run_cmd(cmd,
                                ip0,
                                user,
                                pw,
                                "evsfs",
                                "list",
                                check_exit_code=True)
        LOG.debug('get_evs: out %s', out)

        lines = out.split('\n')
        for line in lines:
            inf = line.split()
            if fsid in line and (fsid == inf[0] or fsid == inf[1]):
                return inf[3]

        LOG.warning(_LW('get_evs: %(out)s -- no match found for %(fsid)s'), {
            'out': out,
            'fsid': fsid
        })
        return 0
Example #57
0
    def __init__(self, *args, **kwargs):
        super(ISERTgtAdm, self).__init__(*args, **kwargs)

        LOG.warning(
            _LW('ISERTgtAdm is deprecated, you should '
                'now just use LVMVolumeDriver and specify '
                'iscsi_helper for the target driver you '
                'wish to use. In order to enable iser, please '
                'set iscsi_protocol=iser with lioadm or tgtadm '
                'target helpers.'))

        self.volumes_dir = self.configuration.safe_get('volumes_dir')
        self.iscsi_protocol = 'iser'
        self.protocol = 'iSER'

        # backwards compatibility mess
        self.configuration.num_volume_device_scan_tries = \
            self.configuration.num_iser_scan_tries
        self.configuration.iscsi_target_prefix = \
            self.configuration.iser_target_prefix
        self.configuration.iscsi_ip_address = \
            self.configuration.iser_ip_address
        self.configuration.iscsi_port = self.configuration.iser_port
Example #58
0
    def check_api_permissions(self):
        """Check which APIs that support SSC functionality are available."""

        inaccessible_apis = []
        invalid_extra_specs = []

        for api_tuple, extra_specs in SSC_API_MAP.items():
            object_name, operation_name, api = api_tuple
            if not self.zapi_client.check_cluster_api(object_name,
                                                      operation_name, api):
                inaccessible_apis.append(api)
                invalid_extra_specs.extend(extra_specs)

        if inaccessible_apis:
            if 'volume-get-iter' in inaccessible_apis:
                msg = _('User not permitted to query Data ONTAP volumes.')
                raise exception.VolumeBackendAPIException(data=msg)
            else:
                LOG.warning(
                    _LW('The configured user account does not have '
                        'sufficient privileges to use all needed '
                        'APIs. The following extra specs will fail '
                        'or be ignored: %s.'), invalid_extra_specs)
Example #59
0
    def _check_filter_function(self, stats):
        """Checks if a volume passes a backend's filter function.

           Returns a tuple in the format (filter_passing, filter_invalid).
           Both values are booleans.
        """
        if stats['filter_function'] is None:
            LOG.debug("Filter function not set :: passing backend")
            return True

        try:
            filter_result = self._run_evaluator(stats['filter_function'],
                                                stats)
        except Exception as ex:
            # Warn the admin for now that there is an error in the
            # filter function.
            LOG.warning(_LW("Error in filtering function "
                            "'%(function)s' : '%(error)s' :: failing backend"),
                        {'function': stats['filter_function'],
                         'error': ex, })
            return False

        return filter_result
Example #60
0
    def delete_volume(self, volume):
        """Deletes a logical volume."""

        if not volume['provider_location']:
            LOG.warning(
                _LW('Volume %s does not have '
                    'provider_location specified, '
                    'skipping'), volume['name'])
            return

        self._ensure_share_mounted(volume['provider_location'])

        mounted_path = self._active_volume_path(volume)

        self._execute('rm', '-f', mounted_path, run_as_root=True)

        # If an exception (e.g. timeout) occurred during delete_snapshot, the
        # base volume may linger around, so just delete it if it exists
        base_volume_path = self._local_path_volume(volume)
        fileutils.delete_if_exists(base_volume_path)

        info_path = self._local_path_volume_info(volume)
        fileutils.delete_if_exists(info_path)