Example #1
def main():
    log.register_options(CONF)
    config.set_middleware_defaults()
    CONF(sys.argv[1:], project='manila',
         version=version.version_string())
    log.setup(CONF, "manila")
    LOG = log.getLogger('manila.all')

    msg = _('manila-all is deprecated in Newton and '
            'will be removed in Ocata.')
    versionutils.report_deprecated_feature(LOG, msg)

    utils.monkey_patch()
    launcher = service.process_launcher()
    # manila-api
    try:
        server = service.WSGIService('osapi_share')
        launcher.launch_service(server, workers=server.workers or 1)
    except (Exception, SystemExit):
        LOG.exception(_LE('Failed to load osapi_share'))

    for binary in ['manila-share', 'manila-scheduler', 'manila-api',
                   'manila-data']:
        try:
            launcher.launch_service(service.Service.create(binary=binary))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)
    launcher.wait()
Example #2
    def report_state(self):
        """Update the state of this service in the datastore."""
        ctxt = context.get_admin_context()
        zone = CONF.storage_availability_zone
        state_catalog = {}
        try:
            try:
                service_ref = db.service_get(ctxt, self.service_id)
            except exception.NotFound:
                LOG.debug('The service database object disappeared; '
                          'recreating it.')
                self._create_service_ref(ctxt)
                service_ref = db.service_get(ctxt, self.service_id)

            state_catalog['report_count'] = service_ref['report_count'] + 1
            if zone != service_ref['availability_zone']:
                state_catalog['availability_zone'] = zone

            db.service_update(ctxt,
                              self.service_id, state_catalog)

            # TODO(termie): make this pattern be more elegant.
            if getattr(self, 'model_disconnected', False):
                self.model_disconnected = False
                LOG.error(_LE('Recovered model server connection!'))

        # TODO(vish): this should probably only catch connection errors
        except Exception:  # pylint: disable=W0702
            if not getattr(self, 'model_disconnected', False):
                self.model_disconnected = True
                LOG.exception(_LE('model server went away'))
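
The nested try/except above implements a flap-suppression pattern: the model_disconnected flag makes the service log the loss of the database connection once, and the recovery once, instead of on every periodic report. A minimal, self-contained sketch of the same idea (the class and names here are illustrative, not Manila's):

import logging

LOG = logging.getLogger(__name__)


class Reporter(object):
    """Sketch: log connection loss and recovery exactly once each."""

    def __init__(self, ping):
        self._ping = ping            # callable that raises on failure
        self._disconnected = False   # plays the role of model_disconnected

    def report(self):
        try:
            self._ping()
            if self._disconnected:
                self._disconnected = False
                LOG.info('Recovered model server connection!')
        except Exception:
            if not self._disconnected:
                self._disconnected = True
                LOG.exception('model server went away')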
Example #3
 def _execute(self, *cmd, **kwargs):
     for x in range(0, len(self.hosts)):
         try:
             check_exit_code = kwargs.pop('check_exit_code', True)
             host = self.hosts[x]
             if host in self.local_hosts:
                 cmd = self._as_user(cmd,
                                     self.configuration.maprfs_ssh_name)
                 out, err = utils.execute(*cmd,
                                          check_exit_code=check_exit_code)
             else:
                 out, err = self._run_ssh(host, cmd, check_exit_code)
             # move available cldb host to the beginning
             if x > 0:
                 self.hosts[0], self.hosts[x] = self.hosts[x], self.hosts[0]
             return out, err
         except exception.ProcessExecutionError as e:
             if self._check_error(e):
                 raise
             elif x < len(self.hosts) - 1:
                 msg = _LE('Error running SSH command. Trying another host')
                 LOG.error(msg)
             else:
                 raise
         except Exception as e:
             if x < len(self.hosts) - 1:
                 msg = _LE('Error running SSH command. Trying another host')
                 LOG.error(msg)
             else:
                 raise exception.ProcessExecutionError(six.text_type(e))
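
The loop above walks the configured hosts in order and, after a successful call, swaps the responsive host to the front of the list so later invocations try it first. A hedged, generic sketch of that failover-with-promotion pattern (run_on_host is a placeholder callable, not a Manila API):

def run_with_failover(hosts, run_on_host):
    """Try each host in order; promote the first responsive one."""
    last_error = None
    for i, host in enumerate(hosts):
        try:
            result = run_on_host(host)
            if i > 0:
                # Move the responsive host to the front for next time.
                hosts[0], hosts[i] = hosts[i], hosts[0]
            return result
        except Exception as e:
            last_error = e
    raise last_error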
Example #4
def upgrade():
    meta = sql.MetaData()
    meta.bind = op.get_bind()
    is_public = sql.Column('is_public', sql.Boolean)

    try:
        op.add_column('share_types', is_public)

        share_types = sql.Table('share_types', meta, is_public.copy())
        share_types.update().values(is_public=True).execute()
    except Exception:
        LOG.error(_LE("Column |%s| not created!"), repr(is_public))
        raise

    try:
        op.create_table(
            'share_type_projects',
            sql.Column('id', sql.Integer, primary_key=True, nullable=False),
            sql.Column('created_at', sql.DateTime),
            sql.Column('updated_at', sql.DateTime),
            sql.Column('deleted_at', sql.DateTime),
            sql.Column('share_type_id', sql.String(36),
                       sql.ForeignKey('share_types.id', name="stp_id_fk")),
            sql.Column('project_id', sql.String(length=255)),
            sql.Column('deleted', sql.Integer),
            sql.UniqueConstraint('share_type_id', 'project_id', 'deleted',
                                 name="stp_project_id_uc"),
            mysql_engine='InnoDB',
        )
    except Exception:
        LOG.error(_LE("Table |%s| not created!"), 'share_type_projects')
        raise
Example #5
    def __init__(self, message=None, detail_data=None, **kwargs):
        self.kwargs = kwargs
        # Avoid a mutable default argument shared across instances.
        self.detail_data = detail_data or {}

        if "code" not in self.kwargs:
            try:
                self.kwargs["code"] = self.code
            except AttributeError:
                pass
        for k, v in self.kwargs.items():
            if isinstance(v, Exception):
                self.kwargs[k] = six.text_type(v)

        if not message:
            try:
                message = self.message % kwargs

            except Exception as e:
                # kwargs doesn't match a variable in the message
                # log the issue and the kwargs
                LOG.exception(_LE("Exception in string format operation."))
                for name, value in kwargs.items():
                    LOG.error(_LE("%(name)s: %(value)s"), {"name": name, "value": value})
                if CONF.fatal_exception_format_errors:
                    raise e
                else:
                    # at least get the core message out if something happened
                    message = self.message
        elif isinstance(message, Exception):
            message = six.text_type(message)

        if re.match(".*[^\.]\.\.$", message):
            message = message[:-1]
        self.msg = message
        super(ManilaException, self).__init__(message)
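
The constructor above lets each ManilaException subclass declare a message template with %(name)s placeholders that are filled from the constructor's kwargs, falling back to the raw template if interpolation fails. A stripped-down sketch of that mechanism, without the oslo.config and logging machinery (names are illustrative):

class TemplatedError(Exception):
    message = "An unknown exception occurred."

    def __init__(self, message=None, **kwargs):
        if not message:
            try:
                message = self.message % kwargs
            except (KeyError, TypeError, ValueError):
                # kwargs do not match the template; keep the raw template.
                message = self.message
        super(TemplatedError, self).__init__(message)


class ShareNotFound(TemplatedError):
    message = "Share %(share_id)s could not be found."

# str(ShareNotFound(share_id='abc')) == 'Share abc could not be found.'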
Example #6
    def init_host(self):
        """Initialization for a standalone service."""

        ctxt = context.get_admin_context()
        self.driver.do_setup(ctxt)
        self.driver.check_for_setup_error()

        shares = self.db.share_get_all_by_host(ctxt, self.host)
        LOG.debug("Re-exporting %s shares", len(shares))
        for share in shares:
            if share['status'] != 'available':
                LOG.info(
                    _LI("Share %(name)s: skipping export, because it has "
                        "'%(status)s' status."),
                    {'name': share['name'], 'status': share['status']},
                )
                continue

            self._ensure_share_has_pool(ctxt, share)
            share_server = self._get_share_server(ctxt, share)
            try:
                export_locations = self.driver.ensure_share(
                    ctxt, share, share_server=share_server)
            except Exception as e:
                LOG.error(
                    _LE("Caught exception trying ensure share '%(s_id)s'. "
                        "Exception: \n%(e)s."),
                    {'s_id': share['id'], 'e': six.text_type(e)},
                )
                continue

            if export_locations:
                self.db.share_export_locations_update(
                    ctxt, share['id'], export_locations)

            rules = self.db.share_access_get_all_for_share(ctxt, share['id'])
            for access_ref in rules:
                if access_ref['state'] != access_ref.STATE_ACTIVE:
                    continue

                try:
                    self.driver.allow_access(ctxt, share, access_ref,
                                             share_server=share_server)
                except exception.ShareAccessExists:
                    pass
                except Exception as e:
                    LOG.error(
                        _LE("Unexpected exception during share access"
                            " allow operation. Share id is '%(s_id)s'"
                            ", access rule type is '%(ar_type)s', "
                            "access rule id is '%(ar_id)s', exception"
                            " is '%(e)s'."),
                        {'s_id': share['id'],
                         'ar_type': access_ref['access_type'],
                         'ar_id': access_ref['id'],
                         'e': six.text_type(e)},
                    )

        self.publish_service_capabilities(ctxt)
Example #7
    def init_host(self):
        """Initialization for a standalone service."""

        ctxt = context.get_admin_context()
        self.driver.do_setup(ctxt)
        self.driver.check_for_setup_error()

        share_instances = self.db.share_instances_get_all_by_host(ctxt, self.host)
        LOG.debug("Re-exporting %s shares", len(share_instances))
        for share_instance in share_instances:
            if share_instance["status"] != constants.STATUS_AVAILABLE:
                LOG.info(
                    _LI("Share instance %(id)s: skipping export, " "because it has '%(status)s' status."),
                    {"id": share_instance["id"], "status": share_instance["status"]},
                )
                continue

            self._ensure_share_instance_has_pool(ctxt, share_instance)
            share_server = self._get_share_server(ctxt, share_instance)
            share_instance = self.db.share_instance_get(ctxt, share_instance["id"], with_share_data=True)
            try:
                export_locations = self.driver.ensure_share(ctxt, share_instance, share_server=share_server)
            except Exception as e:
                LOG.error(
                    _LE("Caught exception trying ensure share '%(s_id)s'. " "Exception: \n%(e)s."),
                    {"s_id": share_instance["id"], "e": six.text_type(e)},
                )
                continue

            if export_locations:
                self.db.share_export_locations_update(ctxt, share_instance["id"], export_locations)

            rules = self.db.share_access_get_all_for_share(ctxt, share_instance["share_id"])
            for access_ref in rules:
                if access_ref["state"] != constants.STATUS_ACTIVE:
                    continue

                try:
                    self.driver.allow_access(ctxt, share_instance, access_ref, share_server=share_server)
                except exception.ShareAccessExists:
                    pass
                except Exception as e:
                    LOG.error(
                        _LE(
                            "Unexpected exception during share access"
                            " allow operation. Share id is '%(s_id)s'"
                            ", access rule type is '%(ar_type)s', "
                            "access rule id is '%(ar_id)s', exception"
                            " is '%(e)s'."
                        ),
                        {
                            "s_id": share_instance["id"],
                            "ar_type": access_ref["access_type"],
                            "ar_id": access_ref["id"],
                            "e": six.text_type(e),
                        },
                    )

        self.publish_service_capabilities(ctxt)
Example #8
 def _heartbeat(self):
     try:
         self.coordinator.heartbeat()
     except coordination.ToozConnectionError:
         LOG.exception(_LE('Connection error while sending a heartbeat '
                           'to coordination back end.'))
         raise
     except coordination.ToozError:
         LOG.exception(_LE('Error sending a heartbeat to coordination '
                           'back end.'))
Example #9
    def unmanage_share(self, context, share_id):
        context = context.elevated()
        share_ref = self.db.share_get(context, share_id)
        share_server = self._get_share_server(context, share_ref)
        project_id = share_ref['project_id']

        def share_manage_set_error_status(msg, exception):
            status = {'status': constants.STATUS_UNMANAGE_ERROR}
            self.db.share_update(context, share_id, status)
            LOG.error(msg, six.text_type(exception))

        try:
            if self.driver.driver_handles_share_servers:
                msg = _("Unmanage share is not supported for "
                        "driver_handles_share_servers=True mode.")
                raise exception.InvalidShare(reason=msg)

            if share_server:
                msg = _("Unmanage share is not supported for "
                        "shares with share servers.")
                raise exception.InvalidShare(reason=msg)

            self.driver.unmanage(share_ref)

        except exception.InvalidShare as e:
            share_manage_set_error_status(
                _LE("Share can not be unmanaged: %s."), e)
            return

        try:
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          shares=-1,
                                          gigabytes=-share_ref['size'])
            QUOTAS.commit(context, reservations, project_id=project_id)
        except Exception as e:
            # Note(imalinovskiy):
            # Quota reservation errors here are not fatal, because
            # unmanage is an administrator API; the admin can update
            # the user's quota usages later if required.
            LOG.warning(_LW("Failed to update quota usages: %s."),
                        six.text_type(e))

        if self.configuration.safe_get('unmanage_remove_access_rules'):
            try:
                self._remove_share_access_rules(context, share_ref,
                                                share_server)
            except Exception as e:
                share_manage_set_error_status(
                    _LE("Can not remove access rules of share: %s."), e)
                return

        self.db.share_update(context, share_id,
                             {'status': constants.STATUS_UNMANAGED,
                              'deleted': True})
Example #10
    def do_setup(self, context):
        """Setup the GlusterFS volumes."""
        glusterfs_versions, exceptions = {}, {}
        for srvaddr in self.configuration.glusterfs_servers:
            try:
                glusterfs_versions[srvaddr] = self._glustermanager(
                    srvaddr, False).get_gluster_version()
            except exception.GlusterfsException as exc:
                exceptions[srvaddr] = six.text_type(exc)
        if exceptions:
            for srvaddr, excmsg in exceptions.items():
                LOG.error(_LE("'gluster version' failed on server "
                              "%(server)s with: %(message)s"),
                          {'server': srvaddr, 'message': excmsg})
            raise exception.GlusterfsException(_(
                "'gluster version' failed on servers %s") % (
                ','.join(exceptions.keys())))
        notsupp_servers = []
        for srvaddr, vers in glusterfs_versions.items():
            if common.GlusterManager.numreduct(
               vers) < self.driver.GLUSTERFS_VERSION_MIN:
                notsupp_servers.append(srvaddr)
        if notsupp_servers:
            gluster_version_min_str = '.'.join(
                six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
            for srvaddr in notsupp_servers:
                LOG.error(_LE("GlusterFS version %(version)s on server "
                              "%(server)s is not supported, "
                              "minimum requirement: %(minvers)s"),
                          {'server': srvaddr,
                           'version': '.'.join(glusterfs_versions[srvaddr]),
                           'minvers': gluster_version_min_str})
            raise exception.GlusterfsException(_(
                "Unsupported GlusterFS version on servers %(servers)s, "
                "minimum requirement: %(minvers)s") % {
                'servers': ','.join(notsupp_servers),
                'minvers': gluster_version_min_str})
        self.glusterfs_versions = glusterfs_versions

        gluster_volumes_initial = set(
            self._fetch_gluster_volumes(filter_used=False))
        if not gluster_volumes_initial:
            # No suitable volumes are found on the Gluster end.
            # Raise exception.
            msg = (_("Gluster backend does not provide any volume "
                     "matching pattern %s"
                     ) % self.configuration.glusterfs_volume_pattern)
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        LOG.info(_LI("Found %d Gluster volumes allocated for Manila."
                     ), len(gluster_volumes_initial))

        self._check_mount_glusterfs()
Example #11
def downgrade():
    try:
        op.drop_column('share_types', 'is_public')
    except Exception:
        LOG.error(_LE("share_types.is_public column not dropped"))
        raise

    try:
        op.drop_table('share_type_projects')
    except Exception:
        LOG.error(_LE("share_type_projects table not dropped"))
        raise
Example #12
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        old_gmgr = self._share_manager(snapshot['share_instance'])

        # The snapshot clone feature needed to support this API is available
        # only in GlusterFS server versions 3.7 and higher, so do a version
        # check.
        vers = self.glusterfs_versions[old_gmgr.host_access]
        minvers = (3, 7)
        if common.GlusterManager.numreduct(vers) < minvers:
            minvers_str = '.'.join(six.text_type(c) for c in minvers)
            vers_str = '.'.join(vers)
            msg = (_("GlusterFS version %(version)s on server %(server)s does "
                     "not support creation of shares from snapshot. "
                     "minimum requirement: %(minversion)s") %
                   {'version': vers_str, 'server': old_gmgr.host,
                    'minversion': minvers_str})
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        # Clone the snapshot. The snapshot clone, a new GlusterFS volume
        # would serve as a share.
        backend_snapshot_name = self._find_actual_backend_snapshot_name(
            old_gmgr, snapshot)
        volume = ''.join(['manila-', share['id']])
        args_tuple = (('snapshot', 'activate', backend_snapshot_name,
                      'force', '--mode=script'),
                      ('snapshot', 'clone', volume, backend_snapshot_name))
        for args in args_tuple:
            out, err = old_gmgr.gluster_call(
                *args,
                log=_LE("Creating share from snapshot"))

        # Get a manager for the new volume/share.
        comp_vol = old_gmgr.components.copy()
        comp_vol.update({'volume': volume})
        gmgr = self._glustermanager(comp_vol)
        export = self.driver._setup_via_manager(
            {'share': share, 'manager': gmgr},
            {'share': snapshot['share_instance'], 'manager': old_gmgr})

        argseq = (('set',
                   [USER_CLONED_FROM, snapshot['share_id']]),
                  ('set', [USER_MANILA_SHARE, share['id']]),
                  ('start', []))
        for op, opargs in argseq:
            args = ['volume', op, gmgr.volume] + opargs
            gmgr.gluster_call(*args, log=_LE("Creating share from snapshot"))

        self.gluster_used_vols.add(gmgr.qualified)
        self.private_storage.update(share['id'], {'volume': gmgr.qualified})

        return export
Example #13
    def shrink_share(self, context, share_id, new_size):
        context = context.elevated()
        share = self.db.share_get(context, share_id)
        share_server = self._get_share_server(context, share)
        project_id = share['project_id']
        new_size = int(new_size)

        def error_occurred(exc, msg, status=constants.STATUS_SHRINKING_ERROR):
            LOG.exception(msg, resource=share)
            self.db.share_update(context, share['id'], {'status': status})

            raise exception.ShareShrinkingError(
                reason=six.text_type(exc), share_id=share_id)

        reservations = None

        try:
            size_decrease = int(share['size']) - new_size
            reservations = QUOTAS.reserve(context,
                                          project_id=share['project_id'],
                                          gigabytes=-size_decrease)
        except Exception as e:
            error_occurred(
                e, _LE("Failed to update quota on share shrinking."))

        try:
            self.driver.shrink_share(
                share, new_size, share_server=share_server)
        # NOTE(u_glide): Replace following except block by error notification
        # when Manila has such mechanism. It's possible because drivers
        # shouldn't shrink share when this validation error occurs.
        except Exception as e:
            if isinstance(e, exception.ShareShrinkingPossibleDataLoss):
                msg = _LE("Shrink share failed due to possible data loss.")
                status = constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR
                error_params = {'msg': msg, 'status': status}
            else:
                error_params = {'msg': _LE("Shrink share failed.")}

            try:
                error_occurred(e, **error_params)
            finally:
                QUOTAS.rollback(context, reservations, project_id=project_id)

        QUOTAS.commit(context, reservations, project_id=project_id)

        share_update = {
            'size': new_size,
            'status': constants.STATUS_AVAILABLE
        }
        share = self.db.share_update(context, share['id'], share_update)

        LOG.info(_LI("Shrink share completed successfully."), resource=share)
Example #14
    def update_replication_pair_state(self, replica_pair_id):
        pair_info = self._get_replication_pair_info(replica_pair_id)

        def _is_to_recover(pair_info):
            return (pair_info['RUNNINGSTATUS'] ==
                    constants.REPLICA_RUNNING_STATUS_TO_RECOVER)

        health = self._check_replication_health(pair_info)
        if health is not None:
            if not _is_to_recover(pair_info):
                LOG.warning(_LW("Cannot update the replication %s "
                                "because it's not in normal status and "
                                "not to recover."),
                            replica_pair_id)
                return

            # replication is in to-recover status, try to resync manually.
            LOG.debug("Resync replication %s because it is to recover.",
                      replica_pair_id)
            self._sync_replication_pair(replica_pair_id)
            return

        if strutils.bool_from_string(pair_info['ISPRIMARY']):
            # The current replica is primary, which is inconsistent with
            # manila. This happens when the last switchover did not
            # complete successfully; continue the switchover process.
            try:
                self.helper.switch_replication_pair(replica_pair_id)
            except Exception:
                msg = _LE('Replication pair %s primary/secondary '
                          'relationship is not right, try to switch over '
                          'again but still failed.')
                LOG.exception(msg, replica_pair_id)
                return

            # refresh the replication pair info
            pair_info = self._get_replication_pair_info(replica_pair_id)

        if pair_info['SECRESACCESS'] == constants.REPLICA_SECONDARY_RW:
            try:
                self.helper.set_pair_secondary_write_lock(replica_pair_id)
            except Exception:
                msg = _LE('Replication pair %s secondary access is R/W, '
                          'try to set write lock but still failed.')
                LOG.exception(msg, replica_pair_id)
                return

        if pair_info['RUNNINGSTATUS'] in (
                constants.REPLICA_RUNNING_STATUS_NORMAL,
                constants.REPLICA_RUNNING_STATUS_SPLITTED,
                constants.REPLICA_RUNNING_STATUS_TO_RECOVER):
            self._sync_replication_pair(replica_pair_id)
Example #15
def downgrade():
    try:
        op.drop_table(ELM_TABLE_NAME)
    except Exception:
        LOG.error(_LE("Failed to drop '%s' table!"), ELM_TABLE_NAME)
        raise

    try:
        op.drop_column(EL_TABLE_NAME, 'is_admin_only')
        op.drop_column(EL_TABLE_NAME, 'uuid')
    except Exception:
        LOG.error(_LE("Failed to update '%s' table!"), EL_TABLE_NAME)
        raise
Example #16
 def deny_access(self, context, share, share_name, access):
     """Denies access to the CIFS share for a given user."""
     host_ip, share_name = self._get_export_location(share)
     user_name = access['access_to']
     try:
         self._client.remove_cifs_share_access(share_name, user_name)
     except netapp_api.NaApiError as e:
         if e.code == netapp_api.EONTAPI_EINVAL:
             LOG.error(_LE("User %s does not exist."), user_name)
         elif e.code == netapp_api.EOBJECTNOTFOUND:
             LOG.error(_LE("Rule %s does not exist."), user_name)
         else:
             raise e
Example #17
 def deny_access(self, context, share, access):
     """Denies access to the CIFS share for a given user."""
     host_ip, share_name = self._get_export_location(share)
     user = access['access_to']
     try:
         self._restrict_access(user, share_name)
     except naapi.NaApiError as e:
         if e.code == "22":
             LOG.error(_LE("User %s does not exist."), user)
         elif e.code == "15661":
             LOG.error(_LE("Rule %s does not exist."), user)
         else:
             raise e
Example #18
    def _delete_vserver(self, vserver_name, vserver_client,
                        security_services=None):
        """Delete vserver.

        Checks if vserver exists and does not have active shares.
        Offlines and destroys root volumes.
        Deletes vserver.
        """
        if not self._vserver_exists(vserver_name):
            LOG.error(_LE("Vserver %s does not exist."), vserver_name)
            return
        volumes_data = vserver_client.send_request('volume-get-iter')
        volumes_count = int(volumes_data.get_child_content('num-records'))
        if volumes_count == 1:
            try:
                vserver_client.send_request(
                    'volume-offline',
                    {'name': self.configuration.netapp_root_volume_name})
            except naapi.NaApiError as e:
                if e.code == '13042':
                    LOG.error(_LE("Volume %s is already offline."),
                              self.configuration.netapp_root_volume_name)
                else:
                    raise e
            vserver_client.send_request(
                'volume-destroy',
                {'name': self.configuration.netapp_root_volume_name})
        elif volumes_count > 1:
            msg = _("Error deleting vserver. "
                    "Vserver %s has shares.") % vserver_name
            LOG.error(msg)
            raise exception.NetAppException(msg)
        if security_services:
            for service in security_services:
                if service['type'] == 'active_directory':
                    args = {
                        'admin-password': service['password'],
                        'admin-username': service['user'],
                    }
                    try:
                        vserver_client.send_request('cifs-server-delete',
                                                    args)
                    except naapi.NaApiError as e:
                        if e.code == "15661":
                            LOG.error(_LE("CIFS server does not exist for"
                                          " vserver %s"), vserver_name)
                        else:
                            vserver_client.send_request('cifs-server-delete')
        self._client.send_request('vserver-destroy',
                                  {'vserver-name': vserver_name})
Example #19
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""

        vol = snapshot['share']['export_location']
        gluster_mgr = self.gluster_used_vols_dict[vol]
        args = ('snapshot', 'list', gluster_mgr.volume, '--mode=script')
        try:
            out, err = gluster_mgr.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error retrieving snapshot list: %s"), exc.stderr)
            raise exception.GlusterfsException(_("gluster %s failed") %
                                               ' '.join(args))
        snapgrep = list(filter(lambda x: snapshot['id'] in x, out.split("\n")))
        if len(snapgrep) != 1:
            msg = (_("Failed to identify backing GlusterFS object "
                     "for snapshot %(snap_id)s of share %(share_id)s: "
                     "a single candidate was expected, %(found)d was found.") %
                   {'snap_id': snapshot['id'],
                    'share_id': snapshot['share_id'],
                    'found': len(snapgrep)})
            raise exception.GlusterfsException(msg)
        args = ('--xml', 'snapshot', 'delete', snapgrep[0], '--mode=script')
        try:
            out, err = gluster_mgr.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error deleting snapshot: %s"), exc.stderr)
            raise exception.GlusterfsException(_("gluster %s failed") %
                                               ' '.join(args))

        if not out:
            raise exception.GlusterfsException(
                _('gluster snapshot delete %s: no data received') %
                gluster_mgr.volume
            )

        outxml = etree.fromstring(out)
        opret = int(outxml.find('opRet').text)
        operrno = int(outxml.find('opErrno').text)
        operrstr = outxml.find('opErrstr').text

        if opret:
            raise exception.GlusterfsException(
                _("Deleting snapshot %(snap_id)s of share %(share_id)s failed "
                  "with %(errno)d: %(errstr)s") % {
                      'snap_id': snapshot['id'],
                      'share_id': snapshot['share_id'],
                      'errno': operrno,
                      'errstr': operrstr})
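
gluster's --xml mode wraps every command result in opRet/opErrno/opErrstr elements, which the snippet above reads via etree. A minimal sketch of the same check using only the standard library (the helper name and sample payload are illustrative):

import xml.etree.ElementTree as etree


def check_gluster_xml(out):
    """Raise if a gluster --xml payload reports a failure."""
    root = etree.fromstring(out)
    opret = int(root.find('opRet').text)
    operrno = int(root.find('opErrno').text)
    operrstr = root.find('opErrstr').text
    if opret:
        raise RuntimeError('gluster failed with %d: %s'
                           % (operrno, operrstr))

# check_gluster_xml('<cliOutput><opRet>0</opRet><opErrno>0</opErrno>'
#                   '<opErrstr/></cliOutput>') passes silently.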
Example #20
    def create_share(self, ctx, share, share_server=None):
        """Create a sub-directory/share in the GlusterFS volume."""
        # probe into getting a NAS protocol helper for the share in order
        # to facilitate early detection of unsupported protocol type
        sizestr = six.text_type(share['size']) + 'GB'
        share_dir = '/' + share['name']
        local_share_path = self._get_local_share_path(share)
        cmd = ['mkdir', local_share_path]
        # set hard limit quota on the sub-directory/share
        args = ('volume', 'quota', self.gluster_manager.volume,
                'limit-usage', share_dir, sizestr)
        try:
            self.driver._execute(*cmd, run_as_root=True)
            self.gluster_manager.gluster_call(*args)
        except Exception as exc:
            if isinstance(exc, exception.ProcessExecutionError):
                exc = exception.GlusterfsException(exc)
            if isinstance(exc, exception.GlusterfsException):
                self._cleanup_create_share(local_share_path, share['name'])
                LOG.error(_LE('Unable to create share %s'), share['name'])
            raise exc

        comp_share = self.gluster_manager.components.copy()
        comp_share['path'] = '/' + share['name']
        export_location = self.driver._setup_via_manager(
            {'share': share,
             'manager': self._glustermanager(comp_share)})

        return export_location
Example #21
def upgrade():
    try:
        op.add_column('shares',
                      sa.Column('is_public', sa.Boolean, default=False))
    except Exception:
        LOG.error(_LE("Column shares.is_public not created!"))
        raise
Example #22
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""

        gluster_mgr = self._share_manager(snapshot['share'])
        backend_snapshot_name = self._find_actual_backend_snapshot_name(
            gluster_mgr, snapshot)
        args = ('--xml', 'snapshot', 'delete', backend_snapshot_name,
                '--mode=script')
        try:
            out, err = gluster_mgr.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error deleting snapshot: %s"), exc.stderr)
            raise exception.GlusterfsException(
                _("gluster %s failed") % ' '.join(args))

        if not out:
            raise exception.GlusterfsException(
                _('gluster snapshot delete %s: no data received') %
                gluster_mgr.volume)

        outxml = etree.fromstring(out)
        opret = int(outxml.find('opRet').text)
        operrno = int(outxml.find('opErrno').text)
        operrstr = outxml.find('opErrstr').text

        if opret:
            raise exception.GlusterfsException(
                _("Deleting snapshot %(snap_id)s of share %(share_id)s failed "
                  "with %(errno)d: %(errstr)s") % {
                      'snap_id': snapshot['id'],
                      'share_id': snapshot['share_id'],
                      'errno': operrno,
                      'errstr': operrstr
                  })
Example #23
    def delete(self, context, share, force=False):
        """Delete share."""
        share = self.db.share_get(context, share['id'])
        if context.is_admin and context.project_id != share['project_id']:
            project_id = share['project_id']
        else:
            project_id = context.project_id

        share_id = share['id']

        statuses = (constants.STATUS_AVAILABLE, constants.STATUS_ERROR,
                    constants.STATUS_INACTIVE)
        if not (force or share['status'] in statuses):
            msg = _("Share status must be one of %(statuses)s") % {
                "statuses": statuses
            }
            raise exception.InvalidShare(reason=msg)

        snapshots = self.db.share_snapshot_get_all_for_share(context, share_id)
        if len(snapshots):
            msg = _("Share still has %d dependent snapshots") % len(snapshots)
            raise exception.InvalidShare(reason=msg)

        cgsnapshot_members_count = self.db.count_cgsnapshot_members_in_share(
            context, share_id)
        if cgsnapshot_members_count:
            msg = (_("Share still has %d dependent cgsnapshot members") %
                   cgsnapshot_members_count)
            raise exception.InvalidShare(reason=msg)

        # Make sure share is not part of a migration
        if share['task_state'] not in (
                None, constants.STATUS_TASK_STATE_MIGRATION_ERROR,
                constants.STATUS_TASK_STATE_MIGRATION_SUCCESS):
            msg = _("Share %s is busy as part of an active "
                    "task.") % share['id']
            LOG.error(msg)
            raise exception.InvalidShare(reason=msg)

        try:
            reservations = QUOTAS.reserve(context,
                                          project_id=project_id,
                                          shares=-1,
                                          gigabytes=-share['size'])
        except Exception as e:
            reservations = None
            LOG.exception(_LE("Failed to update quota for deleting share: %s"),
                          six.text_type(e))

        ownername = context.user_id
        #lease.delete_success(share['id'], ownername)

        for share_instance in share.instances:
            if share_instance['host']:
                self.delete_instance(context, share_instance, force=force)
            else:
                self.db.share_instance_delete(context, share_instance['id'])

        if reservations:
            QUOTAS.commit(context, reservations, project_id=project_id)
Example #24
def upgrade():
    for t_name in TABLE_NAMES:
        try:
            op.drop_column(t_name, COLUMN_NAME)
        except Exception:
            LOG.error(_LE("Column '%s' could not be dropped"), COLUMN_NAME)
            raise
Example #25
        def _teardown_server():
            # NOTE(vponomaryov): Verify that there are no dependent shares.
            # Without this verification we could hit an exception in the
            # following case: the share-server-delete API was called after
            # share creation was scheduled and the share server reached
            # ACTIVE status, but before the share's share_server_id field
            # was updated. If so, this method starts executing after the
            # lock release, when the number of dependent shares has changed.
            shares = self.db.share_get_all_by_share_server(
                context, share_server['id'])
            if shares:
                raise exception.ShareServerInUse(
                    share_server_id=share_server['id'])

            self.db.share_server_update(context, share_server['id'],
                                        {'status': constants.STATUS_DELETING})
            sec_services = self.db.share_network_get(
                context, share_server['share_network_id'])['security_services']
            try:
                LOG.debug("Deleting share server")
                self.driver.teardown_server(share_server['backend_details'],
                                            security_services=sec_services)
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE("Share server %s failed on deletion."),
                              share_server['id'])
                    self.db.share_server_update(
                        context, share_server['id'],
                        {'status': constants.STATUS_ERROR})
            else:
                self.db.share_server_delete(context, share_server['id'])
Example #26
    def _init_counter_info(self):
        """Set a few counter names based on Data ONTAP version."""

        self.system_object_name = None
        self.avg_processor_busy_base_counter_name = None

        try:
            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
                self.system_object_name = 'system:constituent'
                self.avg_processor_busy_base_counter_name = (
                    self._get_base_counter_name('system:constituent',
                                                'avg_processor_busy'))
            elif self.zapi_client.features.SYSTEM_METRICS:
                self.system_object_name = 'system'
                self.avg_processor_busy_base_counter_name = (
                    self._get_base_counter_name('system',
                                                'avg_processor_busy'))
        except netapp_api.NaApiError:
            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
            else:
                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
            LOG.exception(_LE('Could not get performance base counter '
                              'name. Performance-based scheduler '
                              'functions may not be available.'))
Example #27
    def host_passes(self, host_state, filter_properties):
        """Return True if host has sufficient capacity."""
        volume_size = filter_properties.get('size')

        if host_state.free_capacity_gb is None:
            # Fail Safe
            LOG.error(_LE("Free capacity not set: "
                          "volume node info collection broken."))
            return False

        free_space = host_state.free_capacity_gb
        if free_space == 'infinite' or free_space == 'unknown':
            # NOTE(zhiteng) for those back-ends cannot report actual
            # available capacity, we assume it is able to serve the
            # request.  Even if it was not, the retry mechanism is
            # able to handle the failure by rescheduling
            return True
        reserved = float(host_state.reserved_percentage) / 100
        free = math.floor(free_space * (1 - reserved))
        if free < volume_size:
            LOG.warning(_LW("Insufficient free space for volume creation "
                            "(requested / avail): "
                            "%(requested)s/%(available)s"),
                        {'requested': volume_size,
                         'available': free})

        return free >= volume_size
Example #28
    def rollback(self, context, reservations, project_id=None, user_id=None):
        """Roll back reservations.

        :param context: The request context, for access checks.
        :param reservations: A list of the reservation UUIDs, as
                             returned by the reserve() method.
        :param project_id: Specify the project_id if current context
                           is admin and admin wants to impact on
                           common user's tenant.
        """

        try:
            self._driver.rollback(context,
                                  reservations,
                                  project_id=project_id,
                                  user_id=user_id)
        except Exception:
            # NOTE(Vek): Ignoring exceptions here is safe, because the
            # usage resynchronization and the reservation expiration
            # mechanisms will resolve the issue.  The exception is
            # logged, however, because this is less than optimal.
            LOG.exception(_LE("Failed to roll back reservations %s"),
                          reservations)
            return
        LOG.debug("Rolled back reservations %s", reservations)
Example #29
    def periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            ticks_to_skip = self._ticks_to_skip[task_name]
            if ticks_to_skip > 0:
                LOG.debug(
                    "Skipping %(full_task_name)s, %(ticks_to_skip)s"
                    " ticks left until next run", {
                        'full_task_name': full_task_name,
                        'ticks_to_skip': ticks_to_skip
                    })
                self._ticks_to_skip[task_name] -= 1
                continue

            self._ticks_to_skip[task_name] = task._ticks_between_runs
            LOG.debug("Running periodic task %(full_task_name)s",
                      {'full_task_name': full_task_name})

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"), {
                    'full_task_name': full_task_name,
                    'e': e
                })
Example #30
    def setup_server(self, network_info, metadata=None):
        """Set up and configures share server with given network parameters."""
        server_name = network_info['server_id']
        segmentation_id = network_info['segmentation_id']
        network = self.validate_network(network_info)
        mtu = network['mtu']
        tenant = self.client.get_tenant(network_info['server_id'],
                                        segmentation_id)

        sp_ports_map = unity_utils.find_ports_by_mtu(
            self.client.get_file_ports(), self.port_ids_conf, mtu)

        sp = self._choose_sp(sp_ports_map)
        nas_server = self.client.create_nas_server(server_name,
                                                   sp,
                                                   self.nas_server_pool,
                                                   tenant=tenant)
        sp = nas_server.home_sp
        port_id = self._choose_port(sp_ports_map, sp)
        try:
            self._create_network_interface(nas_server, network, port_id)

            self._handle_security_services(nas_server,
                                           network_info['security_services'])

            return {'share_server_name': server_name}

        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Could not setup server.'))
                server_details = {'share_server_name': server_name}
                self.teardown_server(server_details,
                                     network_info['security_services'])
Example #31
    def inner(self, *args, **kwargs):
        total_retry_time = self.cli_retry_time

        if total_retry_time is None:
            total_retry_time = DEFAULT_RETRY_TIME

        retry_time = 0
        while retry_time < total_retry_time:
            rc, out = func(self, *args, **kwargs)
            retry_time += 1

            if rc == 0:
                break

            LOG.error(_LE(
                'Retry %(retry)s times: %(method)s Failed '
                '%(rc)s: %(reason)s'), {
                    'retry': retry_time,
                    'method': self.__class__.__name__,
                    'rc': rc,
                    'reason': out})
        LOG.debug(
            'Method: %(method)s Return Code: %(rc)s '
            'Output: %(out)s', {
                'method': self.__class__.__name__, 'rc': rc, 'out': out})
        return rc, out
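
The wrapper above keeps re-invoking the CLI call until it returns rc == 0 or the retry budget is spent, logging each failed attempt. The same control flow as a plain helper function, for clarity (a sketch; the names are not from the driver):

def call_with_retries(func, max_retries, *args, **kwargs):
    """Retry func until it reports success (rc == 0) or retries run out."""
    rc, out = None, None
    for attempt in range(1, max_retries + 1):
        rc, out = func(*args, **kwargs)
        if rc == 0:
            break
        print('Retry %d failed: rc=%s reason=%s' % (attempt, rc, out))
    return rc, out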
Example #32
        def _gluster_call(*args, **kwargs):
            logmsg = kwargs.pop('log', None)
            error_policy = kwargs.pop('error_policy', 'coerce')
            if (error_policy not in ('raw', 'coerce', 'suppress')
                    and not isinstance(error_policy[0], int)):
                raise TypeError(
                    _("undefined error_policy %s") % repr(error_policy))

            try:
                return gluster_execf(*(('gluster', ) + args), **kwargs)
            except exception.ProcessExecutionError as exc:
                if error_policy == 'raw':
                    raise
                elif error_policy == 'coerce':
                    pass
                elif (error_policy == 'suppress'
                      or exc.exit_code in error_policy):
                    return
                if logmsg:
                    LOG.error(
                        _LE("%s: GlusterFS instrumentation failed."), logmsg)
                raise exception.GlusterfsException(
                    _("GlusterFS management command '%(cmd)s' failed "
                      "with details as follows:\n%(details)s.") % {
                          'cmd': ' '.join(args),
                          'details': exc
                      })
Example #33
    def create_share(self, context, share, share_server=None):
        LOG.debug("Creating share in HSP: %(shr)s", {'shr': share['id']})

        if share['share_proto'].lower() != 'nfs':
            msg = _("Only NFS protocol is currently supported.")
            raise exception.InvalidShare(reason=msg)

        self.hsp.add_file_system(share['id'], share['size'] * units.Gi)
        filesystem_id = self.hsp.get_file_system(share['id'])['id']

        try:
            self.hsp.add_share(share['id'], filesystem_id)
        except exception.HSPBackendException:
            with excutils.save_and_reraise_exception():
                self.hsp.delete_file_system(filesystem_id)
                msg = _LE("Could not create share %s on HSP.")
                LOG.exception(msg, share['id'])

        uri = self.hsp_host + ':/' + share['id']

        LOG.debug("Share created successfully on path: %(uri)s.", {'uri': uri})
        return [{
            "path": uri,
            "metadata": {},
            "is_admin_only": False,
        }]
Example #34
    def _fetch_gluster_volumes(self):
        """Do a 'gluster volume list | grep <volume pattern>'.

        Aggregate the results from all servers.
        Extract the named groups from the matching volume names
        using the specs given in PATTERN_DICT.
        Return a dict with keys of the form <server>:/<volname>
        and values being dicts that map names of named groups
        to their extracted value.
        """

        volumes_dict = {}
        for gsrv, gluster_mgr in six.iteritems(self.glusterfs_servers):
            try:
                out, err = gluster_mgr.gluster_call("volume", "list")
            except exception.ProcessExecutionError as exc:
                msgdict = {"err": exc.stderr, "hostinfo": ""}
                if gluster_mgr.remote_user:
                    msgdict["hostinfo"] = " on host %s" % gluster_mgr.host
                LOG.error(_LE("Error retrieving volume list%(hostinfo)s: " "%(err)s") % msgdict)
                raise exception.GlusterfsException(_("gluster volume list failed"))
            for volname in out.split("\n"):
                patmatch = self.volume_pattern.match(volname)
                if not patmatch:
                    continue
                pattern_dict = {}
                for key in self.volume_pattern_keys:
                    keymatch = patmatch.group(key)
                    if keymatch is None:
                        pattern_dict[key] = None
                    else:
                        trans = PATTERN_DICT[key].get("trans", lambda x: x)
                        pattern_dict[key] = trans(keymatch)
                volumes_dict[gsrv + ":/" + volname] = pattern_dict
        return volumes_dict
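
volume_pattern is a compiled regex whose named groups pull metadata (such as a size) out of matching volume names, with an optional per-key 'trans' function applied afterwards. A small sketch of that extraction (the pattern below is illustrative, not Manila's default glusterfs_volume_pattern):

import re

volume_pattern = re.compile(r'manila-share-(?P<size>\d+)G-(?P<id>.+)')

for volname in ('manila-share-10G-alpha', 'unrelated-volume'):
    patmatch = volume_pattern.match(volname)
    if not patmatch:
        continue
    # Apply a per-key transformation, as PATTERN_DICT's 'trans' does.
    extracted = {'size': int(patmatch.group('size')),
                 'id': patmatch.group('id')}
    print(volname, '->', extracted)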
Example #35
    def delete_snapshot(self, context, snapshot_id):
        """Delete share snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)

        share_server = self._get_share_server(context,
                                              snapshot_ref['share'])

        if context.project_id != snapshot_ref['project_id']:
            project_id = snapshot_ref['project_id']
        else:
            project_id = context.project_id

        try:
            self.driver.delete_snapshot(context, snapshot_ref,
                                        share_server=share_server)
        except exception.ShareSnapshotIsBusy:
            self.db.share_snapshot_update(context, snapshot_ref['id'],
                                          {'status': 'available'})
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.share_snapshot_update(context, snapshot_ref['id'],
                                              {'status': 'error_deleting'})
        else:
            self.db.share_snapshot_destroy(context, snapshot_id)
            try:
                reservations = QUOTAS.reserve(
                    context, project_id=project_id, snapshots=-1,
                    snapshot_gigabytes=-snapshot_ref['size'])
            except Exception:
                reservations = None
                LOG.exception(_LE("Failed to update usages deleting snapshot"))

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
Example #36
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""

        vol = snapshot["share"]["export_location"]
        gluster_mgr = self.gluster_used_vols_dict[vol]
        args = ("--xml", "snapshot", "delete", snapshot["id"], "--mode=script")
        try:
            out, err = gluster_mgr.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error retrieving volume info: %s"), exc.stderr)
            raise exception.GlusterfsException("gluster %s failed" % " ".join(args))

        if not out:
            raise exception.GlusterfsException("gluster volume info %s: no data received" % gluster_mgr.volume)

        outxml = etree.fromstring(out)
        opret = int(outxml.find("opRet").text)
        operrno = int(outxml.find("opErrno").text)
        operrstr = outxml.find("opErrstr").text

        if opret:
            raise exception.GlusterfsException(
                _("Deleting snapshot %(snap_id)s of share %(share_id)s failed " "with %(errno)d: %(errstr)s")
                % {"snap_id": snapshot["id"], "share_id": snapshot["share_id"], "errno": operrno, "errstr": operrstr}
            )
Example #37
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot."""

        vol = snapshot['share']['export_location']
        gluster_mgr = self.gluster_used_vols_dict[vol]
        args = ('--xml', 'snapshot', 'delete', snapshot['id'], '--mode=script')
        try:
            out, err = gluster_mgr.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error retrieving volume info: %s"), exc.stderr)
            raise exception.GlusterfsException("gluster %s failed" %
                                               ' '.join(args))

        if not out:
            raise exception.GlusterfsException(
                'gluster volume info %s: no data received' %
                gluster_mgr.volume)

        outxml = etree.fromstring(out)
        opret = int(outxml.find('opRet').text)
        operrno = int(outxml.find('opErrno').text)
        operrstr = outxml.find('opErrstr').text

        if opret:
            raise exception.GlusterfsException(
                _("Deleting snapshot %(snap_id)s of share %(share_id)s failed "
                  "with %(errno)d: %(errstr)s") % {
                      'snap_id': snapshot['id'],
                      'share_id': snapshot['share_id'],
                      'errno': operrno,
                      'errstr': operrstr
                  })
Example #38
 def create_port(self, tenant_id, network_id, host_id=None, subnet_id=None,
                 fixed_ip=None, device_owner=None, device_id=None,
                 mac_address=None, security_group_ids=None, dhcp_opts=None):
     try:
         port_req_body = {'port': {}}
         port_req_body['port']['network_id'] = network_id
         port_req_body['port']['admin_state_up'] = True
         port_req_body['port']['tenant_id'] = tenant_id
         if security_group_ids:
             port_req_body['port']['security_groups'] = security_group_ids
         if mac_address:
             port_req_body['port']['mac_address'] = mac_address
         if self._has_port_binding_extension() and host_id:
             port_req_body['port']['binding:host_id'] = host_id
         if dhcp_opts is not None:
             port_req_body['port']['extra_dhcp_opts'] = dhcp_opts
         if subnet_id:
             fixed_ip_dict = {'subnet_id': subnet_id}
             if fixed_ip:
                 fixed_ip_dict.update({'ip_address': fixed_ip})
             port_req_body['port']['fixed_ips'] = [fixed_ip_dict]
         if device_owner:
             port_req_body['port']['device_owner'] = device_owner
         if device_id:
             port_req_body['port']['device_id'] = device_id
         port = self.client.create_port(port_req_body).get('port', {})
         return port
     except neutron_client_exc.NeutronClientException as e:
         LOG.exception(_LE('Neutron error creating port on network %s'),
                       network_id)
         if e.status_code == 409:
             raise exception.PortLimitExceeded()
         raise exception.NetworkException(code=e.status_code,
                                          message=e.message)
Example #39
    def create_share(self, context, share, share_server=None):
        """Create a share using GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Pick an unused
        GlusterFS volume for use as a share.
        """
        try:
            vol = self._pop_gluster_vol(share['size'])
        except exception.GlusterfsException:
            msg = (_LE("Error creating share %(share_id)s"), {
                'share_id': share['id']
            })
            LOG.error(msg)
            raise

        export = self.driver._setup_via_manager(
            {'share': share, 'manager': self._glustermanager(vol)})
        self.private_storage.update(share['id'], {'volume': vol})

        # TODO(deepakcs): Enable quota and set it to the share size.

        # For native protocol, the export_location should be of the form:
        # server:/volname
        LOG.info(_LI("export_location sent back from create_share: %s"),
                 export)
        return export
Example #40
        def _gluster_call(*args, **kwargs):
            logmsg = kwargs.pop('log', None)
            error_policy = kwargs.pop('error_policy', 'coerce')
            if (error_policy not in ('raw', 'coerce', 'suppress') and
                    not (isinstance(error_policy, tuple) and
                         all(isinstance(x, int) for x in error_policy))):
                raise TypeError(_("undefined error_policy %s") %
                                repr(error_policy))

            try:
                return gluster_execf(*(('gluster',) + args), **kwargs)
            except exception.ProcessExecutionError as exc:
                if error_policy == 'raw':
                    raise
                elif error_policy == 'coerce':
                    pass
                elif (error_policy == 'suppress' or
                      exc.exit_code in error_policy):
                    return
                if logmsg:
                    LOG.error(_LE("%s: GlusterFS instrumentation failed.") %
                              logmsg)
                raise exception.GlusterfsException(
                    _("GlusterFS management command '%(cmd)s' failed "
                      "with details as follows:\n%(details)s.") % {
                        'cmd': ' '.join(args),
                        'details': exc})
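To make the four error_policy values concrete, here is a self-contained
stand-in that mirrors the branch order above (the exception class is a stub
for exception.ProcessExecutionError, and RuntimeError stands in for
GlusterfsException):

class ProcessExecutionError(Exception):
    def __init__(self, exit_code):
        super(ProcessExecutionError, self).__init__(exit_code)
        self.exit_code = exit_code

def dispatch(exc, error_policy='coerce'):
    if error_policy == 'raw':
        raise exc                  # propagate the raw CLI failure
    elif error_policy == 'coerce':
        pass                       # fall through to coercion below
    elif error_policy == 'suppress' or exc.exit_code in error_policy:
        return                     # swallow whitelisted failures
    raise RuntimeError('coerced')  # stand-in for GlusterfsException

dispatch(ProcessExecutionError(17), (2, 17))    # whitelisted exit code: swallowed
dispatch(ProcessExecutionError(1), 'suppress')  # suppressed: swallowed
try:
    dispatch(ProcessExecutionError(1))          # default policy coerces
except RuntimeError as e:
    print(e)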
Example #41
    def delete_share(self, context, share, share_server=None):
        """Delete a share on the GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Put the gluster
        volume back in the available list.
        """
        exp_locn = share.get('export_location', None)
        try:
            # Get the gluster address associated with the export.
            gmgr = self.gluster_used_vols_dict[exp_locn]
        except KeyError:
            msg = (_LW("Invalid request. Ignoring delete_share request for "
                       "share %(share_id)s"), {
                           'share_id': share['id']
                       })
            LOG.warn(msg)
            return

        try:
            self._wipe_gluster_vol(gmgr)
            self._push_gluster_vol(exp_locn)
        except exception.GlusterfsException:
            msg = (_LE("Error during delete_share request for "
                       "share %(share_id)s"), {
                           'share_id': share['id']
                       })
            LOG.error(msg)
            raise
Example #42
    def login(self):
        """Login huawei array."""
        login_info = self._get_login_info()
        urlstr = login_info['RestURL']
        url_list = urlstr.split(";")
        deviceid = None
        for item_url in url_list:
            url = item_url.strip() + "xx/sessions"
            data = jsonutils.dumps({"username": login_info['UserName'],
                                    "password": login_info['UserPassword'],
                                    "scope": "0"})
            self.init_http_head()
            result = self.do_call(url, data,
                                  calltimeout=constants.LOGIN_SOCKET_TIMEOUT)

            if (result['error']['code'] != 0
                    or "data" not in result
                    or result['data']['deviceid'] is None):
                LOG.error(_LE("Login to %s failed, try another."), item_url)
                continue

            LOG.debug('Login success: %(url)s\n',
                      {'url': item_url})
            deviceid = result['data']['deviceid']
            self.url = item_url + deviceid
            self.headers['iBaseToken'] = result['data']['iBaseToken']
            break

        if deviceid is None:
            err_msg = _("All url login fail.")
            LOG.error(err_msg)
            raise exception.InvalidShare(reason=err_msg)

        return deviceid
Example #43
    def _init_counter_info(self):
        """Set a few counter names based on Data ONTAP version."""

        self.system_object_name = None
        self.avg_processor_busy_base_counter_name = None

        try:
            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
                self.system_object_name = 'system:constituent'
                self.avg_processor_busy_base_counter_name = (
                    self._get_base_counter_name('system:constituent',
                                                'avg_processor_busy'))
            elif self.zapi_client.features.SYSTEM_METRICS:
                self.system_object_name = 'system'
                self.avg_processor_busy_base_counter_name = (
                    self._get_base_counter_name('system',
                                                'avg_processor_busy'))
        except netapp_api.NaApiError:
            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
            else:
                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
            LOG.exception(
                _LE('Could not get performance base counter '
                    'name. Performance-based scheduler '
                    'functions may not be available.'))
Example #44
    def create_share(self, context, share, share_server=None):
        LOG.debug("Creating share in HSP: %(shr)s", {'shr': share['id']})

        if share['share_proto'].lower() != 'nfs':
            msg = _("Only NFS protocol is currently supported.")
            raise exception.InvalidShare(reason=msg)

        self.hsp.add_file_system(share['id'], share['size'] * units.Gi)
        filesystem_id = self.hsp.get_file_system(share['id'])['id']

        try:
            self.hsp.add_share(share['id'], filesystem_id)
        except exception.HSPBackendException:
            with excutils.save_and_reraise_exception():
                self.hsp.delete_file_system(filesystem_id)
                msg = _LE("Could not create share %s on HSP.")
                LOG.exception(msg, share['id'])

        uri = self.hsp_host + ':/' + share['id']

        LOG.debug("Share created successfully on path: %(uri)s.",
                  {'uri': uri})
        return [{
            "path": uri,
            "metadata": {},
            "is_admin_only": False,
        }]
Example #45
    def delete_snapshot(self, context, snapshot_id):
        """Delete share snapshot."""
        context = context.elevated()
        snapshot_ref = self.db.share_snapshot_get(context, snapshot_id)

        share_server = self._get_share_server(context,
                                              snapshot_ref['share'])

        # Both branches reduce to the snapshot's own project ID.
        project_id = snapshot_ref['project_id']

        try:
            self.driver.delete_snapshot(context, snapshot_ref,
                                        share_server=share_server)
        except exception.ShareSnapshotIsBusy:
            self.db.share_snapshot_update(context, snapshot_ref['id'],
                                          {'status': 'available'})
        except Exception:
            with excutils.save_and_reraise_exception():
                self.db.share_snapshot_update(context, snapshot_ref['id'],
                                              {'status': 'error_deleting'})
        else:
            self.db.share_snapshot_destroy(context, snapshot_id)
            try:
                reservations = QUOTAS.reserve(
                    context, project_id=project_id, snapshots=-1,
                    snapshot_gigabytes=-snapshot_ref['size'])
            except Exception:
                reservations = None
                LOG.exception(_LE("Failed to update usages deleting snapshot"))

            if reservations:
                QUOTAS.commit(context, reservations, project_id=project_id)
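The reserve-then-commit dance above is the standard OpenStack quota pattern:
reserve a negative delta when the resource is destroyed, and commit only if
the reservation succeeded. A runnable stand-in sketch (StubQuotas is a toy
substitute for manila's real quota engine, not its API):

class StubQuotas(object):
    """Toy stand-in for manila.quota.QUOTAS, just to show the call order."""
    def reserve(self, context, project_id=None, **deltas):
        print('reserving %s for project %s' % (deltas, project_id))
        return 'reservation-token'

    def commit(self, context, reservations, project_id=None):
        print('committing %s' % reservations)

QUOTAS = StubQuotas()
reservations = None
try:
    # Negative deltas give the quota back once the snapshot is gone.
    reservations = QUOTAS.reserve(None, project_id='demo',
                                  snapshots=-1, snapshot_gigabytes=-5)
except Exception:
    pass  # best effort: a failed reservation must not block the delete
if reservations:
    QUOTAS.commit(None, reservations, project_id='demo')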
Example #46
    def delete_share(self, context, share, share_server=None):
        """Delete a share on the GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Put the gluster
        volume back in the available list.
        """
        gmgr = self._share_manager(share)
        try:
            self._wipe_gluster_vol(gmgr)
            self._push_gluster_vol(gmgr.qualified)
        except exception.GlusterfsException:
            msg = (_LE("Error during delete_share request for "
                       "share %(share_id)s"), {
                           'share_id': share['id']
                       })
            LOG.error(msg)
            raise

        self.private_storage.delete(share['id'])

        args = ('volume', 'set', gmgr.volume, USER_MANILA_SHARE, 'NONE')
        try:
            gmgr.gluster_call(*args)
        except exception.ProcessExecutionError:
            raise exception.GlusterfsException(
                _("gluster %(cmd)s failed on %(vol)s") % {
                    'cmd': ' '.join(args),
                    'vol': gmgr.qualified
                })
Example #47
    def delete_share(self, context, share, share_server=None):
        """Delete a share on the GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Put the gluster
        volume back in the available list.
        """
        gmgr = self._share_manager(share)
        clone_of = gmgr.get_gluster_vol_option(USER_CLONED_FROM) or ''
        try:
            if UUID_RE.search(clone_of):
                # We take responsibility for the lifecycle
                # management of those volumes which were
                # created by us (as snapshot clones) ...
                args = ('volume', 'delete', gmgr.volume)
            else:
                # ... for volumes that come from the pool, we return
                # them to the pool (after some purification rituals)
                self._wipe_gluster_vol(gmgr)
                args = ('volume', 'set', gmgr.volume, USER_MANILA_SHARE,
                        'NONE')
            gmgr.gluster_call(*args)

            self._push_gluster_vol(gmgr.qualified)
        except exception.GlusterfsException:
            msg = (_LE("Error during delete_share request for "
                       "share %(share_id)s"), {
                           'share_id': share['id']
                       })
            LOG.error(msg)
            raise

        self.private_storage.delete(share['id'])
Example #48
    def _change_file_mode(self, filepath):
        try:
            utils.execute('chmod', '666', filepath, run_as_root=True)

        except Exception as err:
            LOG.error(_LE('Bad response from change file: %s.'), err)
            raise
Example #49
    def _create_vserver_if_nonexistent(self, vserver_name, network_info):
        """Creates Vserver with given parameters if it doesn't exist."""

        if self._client.vserver_exists(vserver_name):
            msg = _('Vserver %s already exists.')
            raise exception.NetAppException(msg % vserver_name)

        LOG.debug('Vserver %s does not exist, creating.', vserver_name)
        self._client.create_vserver(
            vserver_name,
            self.configuration.netapp_root_volume_aggregate,
            self.configuration.netapp_root_volume,
            self._find_matching_aggregates())

        vserver_client = self._get_api_client(vserver=vserver_name)
        try:
            self._create_vserver_lifs(vserver_name,
                                      vserver_client,
                                      network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to create network interface(s)."))
                self._client.delete_vserver(vserver_name, vserver_client)

        vserver_client.enable_nfs()

        security_services = network_info.get('security_services')
        if security_services:
            self._client.setup_security_services(security_services,
                                                 vserver_client,
                                                 vserver_name)
Example #50
    def do_setup(self, context):
        """Prepares the backend and appropriate NAS helpers."""
        if not self.configuration.glusterfs_target:
            raise exception.GlusterfsException(
                _('glusterfs_target configuration that specifies the GlusterFS'
                  ' volume to be mounted on the Manila host is not set.'))
        self.gluster_manager = self._glustermanager(
            self.configuration.glusterfs_target)
        self.gluster_manager.check_gluster_version(
            self.driver.GLUSTERFS_VERSION_MIN)
        self._check_mount_glusterfs()

        # Enable the quota feature on the GlusterFS volume so that
        # shares of a specific size can be created.
        args = ('volume', 'quota', self.gluster_manager.volume, 'enable')
        try:
            self.gluster_manager.gluster_call(*args)
        except exception.GlusterfsException:
            if (self.gluster_manager.
                    get_gluster_vol_option('features.quota')) != 'on':
                LOG.error(_LE("Error in tuning GlusterFS volume to enable "
                              "creation of shares of specific size."))
                raise

        self._ensure_gluster_vol_mounted()
Example #51
    def create(self, local_share_info, remote_device_wwn, remote_fs_id):
        local_share_name = local_share_info.get('name')

        try:
            local_fs_id = self.helper.get_fsid_by_name(local_share_name)
            if not local_fs_id:
                msg = _("Local fs was not found by name %s.")
                LOG.error(msg, local_share_name)
                raise exception.ReplicationException(reason=msg %
                                                     local_share_name)

            remote_device = self.helper.get_remote_device_by_wwn(
                remote_device_wwn)
            pair_params = {
                "LOCALRESID": local_fs_id,
                "LOCALRESTYPE": constants.FILE_SYSTEM_TYPE,
                "REMOTEDEVICEID": remote_device.get('ID'),
                "REMOTEDEVICENAME": remote_device.get('NAME'),
                "REMOTERESID": remote_fs_id,
                "REPLICATIONMODEL": constants.REPLICA_ASYNC_MODEL,
                "RECOVERYPOLICY": '2',
                "SYNCHRONIZETYPE": '1',
                "SPEED": constants.REPLICA_SPEED_MEDIUM,
            }

            pair_info = self.helper.create_replication_pair(pair_params)
        except Exception:
            msg = _LE("Failed to create replication pair for share %s.")
            LOG.exception(msg, local_share_name)
            raise

        self._sync_replication_pair(pair_info['ID'])

        return pair_info['ID']
Example #53
    def _get_gluster_vol_option(self, option):
        try:
            args, kw = self.gluster_address.make_gluster_args(
                '--xml',
                'volume',
                'info',
                self.gluster_address.volume
            )
            out, err = self._execute(*args, **kw)
        except exception.ProcessExecutionError as exc:
            LOG.error(_LE("Error retrieving volume info: %s"), exc.stderr)
            raise exception.GlusterfsException(exc)

        if not out:
            raise exception.GlusterfsException(
                'Empty answer from gluster command'
            )

        vix = etree.fromstring(out)
        if int(vix.find('./volInfo/volumes/count').text) != 1:
            raise exception.InvalidShare('Volume name ambiguity')
        for e in vix.findall(".//option"):
            o, v = (e.find(a).text for a in ('name', 'value'))
            if o == option:
                return v
Example #54
def upgrade():
    try:
        op.create_table(
            drivers_private_data_table_name,
            sql.Column('created_at', sql.DateTime),
            sql.Column('updated_at', sql.DateTime),
            sql.Column('deleted_at', sql.DateTime),
            sql.Column('deleted', sql.Integer, default=0),
            sql.Column('host',
                       sql.String(255),
                       nullable=False,
                       primary_key=True),
            sql.Column('entity_uuid',
                       sql.String(36),
                       nullable=False,
                       primary_key=True),
            sql.Column('key',
                       sql.String(255),
                       nullable=False,
                       primary_key=True),
            sql.Column('value', sql.String(1023), nullable=False),
            mysql_engine='InnoDB',
        )
    except Exception:
        LOG.error(_LE("Table |%s| not created!"),
                  drivers_private_data_table_name)
        raise
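A plausible matching downgrade for this migration, sketched under the
assumption that drivers_private_data_table_name is the same module-level
constant used by upgrade():

def downgrade():
    try:
        op.drop_table(drivers_private_data_table_name)
    except Exception:
        LOG.error(_LE("Table |%s| not dropped!"),
                  drivers_private_data_table_name)
        raise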
Example #55
    def run_vsctl(self, args):
        full_args = ["ovs-vsctl", "--timeout=2"] + args
        try:
            return utils.execute(*full_args, run_as_root=True)
        except Exception:
            LOG.exception(_LE("Unable to execute %(cmd)s."),
                          {'cmd': full_args})
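A hedged usage sketch; 'ovs' stands for a hypothetical instance of the class
defining run_vsctl, and the subcommands are standard ovs-vsctl ones:

# 'ovs' is a hypothetical instance of the class above.
ovs.run_vsctl(['--', '--may-exist', 'add-br', 'br-int'])  # create bridge if absent
out = ovs.run_vsctl(['list-ports', 'br-int'])             # (stdout, stderr), or None on error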
Example #56
    def __exit__(self, ex_type, ex_value, ex_traceback):
        if not ex_value:
            return True

        if isinstance(ex_value, exception.NotAuthorized):
            msg = six.text_type(ex_value)
            raise Fault(webob.exc.HTTPForbidden(explanation=msg))
        elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
            raise
        elif isinstance(ex_value, exception.Invalid):
            raise Fault(
                exception.ConvertedException(
                    code=ex_value.code, explanation=six.text_type(ex_value)))
        elif isinstance(ex_value, TypeError):
            exc_info = (ex_type, ex_value, ex_traceback)
            LOG.error(_LE('Exception handling resource: %s'),
                      ex_value,
                      exc_info=exc_info)
            raise Fault(webob.exc.HTTPBadRequest())
        elif isinstance(ex_value, Fault):
            LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value))
            raise ex_value
        elif isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value))
            raise Fault(ex_value)

        # We didn't handle the exception
        return False
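The same __exit__ dispatch idea, reduced to a runnable toy (the class and
exception names here are illustrative stand-ins, not manila's real ones):

class NotAuthorized(Exception):
    pass

class FaultGuard(object):
    """Context manager converting known exceptions, mirroring the
    dispatch style of the __exit__ above."""
    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_value, ex_traceback):
        if not ex_value:
            return True
        if isinstance(ex_value, NotAuthorized):
            raise RuntimeError('403: %s' % ex_value)
        return False  # unhandled: let the original exception propagate

try:
    with FaultGuard():
        raise NotAuthorized('no token')
except RuntimeError as converted:
    print(converted)  # -> 403: no token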
Example #58
    def extend_share(self, context, share_id, new_size, reservations):
        context = context.elevated()
        share = self.db.share_get(context, share_id)
        share_server = self._get_share_server(context, share)
        project_id = share['project_id']

        try:
            self.driver.extend_share(
                share, new_size, share_server=share_server)
        except Exception as e:
            LOG.exception(_LE("Extend share failed."), resource=share)

            try:
                self.db.share_update(
                    context, share['id'],
                    {'status': constants.STATUS_EXTENDING_ERROR}
                )
                raise exception.ShareExtendingError(
                    reason=six.text_type(e), share_id=share_id)
            finally:
                QUOTAS.rollback(context, reservations, project_id=project_id)

        QUOTAS.commit(context, reservations, project_id=project_id)

        share_update = {
            'size': int(new_size),
            # NOTE(u_glide): translation to lower case should be removed in
            # a row with usage of upper case of share statuses in all places
            'status': constants.STATUS_AVAILABLE.lower()
        }
        share = self.db.share_update(context, share['id'], share_update)

        LOG.info(_LI("Extend share completed successfully."), resource=share)
Example #59
File: fault.py Project: vkmc/manila
    def _error(self, inner, req):
        LOG.exception(_LE("Caught error: %s"), inner)

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500

        msg_dict = dict(url=req.url, status=status)
        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            six.text_type(inner))
        return wsgi.Fault(outer)