Example #1
    def __exit__(self, ex_type, ex_value, ex_traceback):
        if not ex_value:
            return True

        if isinstance(ex_value, exception.NotAuthorized):
            msg = six.text_type(ex_value)
            raise Fault(webob.exc.HTTPForbidden(explanation=msg))
        elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
            raise
        elif isinstance(ex_value, exception.Invalid):
            raise Fault(
                exception.ConvertedException(
                    code=ex_value.code, explanation=six.text_type(ex_value)))
        elif isinstance(ex_value, TypeError):
            exc_info = (ex_type, ex_value, ex_traceback)
            LOG.error(_LE('Exception handling resource: %s'),
                      ex_value,
                      exc_info=exc_info)
            raise Fault(webob.exc.HTTPBadRequest())
        elif isinstance(ex_value, Fault):
            LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value))
            raise ex_value
        elif isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value))
            raise Fault(ex_value)

        # We didn't handle the exception
        return False
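A note on the protocol at work here: in a with block, __exit__ returning False lets the active exception propagate, raising inside it replaces the exception, and returning True suppresses it. A minimal, self-contained sketch of that contract, with invented names (nothing below is Manila code):

class Translate(object):
    """Demo of the __exit__ contract used above."""

    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_value, ex_traceback):
        if isinstance(ex_value, KeyError):
            # Raising here replaces the original exception.
            raise ValueError('translated: %s' % ex_value)
        # Returning False propagates anything else unchanged.
        return False

# with Translate():
#     {}['missing']     # surfaces as ValueError('translated: ...')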
Example #2
    def volume_client(self):
        if self._volume_client:
            return self._volume_client

        if not ceph_module_found:
            raise exception.ManilaException(
                _("Ceph client libraries not found.")
            )

        conf_path = self.configuration.safe_get('cephfs_conf_path')
        cluster_name = self.configuration.safe_get('cephfs_cluster_name')
        auth_id = self.configuration.safe_get('cephfs_auth_id')
        self._volume_client = ceph_volume_client.CephFSVolumeClient(
            auth_id, conf_path, cluster_name)
        LOG.info(_LI("[%(be)s}] Ceph client found, connecting..."),
                 {"be": self.backend_name})
        if auth_id != CEPH_DEFAULT_AUTH_ID:
            # Evict any other manila sessions.  Only do this if we're
            # using a client ID that isn't the default admin ID, to avoid
            # rudely disrupting anyone else.
            premount_evict = auth_id
        else:
            premount_evict = None
        try:
            self._volume_client.connect(premount_evict=premount_evict)
        except Exception:
            self._volume_client = None
            raise
        else:
            LOG.info(_LI("[%(be)s] Ceph client connection complete."),
                     {"be": self.backend_name})

        return self._volume_client
Example #3
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(.01)
                continue

            LOG.info(_LI('wait wrap.failed %s'), wrap.failed)
            while (self.running and len(wrap.children) < wrap.workers
                   and not wrap.failed):
                self._start_child(wrap)

        if self.sigcaught:
            signame = {signal.SIGTERM: 'SIGTERM',
                       signal.SIGINT: 'SIGINT'}[self.sigcaught]
            LOG.info(_LI('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
Example #4
    def volume_client(self):
        if self._volume_client:
            return self._volume_client

        if not ceph_module_found:
            raise exception.ManilaException(_("Ceph client libraries not found."))

        conf_path = self.configuration.safe_get("cephfs_conf_path")
        cluster_name = self.configuration.safe_get("cephfs_cluster_name")
        auth_id = self.configuration.safe_get("cephfs_auth_id")
        self._volume_client = ceph_volume_client.CephFSVolumeClient(auth_id, conf_path, cluster_name)
        LOG.info(_LI("[%(be)s}] Ceph client found, connecting..."), {"be": self.backend_name})
        if auth_id != CEPH_DEFAULT_AUTH_ID:
            # Evict any other manila sessions.  Only do this if we're
            # using a client ID that isn't the default admin ID, to avoid
            # rudely disrupting anyone else.
            premount_evict = auth_id
        else:
            premount_evict = None
        try:
            self._volume_client.connect(premount_evict=premount_evict)
        except Exception:
            self._volume_client = None
            raise
        else:
            LOG.info(_LI("[%(be)s] Ceph client connection complete."), {"be": self.backend_name})

        return self._volume_client
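Both renderings above implement the same memoized-client pattern: cache the client on first use and clear the cache when connecting fails, so the next call can retry. A stripped-down sketch of just the pattern; the Client class is a stand-in, not the Ceph API:

class Client(object):
    """Stand-in for a real client such as CephFSVolumeClient."""

    def connect(self):
        pass

class Backend(object):
    def __init__(self):
        self._client = None

    @property
    def client(self):
        if self._client is not None:
            return self._client
        self._client = Client()
        try:
            self._client.connect()
        except Exception:
            self._client = None   # empty the cache so callers can retry
            raise
        return self._client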
Example #5
    def _update_access_fallback(self, context, add_rules, delete_rules,
                                remove_rules, share_instance, share_server):
        for rule in add_rules:
            LOG.info(
                _LI("Applying access rule '%(rule)s' for share "
                    "instance '%(instance)s'"), {
                        'rule': rule['id'],
                        'instance': share_instance['id']
                    })

            self.driver.allow_access(context,
                                     share_instance,
                                     rule,
                                     share_server=share_server)

        # NOTE(ganso): Fallback mode temporary compatibility workaround
        if remove_rules:
            delete_rules.extend(remove_rules)

        for rule in delete_rules:
            LOG.info(
                _LI("Denying access rule '%(rule)s' from share "
                    "instance '%(instance)s'"), {
                        'rule': rule['id'],
                        'instance': share_instance['id']
                    })

            self.driver.deny_access(context,
                                    share_instance,
                                    rule,
                                    share_server=share_server)
Example #6
    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        code = 0
        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
                     {'pid': pid, 'sig': sig})
        else:
            code = os.WEXITSTATUS(status)
            LOG.info(_LI('Child %(pid)d exited with status %(code)d'),
                     {'pid': pid, 'code': code})

        if pid not in self.children:
            LOG.warning(_LW('pid %d not in child list'), pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        if code == 2:
            wrap.failed = True
            self.failedwrap = self.failedwrap + 1
            LOG.info(_LI('_wait_child %d'), self.failedwrap)
            if self.failedwrap == self.totalwrap:
                self.running = False
        return wrap
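os.waitpid(0, os.WNOHANG) polls for any exited child without blocking: it returns (0, 0) while children exist but none has exited, and raises OSError with ECHILD once no children remain. A standalone sketch of that contract:

import errno
import os

def reap_one_child():
    """Non-blocking reap of a single exited child, if any."""
    try:
        pid, status = os.waitpid(0, os.WNOHANG)
    except OSError as exc:
        if exc.errno != errno.ECHILD:
            raise
        return None                       # no children at all
    if pid == 0:
        return None                       # children alive, none exited yet
    if os.WIFSIGNALED(status):
        return pid, -os.WTERMSIG(status)  # terminated by a signal
    return pid, os.WEXITSTATUS(status)    # normal exit status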
Example #7
        def _provide_share_server_for_share():
            exist = False
            try:
                share_server = \
                    self.db.share_server_get_by_host_and_share_net_valid(
                        context, self.host, share_network_id)
                exist = True
            except exception.ShareServerNotFound:
                share_server = self.db.share_server_create(
                    context,
                    {
                        'host': self.host,
                        'share_network_id': share_network_id,
                        'status': constants.STATUS_CREATING
                    }
                )

            LOG.debug("Using share_server %s for share %s" % (
                share_server['id'], share_id))
            share_ref = self.db.share_update(
                context,
                share_id,
                {'share_server_id': share_server['id']},
            )

            if not exist:
                # Create share server on backend with data from db
                share_server = self._setup_server(context, share_server)
                LOG.info(_LI("Share server created successfully."))
            else:
                LOG.info(_LI("Used already existed share server "
                             "'%(share_server_id)s'"),
                         {'share_server_id': share_server['id']})
            return share_server, share_ref
Example #8
    def validate_port_configuration(self, port_ids_conf):
        """Initializes the SP and ports based on the port option."""

        ports = self.client.get_file_ports()

        sp_ports_map, unmanaged_port_ids = unity_utils.match_ports(
            ports, port_ids_conf)

        if not sp_ports_map:
            msg = (_("All the specified storage ports to be managed "
                     "do not exist. Please check your configuration "
                     "unity_ethernet_ports in manila.conf. "
                     "The available ports in the backend are %s.") %
                   ",".join([port.get_id() for port in ports]))
            raise exception.BadConfigurationException(reason=msg)

        if unmanaged_port_ids:
            LOG.info(
                _LI("The following specified ports are not managed by "
                    "the backend: %(unmanaged)s. This host will only "
                    "manage the storage ports: %(exist)s"), {
                        'unmanaged': ",".join(unmanaged_port_ids),
                        'exist': ",".join(map(",".join, sp_ports_map.values()))
                    })
        else:
            LOG.debug("Ports: %s will be managed.",
                      ",".join(map(",".join, sp_ports_map.values())))

        if len(sp_ports_map) == 1:
            LOG.info(
                _LI("Only ports of %s are configured. Configure ports "
                    "of both SPA and SPB to use both of the SPs."),
                list(sp_ports_map)[0])

        return sp_ports_map
Example #9
 def _update_info_from_dpkg(self):
     LOG.debug('Trying dpkg-query command.')
     try:
         _vendor = None
         out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
                                   self.PACKAGE_NAME)
         if not out:
             LOG.info(_LI(
                 'No dpkg-query info found for %(pkg)s package.') % {
                 'pkg': self.PACKAGE_NAME})
             return False
         # Debian format: [epoch:]upstream_version[-debian_revision]
         deb_version = out
         # In case epoch or revision is missing, copy entire string.
         _release = deb_version
         if ':' in deb_version:
             deb_epoch, upstream_version = deb_version.split(':')
             _release = upstream_version
         if '-' in deb_version:
             deb_revision = deb_version.split('-')[1]
             _vendor = deb_revision
         self._release = _release
         if _vendor:
             self._vendor = _vendor
         return True
     except Exception as e:
         LOG.info(_LI('Could not run dpkg-query command: %(msg)s.') % {
             'msg': e})
         return False
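The format being parsed is Debian's [epoch:]upstream_version[-debian_revision]. A worked example of the same splits on a made-up version string; note that, as in the code above, the revision is not stripped from the release value when both parts are present:

deb_version = '1:2.6.0-2ubuntu1'        # made-up example value

release = deb_version
vendor = None
if ':' in deb_version:
    epoch, upstream = deb_version.split(':')
    release = upstream                  # '2.6.0-2ubuntu1'
if '-' in deb_version:
    vendor = deb_version.split('-')[1]  # '2ubuntu1'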
Example #10
    def __exit__(self, ex_type, ex_value, ex_traceback):
        if not ex_value:
            return True

        if isinstance(ex_value, exception.NotAuthorized):
            msg = six.text_type(ex_value)
            raise Fault(webob.exc.HTTPForbidden(explanation=msg))
        elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
            raise
        elif isinstance(ex_value, exception.Invalid):
            raise Fault(exception.ConvertedException(
                code=ex_value.code, explanation=six.text_type(ex_value)))
        elif isinstance(ex_value, TypeError):
            exc_info = (ex_type, ex_value, ex_traceback)
            LOG.error(_LE('Exception handling resource: %s'),
                      ex_value, exc_info=exc_info)
            raise Fault(webob.exc.HTTPBadRequest())
        elif isinstance(ex_value, Fault):
            LOG.info(_LI("Fault thrown: %s"), six.text_type(ex_value))
            raise ex_value
        elif isinstance(ex_value, webob.exc.HTTPException):
            LOG.info(_LI("HTTP exception thrown: %s"), six.text_type(ex_value))
            raise Fault(ex_value)

        # We didn't handle the exception
        return False
Example #12
    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(.01)
                continue

            LOG.info(_LI('wait wrap.failed %s'), wrap.failed)
            while (self.running and len(wrap.children) < wrap.workers
                   and not wrap.failed):
                self._start_child(wrap)

        if self.sigcaught:
            signame = {
                signal.SIGTERM: 'SIGTERM',
                signal.SIGINT: 'SIGINT'
            }[self.sigcaught]
            LOG.info(_LI('Caught %s, stopping children'), signame)

        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
            while self.children:
                self._wait_child()
Example #13
    def migration_start(self, context, ignore_list, share_id,
                        share_instance_id, dest_share_instance_id,
                        migration_info_src, migration_info_dest, notify):

        LOG.info(_LI(
            "Received request to migrate share content from share instance "
            "%(instance_id)s to instance %(dest_instance_id)s."),
            {'instance_id': share_instance_id,
             'dest_instance_id': dest_share_instance_id})

        share_ref = self.db.share_get(context, share_id)

        share_rpcapi = share_rpc.ShareAPI()

        mount_path = CONF.migration_tmp_location

        try:
            copy = data_utils.Copy(
                os.path.join(mount_path, share_instance_id),
                os.path.join(mount_path, dest_share_instance_id),
                ignore_list)

            self._copy_share_data(
                context, copy, share_ref, share_instance_id,
                dest_share_instance_id, migration_info_src,
                migration_info_dest)
        except exception.ShareDataCopyCancelled:
            share_rpcapi.migration_complete(
                context, share_ref, share_instance_id, dest_share_instance_id)
            return
        except Exception:
            self.db.share_update(
                context, share_id,
                {'task_state': constants.TASK_STATE_DATA_COPYING_ERROR})
            msg = _("Failed to copy contents from instance %(src)s to "
                    "instance %(dest)s.") % {'src': share_instance_id,
                                             'dest': dest_share_instance_id}
            LOG.exception(msg)
            share_rpcapi.migration_complete(
                context, share_ref, share_instance_id, dest_share_instance_id)
            raise exception.ShareDataCopyFailed(reason=msg)
        finally:
            self.busy_tasks_shares.pop(share_id, None)

        LOG.info(_LI(
            "Completed copy operation of migrating share content from share "
            "instance %(instance_id)s to instance %(dest_instance_id)s."),
            {'instance_id': share_instance_id,
             'dest_instance_id': dest_share_instance_id})

        if notify:
            LOG.info(_LI(
                "Notifying source backend that migrating share content from"
                " share instance %(instance_id)s to instance "
                "%(dest_instance_id)s completed."),
                {'instance_id': share_instance_id,
                 'dest_instance_id': dest_share_instance_id})

            share_rpcapi.migration_complete(
                context, share_ref, share_instance_id, dest_share_instance_id)
Example #14
    def _create_driver(storage_family, driver_mode, *args, **kwargs):
        """"Creates an appropriate driver based on family and mode."""

        storage_family = storage_family.lower()

        fmt = {'storage_family': storage_family, 'driver_mode': driver_mode}
        LOG.info(
            _LI('Requested unified config: %(storage_family)s and '
                '%(driver_mode)s.') % fmt)

        family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
        if family_meta is None:
            raise exception.InvalidInput(
                reason=_('Storage family %s is not supported.') %
                storage_family)

        driver_loc = family_meta.get(driver_mode)
        if driver_loc is None:
            raise exception.InvalidInput(
                reason=_('Driver mode %(driver_mode)s is not supported '
                         'for storage family %(storage_family)s.') % fmt)

        kwargs['netapp_mode'] = 'proxy'
        driver = importutils.import_object(driver_loc, *args, **kwargs)
        LOG.info(
            _LI('NetApp driver of family %(storage_family)s and mode '
                '%(driver_mode)s loaded.') % fmt)
        return driver
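The factory resolves a two-level registry (storage family, then driver mode) to a dotted class path and instantiates it with oslo_utils.importutils.import_object. A minimal sketch of that shape; the registry contents and paths below are placeholders, not the actual NetApp driver locations:

from oslo_utils import importutils

REGISTRY = {
    'example_family': {
        'example_mode': 'mypackage.drivers.ExampleDriver',  # placeholder
    },
}

def create_driver(family, mode, *args, **kwargs):
    loc = REGISTRY.get(family.lower(), {}).get(mode)
    if loc is None:
        raise ValueError('unsupported: %s/%s' % (family, mode))
    return importutils.import_object(loc, *args, **kwargs)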
Example #15
 def _update_info_from_dpkg(self):
     LOG.debug('Trying dpkg-query command.')
     try:
         _vendor = None
         out, err = putils.execute("dpkg-query", "-W", "-f='${Version}'",
                                   self.PACKAGE_NAME)
         if not out:
             LOG.info(
                 _LI('No dpkg-query info found for %(pkg)s package.') %
                 {'pkg': self.PACKAGE_NAME})
             return False
         # Debian format: [epoch:]upstream_version[-debian_revision]
         deb_version = out
         # In case epoch or revision is missing, copy entire string.
         _release = deb_version
         if ':' in deb_version:
             deb_epoch, upstream_version = deb_version.split(':')
             _release = upstream_version
         if '-' in deb_version:
             deb_revision = deb_version.split('-')[1]
             _vendor = deb_revision
         self._release = _release
         if _vendor:
             self._vendor = _vendor
         return True
     except Exception as e:
         LOG.info(
             _LI('Could not run dpkg-query command: %(msg)s.') % {'msg': e})
         return False
Example #16
    def _update_access_fallback(self, context, add_rules, delete_rules,
                                remove_rules, share_instance, share_server):
        for rule in add_rules:
            LOG.info(
                _LI("Applying access rule '%(rule)s' for share "
                    "instance '%(instance)s'"),
                {'rule': rule['id'], 'instance': share_instance['id']}
            )

            self.driver.allow_access(
                context,
                share_instance,
                rule,
                share_server=share_server
            )

        # NOTE(ganso): Fallback mode temporary compatibility workaround
        if remove_rules:
            delete_rules.extend(remove_rules)

        for rule in delete_rules:
            LOG.info(
                _LI("Denying access rule '%(rule)s' from share "
                    "instance '%(instance)s'"),
                {'rule': rule['id'], 'instance': share_instance['id']}
            )

            self.driver.deny_access(
                context,
                share_instance,
                rule,
                share_server=share_server
            )
Example #17
    def validate_port_configuration(self, port_ids_conf):
        """Initializes the SP and ports based on the port option."""

        ports = self.client.get_file_ports()

        sp_ports_map, unmanaged_port_ids = unity_utils.match_ports(
            ports, port_ids_conf)

        if not sp_ports_map:
            msg = (_("All the specified storage ports to be managed "
                     "do not exist. Please check your configuration "
                     "unity_ethernet_ports in manila.conf. "
                     "The available ports in the backend are %s.") %
                   ",".join([port.get_id() for port in ports]))
            raise exception.BadConfigurationException(reason=msg)

        if unmanaged_port_ids:
            LOG.info(_LI("The following specified ports are not managed by "
                         "the backend: %(unmanaged)s. This host will only "
                         "manage the storage ports: %(exist)s"),
                     {'unmanaged': ",".join(unmanaged_port_ids),
                      'exist': ",".join(map(",".join,
                                            sp_ports_map.values()))})
        else:
            LOG.debug("Ports: %s will be managed.",
                      ",".join(map(",".join, sp_ports_map.values())))

        if len(sp_ports_map) == 1:
            LOG.info(_LI("Only ports of %s are configured. Configure ports "
                         "of both SPA and SPB to use both of the SPs."),
                     list(sp_ports_map)[0])

        return sp_ports_map
Example #18
    def _create_driver(storage_family, driver_mode, *args, **kwargs):
        """"Creates an appropriate driver based on family and mode."""

        storage_family = storage_family.lower()

        fmt = {'storage_family': storage_family, 'driver_mode': driver_mode}
        LOG.info(_LI('Requested unified config: %(storage_family)s and '
                     '%(driver_mode)s.') % fmt)

        family_meta = NETAPP_UNIFIED_DRIVER_REGISTRY.get(storage_family)
        if family_meta is None:
            raise exception.InvalidInput(
                reason=_('Storage family %s is not supported.')
                % storage_family)

        driver_loc = family_meta.get(driver_mode)
        if driver_loc is None:
            raise exception.InvalidInput(
                reason=_('Driver mode %(driver_mode)s is not supported '
                         'for storage family %(storage_family)s.') % fmt)

        kwargs['netapp_mode'] = 'proxy'
        driver = importutils.import_object(driver_loc, *args, **kwargs)
        LOG.info(_LI('NetApp driver of family %(storage_family)s and mode '
                     '%(driver_mode)s loaded.') % fmt)
        return driver
Example #19
def downgrade():
    LOG.info(_LI("Creating volume_type_extra_specs table"))
    vt_es = op.create_table(
        'volume_type_extra_specs',
        sa.Column('created_at', sa.DateTime),
        sa.Column('updated_at', sa.DateTime),
        sa.Column('deleted_at', sa.DateTime),
        sa.Column('deleted', sa.Boolean),
        sa.Column('id', sa.Integer, primary_key=True, nullable=False),
        sa.Column('volume_type_id', sa.String(length=36),
                  sa.ForeignKey('share_types.id'), nullable=False),
        sa.Column('key', sa.String(length=255)),
        sa.Column('value', sa.String(length=255)),
        mysql_engine='InnoDB')

    LOG.info(_LI("Migrating share_type_extra_specs to "
             "volume_type_extra_specs"))
    _copy_records(destination_table=vt_es, up_migration=False)

    LOG.info(_LI("Dropping share_type_extra_specs table"))
    op.drop_table("share_type_extra_specs")

    LOG.info(_LI("Renaming share_types table to volume_types"))
    op.drop_constraint('st_name_uc', 'share_types', type_='unique')
    op.create_unique_constraint('vt_name_uc', 'share_types',
                                ['name', 'deleted'])
    op.rename_table("share_types", "volume_types")

    LOG.info(_LI("Renaming column name shares.share_type_id to "
             "shares.volume_type.id"))
    op.alter_column("shares", "share_type_id",
                    new_column_name="volume_type_id",
                    type_=sa.String(length=36))
Example #20
def downgrade():
    connection = op.get_bind()
    from_table = utils.load_table(TABLE_NAME, connection)
    migration_table_name = "_migrating_%(table)s_%(session)s" % {
        'table': TABLE_NAME,
        'session': uuidutils.generate_uuid()[:8]
    }

    LOG.info(_LI("Creating the migration table %(table)s"), {
        'table': migration_table_name
    })
    migration_table = op.create_table(
        migration_table_name,
        sql.Column('created_at', sql.DateTime),
        sql.Column('updated_at', sql.DateTime),
        sql.Column('deleted_at', sql.DateTime),
        sql.Column('deleted', sql.Integer, default=0),
        sql.Column('host', sql.String(255),
                   nullable=False, primary_key=True),
        sql.Column('entity_uuid', sql.String(36),
                   nullable=False, primary_key=True),
        sql.Column('key', sql.String(255),
                   nullable=False, primary_key=True),
        sql.Column('value', sql.String(1023), nullable=False),
        mysql_engine='InnoDB',
    )

    LOG.info(_LI("Copying data from %(from_table)s to the migration "
                 "table %(migration_table)s") % {
        'from_table': TABLE_NAME,
        'migration_table': migration_table_name
    })
    rows = []
    for row in op.get_bind().execute(from_table.select()):
        rows.append({
            'created_at': row.created_at,
            'updated_at': row.updated_at,
            'deleted_at': row.deleted_at,
            'deleted': row.deleted,
            'host': DEFAULT_HOST,
            'entity_uuid': row.entity_uuid,
            'key': row.key,
            'value': row.value
        })
    op.bulk_insert(migration_table, rows)

    LOG.info(_LI("Dropping table %(from_table)s") % {
        'from_table': TABLE_NAME
    })
    op.drop_table(TABLE_NAME)

    LOG.info(_LI("Rename the migration table %(migration_table)s to "
                 "the original table %(from_table)s") % {
        'migration_table': migration_table_name,
        'from_table': TABLE_NAME
    })
    op.rename_table(migration_table_name, TABLE_NAME)
Example #21
def downgrade():
    connection = op.get_bind()
    from_table = utils.load_table(TABLE_NAME, connection)
    migration_table_name = "_migrating_%(table)s_%(session)s" % {
        'table': TABLE_NAME,
        'session': uuidutils.generate_uuid()[:8]
    }

    LOG.info(_LI("Creating the migration table %(table)s"),
             {'table': migration_table_name})
    migration_table = op.create_table(
        migration_table_name,
        sql.Column('created_at', sql.DateTime),
        sql.Column('updated_at', sql.DateTime),
        sql.Column('deleted_at', sql.DateTime),
        sql.Column('deleted', sql.Integer, default=0),
        sql.Column('host', sql.String(255), nullable=False, primary_key=True),
        sql.Column('entity_uuid',
                   sql.String(36),
                   nullable=False,
                   primary_key=True),
        sql.Column('key', sql.String(255), nullable=False, primary_key=True),
        sql.Column('value', sql.String(1023), nullable=False),
        mysql_engine='InnoDB',
    )

    LOG.info(
        _LI("Copying data from %(from_table)s to the migration "
            "table %(migration_table)s") % {
                'from_table': TABLE_NAME,
                'migration_table': migration_table_name
            })
    rows = []
    for row in op.get_bind().execute(from_table.select()):
        rows.append({
            'created_at': row.created_at,
            'updated_at': row.updated_at,
            'deleted_at': row.deleted_at,
            'deleted': row.deleted,
            'host': DEFAULT_HOST,
            'entity_uuid': row.entity_uuid,
            'key': row.key,
            'value': row.value
        })
    op.bulk_insert(migration_table, rows)

    LOG.info(_LI("Dropping table %(from_table)s") % {'from_table': TABLE_NAME})
    op.drop_table(TABLE_NAME)

    LOG.info(
        _LI("Rename the migration table %(migration_table)s to "
            "the original table %(from_table)s") % {
                'migration_table': migration_table_name,
                'from_table': TABLE_NAME
            })
    op.rename_table(migration_table_name, TABLE_NAME)
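Both migration-table downgrades above follow the same rename-based recipe for changes that cannot be applied in place: create a replacement table under a temporary name, bulk-copy the rows, drop the original, then rename. Reduced to its skeleton, reusing the op, sql, and utils imports already visible above (table and column names here are illustrative):

old_table = utils.load_table('example', op.get_bind())
migration_table = op.create_table(
    '_migrating_example',
    sql.Column('id', sql.Integer, primary_key=True),
    sql.Column('value', sql.String(255)),
    mysql_engine='InnoDB',
)
rows = [{'id': row.id, 'value': row.value}
        for row in op.get_bind().execute(old_table.select())]
op.bulk_insert(migration_table, rows)
op.drop_table('example')
op.rename_table('_migrating_example', 'example')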
Example #22
    def manage_existing(self, share, driver_options):
        """Manages a share that exists on backend.

        :param share: Share that will be managed.
        :param driver_options: Empty dict or dict with 'volume_id' option.
        :returns: Returns a dict with size of share managed
            and its location (your path in file-system).
        """
        hnas_share_id = self._get_hnas_share_id(share['id'])

        # Make sure returned value is the same as provided,
        # confirming it does not exist.
        if hnas_share_id != share['id']:
            msg = _("Share ID %s already exists, cannot manage.") % share['id']
            raise exception.HNASBackendException(msg=msg)

        LOG.info(_LI("Share %(shr_path)s will be managed with ID %(shr_id)s."),
                 {'shr_path': share['export_locations'][0]['path'],
                  'shr_id': share['id']})

        old_path_info = share['export_locations'][0]['path'].split(':')
        old_path = old_path_info[1].split('/')

        if len(old_path) == 3:
            evs_ip = old_path_info[0]
            hnas_share_id = old_path[2]
        else:
            msg = _("Incorrect path. It should have the following format: "
                    "IP:/shares/share_id.")
            raise exception.ShareBackendException(msg=msg)

        if evs_ip != self.hnas_evs_ip:
            msg = _("The EVS IP %(evs)s is not "
                    "configured.") % {'evs': evs_ip}
            raise exception.ShareBackendException(msg=msg)

        if self.backend_name not in share['host']:
            msg = _("The backend passed in the host parameter (%(shr)s) is "
                    "not configured.") % {'shr': share['host']}
            raise exception.ShareBackendException(msg=msg)

        output = self._manage_existing(share['id'], hnas_share_id)
        self.private_storage.update(
            share['id'], {'hnas_id': hnas_share_id})

        LOG.debug("HNAS ID %(hnas_id)s has been saved to private storage for "
                  "Share ID %(share_id)s", {'hnas_id': hnas_share_id,
                                            'share_id': share['id']})

        LOG.info(_LI("Share %(shr_path)s was successfully managed with ID "
                     "%(shr_id)s."),
                 {'shr_path': share['export_locations'][0]['path'],
                  'shr_id': share['id']})

        return output
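A worked example of the export-location parsing in manage_existing, using a made-up path. Splitting /shares/share_id on '/' yields three elements because of the leading slash, which is exactly what the length check relies on:

path = '172.24.44.10:/shares/45ed6670'   # made-up export location

old_path_info = path.split(':')          # ['172.24.44.10', '/shares/45ed6670']
old_path = old_path_info[1].split('/')   # ['', 'shares', '45ed6670']
assert len(old_path) == 3
evs_ip = old_path_info[0]                # '172.24.44.10'
share_id = old_path[2]                   # '45ed6670'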
Example #23
def downgrade():
    LOG.info(_LI("Changing back user_id length for share_networks"))
    op.alter_column("share_networks", "user_id", type_=sa.String(length=36))

    LOG.info(_LI("Changing back project_id length for share_networks"))
    op.alter_column("share_networks", "project_id", type_=sa.String(length=36))

    LOG.info(_LI("Changing back project_id length for security_services"))
    op.alter_column("security_services",
                    "project_id",
                    type_=sa.String(length=36))
Example #24
        def _provide_share_server_for_share():
            try:
                available_share_servers = get_available_share_servers()
            except exception.ShareServerNotFound:
                available_share_servers = None

            compatible_share_server = None

            if available_share_servers:
                try:
                    compatible_share_server = (
                        self.driver.choose_share_server_compatible_with_share(
                            context, available_share_servers, share,
                            snapshot=snapshot
                        )
                    )
                except Exception as e:
                    with excutils.save_and_reraise_exception():
                        error(_LE("Cannot choose compatible share-server: %s"),
                              e)

            if not compatible_share_server:
                compatible_share_server = self.db.share_server_create(
                    context,
                    {
                        'host': self.host,
                        'share_network_id': share_network_id,
                        'status': constants.STATUS_CREATING
                    }
                )

            msg = "Using share_server %(share_server)s for share %(share_id)s"
            LOG.debug(msg, {
                'share_server': compatible_share_server['id'],
                'share_id': share['id']
            })

            share_ref = self.db.share_update(
                context,
                share['id'],
                {'share_server_id': compatible_share_server['id']},
            )

            if compatible_share_server['status'] == constants.STATUS_CREATING:
                # Create share server on backend with data from db.
                compatible_share_server = self._setup_server(
                    context, compatible_share_server)
                LOG.info(_LI("Share server created successfully."))
            else:
                LOG.info(_LI("Used preexisting share server "
                             "'%(share_server_id)s'"),
                         {'share_server_id': compatible_share_server['id']})
            return compatible_share_server, share_ref
Example #25
def downgrade():
    LOG.info(_LI("Changing back user_id length for share_networks"))
    op.alter_column("share_networks", "user_id",
                    type_=sa.String(length=36))

    LOG.info(_LI("Changing back project_id length for share_networks"))
    op.alter_column("share_networks", "project_id",
                    type_=sa.String(length=36))

    LOG.info(_LI("Changing back project_id length for security_services"))
    op.alter_column("security_services", "project_id",
                    type_=sa.String(length=36))
Example #26
    def update_access(self, share, share_name, rules):
        """Replaces the list of access rules known to the backend storage."""

        # Ensure rules are valid
        for rule in rules:
            self._validate_access_rule(rule)

        # Sort rules by ascending network size
        new_rules = {rule['access_to']: rule['access_level'] for rule in rules}
        addresses = self._get_sorted_access_rule_addresses(new_rules)

        # Ensure current export policy has the name we expect
        self._ensure_export_policy(share, share_name)
        export_policy_name = self._get_export_policy_name(share)

        # Make temp policy names so this non-atomic workflow remains resilient
        # across process interruptions.
        temp_new_export_policy_name = self._get_temp_export_policy_name()
        temp_old_export_policy_name = self._get_temp_export_policy_name()

        # Create new export policy
        self._client.create_nfs_export_policy(temp_new_export_policy_name)

        # Add new rules to new policy
        for address in addresses:
            self._client.add_nfs_export_rule(
                temp_new_export_policy_name, address,
                self._is_readonly(new_rules[address]))

        # Rename policy currently in force
        LOG.info(_LI('Renaming NFS export policy for share %(share)s to '
                     '%(policy)s.') %
                 {'share': share_name, 'policy': temp_old_export_policy_name})
        self._client.rename_nfs_export_policy(export_policy_name,
                                              temp_old_export_policy_name)

        # Switch share to the new policy
        LOG.info(_LI('Setting NFS export policy for share %(share)s to '
                     '%(policy)s.') %
                 {'share': share_name, 'policy': temp_new_export_policy_name})
        self._client.set_nfs_export_policy_for_volume(
            share_name, temp_new_export_policy_name)

        # Delete old policy
        self._client.soft_delete_nfs_export_policy(temp_old_export_policy_name)

        # Rename new policy to its final name
        LOG.info(_LI('Renaming NFS export policy for share %(share)s to '
                     '%(policy)s.') %
                 {'share': share_name, 'policy': export_policy_name})
        self._client.rename_nfs_export_policy(temp_new_export_policy_name,
                                              export_policy_name)
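The two temporary names are what make this non-atomic swap restartable: at every step either the new policy is fully built or the old one is still intact, so an interruption never leaves the share on a half-populated policy. The same choreography reduced to dictionary operations (all names here are illustrative):

import uuid

policies = {'share_policy': ['10.0.0.0/24']}   # policy name -> rules

def replace_policy(name, new_rules):
    tmp_new = 'temp_%s' % uuid.uuid4().hex[:8]
    tmp_old = 'temp_%s' % uuid.uuid4().hex[:8]
    policies[tmp_new] = list(new_rules)        # 1. build the replacement
    policies[tmp_old] = policies.pop(name)     # 2. move the live policy aside
    # 3. ...switch the share over to tmp_new here...
    del policies[tmp_old]                      # 4. drop the old policy
    policies[name] = policies.pop(tmp_new)     # 5. restore the final name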
Example #27
    def do_setup(self, context):
        """Setup the GlusterFS volumes."""
        super(GlusterfsNativeShareDriver, self).do_setup(context)

        # We don't use a service mount as it's not necessary for us.
        # Do some sanity checks.
        if len(self.configuration.glusterfs_targets) == 0:
            # No volumes specified in the config file. Raise exception.
            msg = (_("glusterfs_targets list seems to be empty! "
                     "Add one or more gluster volumes to work "
                     "with in the glusterfs_targets configuration "
                     "parameter."))
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        LOG.info(
            _LI("Number of gluster volumes read from config: "
                "%(numvols)s"),
            {'numvols': len(self.configuration.glusterfs_targets)})

        try:
            self._execute('mount.glusterfs', check_exit_code=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                msg = (_("mount.glusterfs is not installed."))
                LOG.error(msg)
                raise exception.GlusterfsException(msg)
            else:
                msg = (_("Error running mount.glusterfs."))
                LOG.error(msg)
                raise

        # Update gluster_unused_vols_dict, gluster_used_vols_dict by walking
        # through the DB.
        self._update_gluster_vols_dict(context)
        if len(self.gluster_unused_vols_dict) == 0:
            # No volumes available for use as share. Warn user.
            msg = (_("No unused gluster volumes available for use as share! "
                     "Create share won't be supported unless existing shares "
                     "are deleted or add one or more gluster volumes to work "
                     "with in the glusterfs_targets configuration parameter."))
            LOG.warning(msg)
        else:
            LOG.info(
                _LI("Number of gluster volumes in use:  "
                    "%(inuse-numvols)s. Number of gluster volumes "
                    "available for use as share: %(unused-numvols)s"), {
                        'inuse-numvols': len(self.gluster_used_vols_dict),
                        'unused-numvols': len(self.gluster_unused_vols_dict)
                    })

        self._setup_gluster_vols()
Example #28
    def do_setup(self, context):
        """Setup the GlusterFS volumes."""
        super(GlusterfsNativeShareDriver, self).do_setup(context)

        # We don't use a service mount as it's not necessary for us.
        # Do some sanity checks.
        if len(self.configuration.glusterfs_targets) == 0:
            # No volumes specified in the config file. Raise exception.
            msg = (_("glusterfs_targets list seems to be empty! "
                     "Add one or more gluster volumes to work "
                     "with in the glusterfs_targets configuration "
                     "parameter."))
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        LOG.info(_LI("Number of gluster volumes read from config: "
                     "%(numvols)s"),
                 {'numvols': len(self.configuration.glusterfs_targets)})

        try:
            self._execute('mount.glusterfs', check_exit_code=False)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                msg = (_("mount.glusterfs is not installed."))
                LOG.error(msg)
                raise exception.GlusterfsException(msg)
            else:
                msg = (_("Error running mount.glusterfs."))
                LOG.error(msg)
                raise

        # Update gluster_unused_vols_dict, gluster_used_vols_dict by walking
        # through the DB.
        self._update_gluster_vols_dict(context)
        if len(self.gluster_unused_vols_dict) == 0:
            # No volumes available for use as share. Warn user.
            msg = (_("No unused gluster volumes available for use as share! "
                     "Create share won't be supported unless existing shares "
                     "are deleted or add one or more gluster volumes to work "
                     "with in the glusterfs_targets configuration parameter."))
            LOG.warning(msg)
        else:
            LOG.info(_LI("Number of gluster volumes in use:  "
                         "%(inuse-numvols)s. Number of gluster volumes "
                         "available for use as share: %(unused-numvols)s"),
                     {'inuse-numvols': len(self.gluster_used_vols_dict),
                      'unused-numvols': len(self.gluster_unused_vols_dict)})

        self._setup_gluster_vols()
Example #29
    def unmanage(self, share):
        """Unmanages a share.

        :param share: Share that will be unmanaged.
        """
        self.private_storage.delete(share['id'])

        if len(share['export_locations']) == 0:
            LOG.info(_LI("The share with ID %(shr_id)s is no longer being "
                         "managed."), {'shr_id': share['id']})
        else:
            LOG.info(_LI("The share with current path %(shr_path)s and ID "
                         "%(shr_id)s is no longer being managed."),
                     {'shr_path': share['export_locations'][0]['path'],
                      'shr_id': share['id']})
Example #30
    def delete_cgsnapshot(self, context, snap_dict, share_server=None):
        """Deletes a consistency group snapshot."""
        try:
            vserver, vserver_client = self._get_vserver(
                share_server=share_server)
        except (exception.InvalidInput,
                exception.VserverNotSpecified,
                exception.VserverNotFound) as error:
            LOG.warning(_LW("Could not determine share server for CG snapshot "
                            "being deleted: %(snap)s. Deletion of CG snapshot "
                            "record will proceed anyway. Error: %(error)s"),
                        {'snap': snap_dict['id'], 'error': error})
            return None, None

        share_names = [self._get_valid_share_name(member['share_id'])
                       for member in snap_dict.get('cgsnapshot_members', [])]
        snapshot_name = self._get_valid_cg_snapshot_name(snap_dict['id'])

        for share_name in share_names:
            try:
                self._handle_busy_snapshot(vserver_client, share_name,
                                           snapshot_name)
            except exception.SnapshotNotFound:
                LOG.info(_LI("Snapshot %(snap)s does not exist for share "
                             "%(share)s."),
                         {'snap': snapshot_name, 'share': share_name})
                continue

            LOG.debug("Deleting snapshot %(snap)s for share %(share)s.",
                      {'snap': snapshot_name, 'share': share_name})
            vserver_client.delete_snapshot(share_name, snapshot_name)

        return None, None
Example #31
 def launch_server(self, server, workers=1):
     wrap = ServerWrapper(server, workers)
     self.totalwrap = self.totalwrap + 1
     LOG.info(_LI('Starting %d workers'), wrap.workers)
     while (self.running and len(wrap.children) < wrap.workers
            and not wrap.failed):
         self._start_child(wrap)
Example #32
    def _update_share_stats(self, data=None):
        LOG.debug("Updating Backend Capability Information - Hitachi HSP.")

        reserved = self.configuration.safe_get('reserved_share_percentage')
        max_over_subscription_ratio = self.configuration.safe_get(
            'max_over_subscription_ratio')
        hsp_cluster = self.hsp.get_cluster()

        total_space = hsp_cluster['properties']['total-storage-capacity']
        free_space = hsp_cluster['properties']['total-storage-available']

        data = {
            'share_backend_name': self.backend_name,
            'vendor_name': 'Hitachi',
            'driver_version': '1.0.0',
            'storage_protocol': 'NFS',
            'pools': [{
                'reserved_percentage': reserved,
                'pool_name': 'HSP',
                'thin_provisioning': True,
                'total_capacity_gb': total_space / units.Gi,
                'free_capacity_gb': free_space / units.Gi,
                'max_over_subscription_ratio': max_over_subscription_ratio,
                'qos': False,
                'dedupe': False,
                'compression': False,
            }],
        }

        LOG.info(_LI("Hitachi HSP Capabilities: %(data)s."), {'data': data})
        super(HitachiHSPDriver, self)._update_share_stats(data)
Example #33
    def create_snapshot(self, context, snapshot, share_server=None):
        """Is called to create snapshot."""
        snap_name = snapshot['name']

        pool, project, share_name = self._get_pool_project_share_name(
            snapshot['share'])

        share = {
            'poolName': '%s' % pool,
            'projectName': '%s' % project,
            'name': share_name,
            'availableSize': 0,
            'totalSize': 0,
            'datasetPath': '%s/%s/%s' % (
                pool, TEGILE_LOCAL_CONTAINER_NAME, project),
            'mountpoint': share_name,
            'local': 'true',
        }

        params = (share, snap_name, False)

        LOG.info(_LI('Creating snapshot for share_name=%(shr)s'
                     ' snap_name=%(name)s'),
                 {'shr': share_name, 'name': snap_name})

        self._api('createShareSnapshot', params)
Example #34
    def delete(self, req, id):
        """Delete a share."""
        context = req.environ['manila.context']

        LOG.info(_LI("Delete share with id: %s"), id, context=context)

        try:
            share = self.share_api.get(context, id)

            # NOTE(ameade): If the share is in a consistency group, we require
            # its id be specified as a param.
            if share.get('consistency_group_id'):
                consistency_group_id = req.params.get('consistency_group_id')
                if not consistency_group_id:
                    msg = _("Must provide 'consistency_group_id' as a request "
                            "parameter when deleting a share in a consistency "
                            "group.")
                    raise exc.HTTPBadRequest(explanation=msg)
                elif consistency_group_id != share.get('consistency_group_id'):
                    msg = _("The specified 'consistency_group_id' does not "
                            "match the consistency group id of the share.")
                    raise exc.HTTPBadRequest(explanation=msg)

            self.share_api.delete(context, share)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.InvalidShare as e:
            raise exc.HTTPForbidden(explanation=six.text_type(e))

        return webob.Response(status_int=202)
Example #35
    def do_setup(self, context):
        """Setup the GlusterFS volumes."""
        glusterfs_versions, exceptions = {}, {}
        for srvaddr in self.configuration.glusterfs_servers:
            try:
                glusterfs_versions[srvaddr] = self._glustermanager(
                    srvaddr, False).get_gluster_version()
            except exception.GlusterfsException as exc:
                exceptions[srvaddr] = six.text_type(exc)
        if exceptions:
            for srvaddr, excmsg in six.iteritems(exceptions):
                LOG.error(
                    _LE("'gluster version' failed on server "
                        "%(server)s with: %(message)s"), {
                            'server': srvaddr,
                            'message': excmsg
                        })
            raise exception.GlusterfsException(
                _("'gluster version' failed on servers %s") %
                (','.join(exceptions.keys())))
        notsupp_servers = []
        for srvaddr, vers in six.iteritems(glusterfs_versions):
            if common.GlusterManager.numreduct(
                    vers) < self.driver.GLUSTERFS_VERSION_MIN:
                notsupp_servers.append(srvaddr)
        if notsupp_servers:
            gluster_version_min_str = '.'.join(
                six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
            for srvaddr in notsupp_servers:
                LOG.error(
                    _LE("GlusterFS version %(version)s on server "
                        "%(server)s is not supported, "
                        "minimum requirement: %(minvers)s"), {
                            'server': srvaddr,
                            'version': '.'.join(glusterfs_versions[srvaddr]),
                            'minvers': gluster_version_min_str
                        })
            raise exception.GlusterfsException(
                _("Unsupported GlusterFS version on servers %(servers)s, "
                  "minimum requirement: %(minvers)s") % {
                      'servers': ','.join(notsupp_servers),
                      'minvers': gluster_version_min_str
                  })
        self.glusterfs_versions = glusterfs_versions

        gluster_volumes_initial = set(
            self._fetch_gluster_volumes(filter_used=False))
        if not gluster_volumes_initial:
            # No suitable volumes are found on the Gluster end.
            # Raise exception.
            msg = (_("Gluster backend does not provide any volume "
                     "matching pattern %s") %
                   self.configuration.glusterfs_volume_pattern)
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

        LOG.info(_LI("Found %d Gluster volumes allocated for Manila."),
                 len(gluster_volumes_initial))

        self._check_mount_glusterfs()
Example #36
 def _deallocate_container(self, context, share):
     """Deletes cinder volume."""
     try:
         volume = self._get_volume(context, share['id'])
     except exception.VolumeNotFound:
         LOG.info(_LI("Volume not found. Already deleted?"))
         volume = None
     if volume:
         if volume['status'] == 'in-use':
             raise exception.ManilaException(
                 _('Volume is still in use and '
                   'cannot be deleted now.'))
         self.volume_api.delete(context, volume['id'])
         t = time.time()
         while (time.time() - t <
                self.configuration.max_time_to_create_volume):
             try:
                 volume = self.volume_api.get(context, volume['id'])
             except exception.VolumeNotFound:
                 LOG.debug('Volume was deleted successfully')
                 break
             time.sleep(1)
         else:
             raise exception.ManilaException(
                 _('Volume has not been '
                   'deleted in %ss. Giving up') %
                 self.configuration.max_time_to_create_volume)
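The deletion poll above leans on Python's while/else: the else branch runs only when the loop condition goes false without a break, i.e. when the timeout expires before the volume disappears. A standalone illustration; the resource_gone predicate is hypothetical:

import time

def wait_for_deletion(resource_gone, timeout=5):
    deadline = time.time() + timeout
    while time.time() < deadline:
        if resource_gone():      # hypothetical check, like VolumeNotFound above
            break                # skips the else block
        time.sleep(1)
    else:
        raise RuntimeError('resource still present after %ss' % timeout)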
Example #37
    def _update_share_stats(self, data=None):
        LOG.debug("Updating Backend Capability Information - Hitachi HSP.")

        reserved = self.configuration.safe_get('reserved_share_percentage')
        max_over_subscription_ratio = self.configuration.safe_get(
            'max_over_subscription_ratio')
        hsp_cluster = self.hsp.get_cluster()

        total_space = hsp_cluster['properties']['total-storage-capacity']
        free_space = hsp_cluster['properties']['total-storage-available']

        data = {
            'share_backend_name': self.backend_name,
            'vendor_name': 'Hitachi',
            'driver_version': '1.0.0',
            'storage_protocol': 'NFS',
            'pools': [{
                'reserved_percentage': reserved,
                'pool_name': 'HSP',
                'thin_provisioning': True,
                'total_capacity_gb': total_space / units.Gi,
                'free_capacity_gb': free_space / units.Gi,
                'max_over_subscription_ratio': max_over_subscription_ratio,
                'qos': False,
                'dedupe': False,
                'compression': False,
            }],
        }

        LOG.info(_LI("Hitachi HSP Capabilities: %(data)s."),
                 {'data': data})
        super(HitachiHSPDriver, self)._update_share_stats(data)
Example #38
def reload_ganesha_config(servers, sshlogin, service='ganesha.nfsd'):
    """Request ganesha server reload updated config."""

    # Note:  dynamic reload of ganesha config is not enabled
    # in ganesha v2.0. Therefore, the code uses the ganesha service restart
    # option to make sure the config changes are reloaded
    for server in servers:
        # Until reload is fully implemented and if the reload returns a bad
        # status revert to service restart instead
        LOG.info(_LI('Restart service %(service)s on %(server)s to force a '
                     'config file reload'),
                 {'service': service, 'server': server})
        run_local = True

        reload_cmd = ['service', service, 'restart']
        localserver_iplist = socket.gethostbyname_ex(
            socket.gethostname())[2]
        if server not in localserver_iplist:
            remote_login = sshlogin + '@' + server
            reload_cmd = ['ssh', remote_login] + reload_cmd
            run_local = False
        try:
            utils.execute(*reload_cmd, run_as_root=run_local)
        except exception.ProcessExecutionError as e:
            msg = (_('Could not restart service %(service)s on '
                     '%(server)s: %(excmsg)s')
                   % {'service': service,
                      'server': server,
                      'excmsg': six.text_type(e)})
            LOG.error(msg)
            raise exception.GPFSGaneshaException(msg)
Example #39
    def delete(self, req, id):
        """Delete a share."""
        context = req.environ['manila.context']

        LOG.info(_LI("Delete share with id: %s"), id, context=context)

        try:
            share = self.share_api.get(context, id)

            # NOTE(ameade): If the share is in a share group, we require its
            # id be specified as a param.
            sg_id_key = 'share_group_id'
            if share.get(sg_id_key):
                share_group_id = req.params.get(sg_id_key)
                if not share_group_id:
                    msg = _("Must provide '%s' as a request "
                            "parameter when deleting a share in a share "
                            "group.") % sg_id_key
                    raise exc.HTTPBadRequest(explanation=msg)
                elif share_group_id != share.get(sg_id_key):
                    msg = _("The specified '%s' does not match "
                            "the share group id of the share.") % sg_id_key
                    raise exc.HTTPBadRequest(explanation=msg)

            self.share_api.delete(context, share)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.InvalidShare as e:
            raise exc.HTTPForbidden(explanation=six.text_type(e))
        except exception.Conflict as e:
            raise exc.HTTPConflict(explanation=six.text_type(e))

        return webob.Response(status_int=202)
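In practice this means a share that belongs to a share group can only be
deleted with the group id supplied as a query parameter, along these lines
(the path shown is illustrative):

    DELETE /v2/{project_id}/shares/{share_id}?share_group_id={group_id}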
Example #40
    def __init__(self):
        LOG.info(_LI('Initializing extension manager.'))

        self.cls_list = CONF.osapi_share_extension

        self.extensions = {}
        self._load_extensions()
Example #41
    def create_share(self, context, share, share_server=None):
        """Create a share using GlusterFS volume.

        1 Manila share = 1 GlusterFS volume. Pick an unused
        GlusterFS volume for use as a share.
        """
        try:
            vol = self._pop_gluster_vol(share['size'])
        except exception.GlusterfsException:
            msg = (_LE("Error creating share %(share_id)s"), {
                'share_id': share['id']
            })
            LOG.error(msg)
            raise

        export = self.driver._setup_via_manager(
            {'share': share, 'manager': self._glustermanager(vol)})
        self.private_storage.update(share['id'], {'volume': vol})

        # TODO(deepakcs): Enable quota and set it to the share size.

        # For native protocol, the export_location should be of the form:
        # server:/volname
        LOG.info(_LI("export_location sent back from create_share: %s"),
                 export)
        return export
Example #42
File: fault.py Project: vkmc/manila
    def _error(self, inner, req):
        LOG.exception(_LE("Caught error: %s"), inner)

        safe = getattr(inner, 'safe', False)
        headers = getattr(inner, 'headers', None)
        status = getattr(inner, 'code', 500)
        if status is None:
            status = 500

        msg_dict = dict(url=req.url, status=status)
        LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
        outer = self.status_to_type(status)
        if headers:
            outer.headers = headers
        # NOTE(johannes): We leave the explanation empty here on
        # purpose. It could possibly have sensitive information
        # that should not be returned back to the user. See
        # bugs 868360 and 874472
        # NOTE(eglynn): However, it would be over-conservative and
        # inconsistent with the EC2 API to hide every exception,
        # including those that are safe to expose, see bug 1021373
        if safe:
            outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                            six.text_type(inner))
        return wsgi.Fault(outer)
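A minimal illustration of the 'safe' contract (the exception class below is
hypothetical, not part of manila):

    class QuotaExceeded(Exception):
        # Marked safe: its message contains nothing sensitive, so _error()
        # exposes 'QuotaExceeded: <message>' as the HTTP explanation.
        safe = True
        code = 413

    # Any exception without safe=True keeps the default empty explanation,
    # no matter what its message says.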
Example #43
    def _unmanage(self, req, id, body=None):
        """Unmanage a share snapshot."""
        context = req.environ['manila.context']

        LOG.info(_LI("Unmanage share snapshot with id: %s."), id)

        try:
            snapshot = self.share_api.get_snapshot(context, id)

            share = self.share_api.get(context, snapshot['share_id'])
            if share.get('share_server_id'):
                msg = _("Operation 'unmanage_snapshot' is not supported for "
                        "snapshots of shares that are created with share"
                        " servers (created with share-networks).")
                raise exc.HTTPForbidden(explanation=msg)
            elif snapshot['status'] in constants.TRANSITIONAL_STATUSES:
                msg = _("Snapshot with transitional state cannot be "
                        "unmanaged. Snapshot '%(s_id)s' is in '%(state)s' "
                        "state.") % {
                            'state': snapshot['status'],
                            's_id': snapshot['id']
                        }
                raise exc.HTTPForbidden(explanation=msg)

            self.share_api.unmanage_snapshot(context, snapshot, share['host'])
        except (exception.ShareSnapshotNotFound,
                exception.ShareNotFound) as e:
            raise exc.HTTPNotFound(explanation=six.text_type(e))

        return webob.Response(status_int=202)
Example #44
    def delete(self, req, id):
        """Delete a share."""
        context = req.environ['manila.context']

        LOG.info(_LI("Delete share with id: %s"), id, context=context)

        try:
            share = self.share_api.get(context, id)

            # NOTE(ameade): If the share is in a consistency group, we require
            # its id be specified as a param.
            if share.get('consistency_group_id'):
                consistency_group_id = req.params.get('consistency_group_id')
                if not consistency_group_id:
                    msg = _("Must provide 'consistency_group_id' as a request "
                            "parameter when deleting a share in a consistency "
                            "group.")
                    raise exc.HTTPBadRequest(explanation=msg)
                elif consistency_group_id != share.get('consistency_group_id'):
                    msg = _("The specified 'consistency_group_id' does not "
                            "match the consistency group id of the share.")
                    raise exc.HTTPBadRequest(explanation=msg)

            self.share_api.delete(context, share)
        except exception.NotFound:
            raise exc.HTTPNotFound()
        except exception.InvalidShare as e:
            raise exc.HTTPForbidden(explanation=six.text_type(e))
        except exception.Conflict as e:
            raise exc.HTTPConflict(explanation=six.text_type(e))

        return webob.Response(status_int=202)
Example #45
    def delete_share(self, context, share, share_server=None):
        LOG.debug("Deleting share in HSP: %(shr)s.", {'shr': share['id']})

        filesystem_id = hsp_share_id = None

        try:
            filesystem_id = self.hsp.get_file_system(share['id'])['id']
            hsp_share_id = self.hsp.get_share(filesystem_id)['id']
        except exception.HSPItemNotFoundException:
            LOG.info(_LI("Share %(shr)s already removed from backend."),
                     {'shr': share['id']})

        if hsp_share_id:
            # Clean all rules from share before deleting it
            current_rules = self.hsp.get_access_rules(hsp_share_id)
            for rule in current_rules:
                try:
                    self.hsp.delete_access_rule(hsp_share_id,
                                                rule['name'])
                except exception.HSPBackendException as e:
                    if 'No matching access rule found.' in e.msg:
                        LOG.debug("Rule %(rule)s already deleted in "
                                  "backend.", {'rule': rule['name']})
                    else:
                        raise

            self.hsp.delete_share(hsp_share_id)

        if filesystem_id:
            self.hsp.delete_file_system(filesystem_id)

        LOG.debug("Export and share successfully deleted: %(shr)s.",
                  {'shr': share['id']})
Example #46
    def extend_share(self, context, share_id, new_size, reservations):
        context = context.elevated()
        share = self.db.share_get(context, share_id)
        share_server = self._get_share_server(context, share)
        project_id = share['project_id']

        try:
            self.driver.extend_share(
                share, new_size, share_server=share_server)
        except Exception as e:
            LOG.exception(_LE("Extend share failed."), resource=share)

            try:
                self.db.share_update(
                    context, share['id'],
                    {'status': constants.STATUS_EXTENDING_ERROR}
                )
                raise exception.ShareExtendingError(
                    reason=six.text_type(e), share_id=share_id)
            finally:
                QUOTAS.rollback(context, reservations, project_id=project_id)

        QUOTAS.commit(context, reservations, project_id=project_id)

        share_update = {
            'size': int(new_size),
            # NOTE(u_glide): translation to lower case should be removed in
            # a row with usage of upper case of share statuses in all places
            'status': constants.STATUS_AVAILABLE.lower()
        }
        share = self.db.share_update(context, share['id'], share_update)

        LOG.info(_LI("Extend share completed successfully."), resource=share)
Example #47
    def _update_host_state_map(self, context):

        # Get resource usage across the available share nodes:
        topic = CONF.share_topic
        share_services = db.service_get_all_by_topic(context, topic)

        for service in share_services:
            host = service['host']

            # Warn about down services and remove them from host_state_map
            if not utils.service_is_up(service) or service['disabled']:
                LOG.warning(_LW("Share service is down. (host: %s).") % host)
                if self.host_state_map.pop(host, None):
                    LOG.info(_LI("Removing non-active host: %s from "
                                 "scheduler cache.") % host)
                continue

            # Create and register host_state if not in host_state_map
            capabilities = self.service_states.get(host, None)
            host_state = self.host_state_map.get(host)
            if not host_state:
                host_state = self.host_state_cls(
                    host,
                    capabilities=capabilities,
                    service=dict(six.iteritems(service)))
                self.host_state_map[host] = host_state

            # Update capabilities and attributes in host_state
            host_state.update_from_share_capability(
                capabilities, service=dict(six.iteritems(service)))
Example #48
    def delete_share(self, context, share, share_server=None):
        LOG.debug("Deleting share in HSP: %(shr)s.", {'shr': share['id']})

        filesystem_id = hsp_share_id = None

        try:
            filesystem_id = self.hsp.get_file_system(share['id'])['id']
            hsp_share_id = self.hsp.get_share(filesystem_id)['id']
        except exception.HSPItemNotFoundException:
            LOG.info(_LI("Share %(shr)s already removed from backend."),
                     {'shr': share['id']})

        if hsp_share_id:
            # Clean all rules from share before deleting it
            current_rules = self.hsp.get_access_rules(hsp_share_id)
            for rule in current_rules:
                try:
                    self.hsp.delete_access_rule(hsp_share_id, rule['name'])
                except exception.HSPBackendException as e:
                    if 'No matching access rule found.' in e.msg:
                        LOG.debug(
                            "Rule %(rule)s already deleted in "
                            "backend.", {'rule': rule['name']})
                    else:
                        raise

            self.hsp.delete_share(hsp_share_id)

        if filesystem_id:
            self.hsp.delete_file_system(filesystem_id)

        LOG.debug("Export and share successfully deleted: %(shr)s.",
                  {'shr': share['id']})
Example #49
    def delete_snapshot(self, context, snapshot, share_server=None):
        """Deletes a snapshot of a share."""
        try:
            vserver, vserver_client = self._get_vserver(
                share_server=share_server)
        except (exception.InvalidInput,
                exception.VserverNotSpecified,
                exception.VserverNotFound) as error:
            LOG.warning(_LW("Could not determine share server for snapshot "
                            "being deleted: %(snap)s. Deletion of snapshot "
                            "record will proceed anyway. Error: %(error)s"),
                        {'snap': snapshot['id'], 'error': error})
            return

        share_name = self._get_valid_share_name(snapshot['share_id'])
        snapshot_name = self._get_valid_snapshot_name(snapshot['id'])

        try:
            self._handle_busy_snapshot(vserver_client, share_name,
                                       snapshot_name)
        except exception.SnapshotNotFound:
            LOG.info(_LI("Snapshot %s does not exist."), snapshot_name)
            return

        LOG.debug('Deleting snapshot %(snap)s for share %(share)s.',
                  {'snap': snapshot_name, 'share': share_name})
        vserver_client.delete_snapshot(share_name, snapshot_name)
Example #50
    def _get_managed_ports(self, port_conf, sp):
        # Get the real ports from the backend storage
        real_ports = set([port.id for port in self.client.get_ip_ports(sp)])

        if not port_conf:
            LOG.debug("No ports are specified, so all ports in storage "
                      "system will be managed.")
            return real_ports

        matched_ports, unmanaged_ports = unity_utils.do_match(
            real_ports, port_conf)

        if not matched_ports:
            msg = (_("All the specified storage ports to be managed "
                     "do not exist. Please check your configuration "
                     "emc_interface_ports in manila.conf. "
                     "The available ports in the backend are %s") %
                   ",".join(real_ports))
            raise exception.BadConfigurationException(reason=msg)

        if unmanaged_ports:
            LOG.info(
                _LI("The following specified ports "
                    "are not managed by the backend: "
                    "%(un_managed)s. This host will only manage "
                    "the storage ports: %(exist)s"), {
                        'un_managed': ",".join(unmanaged_ports),
                        'exist': ",".join(matched_ports)
                    })
        else:
            LOG.debug("Ports: %s will be managed.", ",".join(matched_ports))

        return matched_ports
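Assuming unity_utils.do_match() glob-matches the configured whitelist
against the real port ids (an assumption; the snippet does not show its
implementation), a toy re-implementation would look like:

    import fnmatch

    def do_match(real_ports, port_conf):
        # matched: real ports covered by at least one configured pattern.
        matched = {p for p in real_ports
                   if any(fnmatch.fnmatch(p, c) for c in port_conf)}
        # unmatched: configured patterns that hit no real port; these feed
        # the informational log above.
        unmatched = {c for c in port_conf
                     if not any(fnmatch.fnmatch(p, c) for p in real_ports)}
        return matched, unmatched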
Example #51
    def unmanage(self, req, id):
        """Unmanage a share."""
        context = req.environ['manila.context']
        authorize(context)

        LOG.info(_LI("Unmanage share with id: %s"), id, context=context)

        try:
            share = self.share_api.get(context, id)
            if share.get('share_server_id'):
                msg = _("Operation 'unmanage' is not supported for shares "
                        "that are created on top of share servers "
                        "(created with share-networks).")
                raise exc.HTTPForbidden(explanation=msg)
            elif share['status'] in constants.TRANSITIONAL_STATUSES:
                msg = _("Share with transitional state can not be unmanaged. "
                        "Share '%(s_id)s' is in '%(state)s' state.") % dict(
                            state=share['status'], s_id=share['id'])
                raise exc.HTTPForbidden(explanation=msg)
            snapshots = self.share_api.db.share_snapshot_get_all_for_share(
                context, id)
            if snapshots:
                msg = _("Share '%(s_id)s' can not be unmanaged because it has "
                        "'%(amount)s' dependent snapshot(s).") % {
                            's_id': id, 'amount': len(snapshots)}
                raise exc.HTTPForbidden(explanation=msg)
            self.share_api.unmanage(context, share)
        except exception.NotFound as e:
            raise exc.HTTPNotFound(explanation=six.text_type(e))
        except (exception.InvalidShare, exception.PolicyNotAuthorized) as e:
            raise exc.HTTPForbidden(explanation=six.text_type(e))

        return webob.Response(status_int=202)
Example #52
    def create_share(self, context, share, share_server=None):
        """Create a CephFS volume.

        :param context: A RequestContext.
        :param share: A Share.
        :param share_server: Always None for CephFS native.
        :return: The export locations dictionary.
        """

        # `share` is a Share
        LOG.debug(
            "create_share {be} name={id} size={size} cg_id={cg}".format(
                be=self.backend_name, id=share["id"], size=share["size"],
                cg=share["consistency_group_id"]))

        extra_specs = share_types.get_extra_specs_from_share(share)
        data_isolated = extra_specs.get("cephfs:data_isolated", False)

        size = self._to_bytes(share["size"])

        # Create the CephFS volume
        volume = self.volume_client.create_volume(
            self._share_path(share), size=size, data_isolated=data_isolated)

        # To mount this you need to know the mon IPs and the path to the
        # volume
        mon_addrs = self.volume_client.get_mon_addrs()

        export_location = "{addrs}:{path}".format(
            addrs=",".join(mon_addrs), path=volume["mount_path"])

        LOG.info(_LI("Calculated export location for share %(id)s: %(loc)s"),
                 {"id": share["id"], "loc": export_location})

        return {"path": export_location, "is_admin_only": False,
                "metadata": {}}
Example #53
    def _get_managed_storage_pools(self, pools):
        matched_pools = set()
        if pools:
            # Get the real pools from the backend storage
            status, backend_pools = self._get_context('StoragePool').get_all()
            if status != constants.STATUS_OK:
                message = (_("Failed to get storage pool information. "
                             "Reason: %s") % backend_pools)
                LOG.error(message)
                raise exception.EMCVnxXMLAPIError(err=message)

            real_pools = set([item for item in backend_pools])
            conf_pools = set([item.strip() for item in pools])
            matched_pools, unmatched_pools = vnx_utils.do_match_any(
                real_pools, conf_pools)

            if not matched_pools:
                msg = (_("None of the specified storage pools to be managed "
                         "exist. Please check your configuration "
                         "emc_nas_pool_names in manila.conf. "
                         "The available pools in the backend are %s.") %
                       ",".join(real_pools))
                raise exception.InvalidParameterValue(err=msg)

            LOG.info(_LI("Storage pools: %s will be managed."),
                     ",".join(matched_pools))
        else:
            LOG.debug("No storage pool is specified, so all pools "
                      "in storage system will be managed.")
        return matched_pools
Example #54
    def shrink(self, context, share, new_size):
        policy.check_policy(context, 'share', 'shrink')

        status = six.text_type(share['status']).lower()
        valid_statuses = (constants.STATUS_AVAILABLE,
                          constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR)

        if status not in valid_statuses:
            msg_params = {
                'valid_status': ", ".join(valid_statuses),
                'share_id': share['id'],
                'status': status,
            }
            msg = _("Share %(share_id)s status must in (%(valid_status)s) "
                    "to shrink, but current status is: "
                    "%(status)s.") % msg_params
            raise exception.InvalidShare(reason=msg)

        size_decrease = int(share['size']) - int(new_size)
        if size_decrease <= 0 or new_size <= 0:
            msg = (_("New size for shrink must be less "
                     "than current size and greater than 0 (current: %(size)s,"
                     " new: %(new_size)s)") % {'new_size': new_size,
                                               'size': share['size']})
            raise exception.InvalidInput(reason=msg)

        self.update(context, share, {'status': constants.STATUS_SHRINKING})
        self.share_rpcapi.shrink_share(context, share, new_size)
        LOG.info(_LI("Shrink share (id=%(id)s) request issued successfully."
                     " New size: %(size)s") % {'id': share['id'],
                                               'size': new_size})
Example #55
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""

        LOG.info(_LI("%(method)s %(url)s"),
                 {"method": request.method, "url": request.url})
        if self.support_api_request_version:
            # Set the version of the API requested based on the header
            try:
                request.set_api_version_request()
            except exception.InvalidAPIVersionString as e:
                return Fault(
                    webob.exc.HTTPBadRequest(explanation=six.text_type(e)))
            except exception.InvalidGlobalAPIVersion as e:
                return Fault(
                    webob.exc.HTTPNotAcceptable(explanation=six.text_type(e)))

        # Identify the action, its arguments, and the requested
        # content type
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)
        content_type, body = self.get_body(request)
        accept = request.best_match_content_type()

        # NOTE(Vek): Splitting the function up this way allows for
        #            auditing by external tools that wrap the existing
        #            function.  If we try to audit __call__(), we can
        #            run into troubles due to the @webob.dec.wsgify()
        #            decorator.
        return self._process_stack(request, action, action_args, content_type,
                                   body, accept)
Example #56
    def _get_managed_storage_pools(self, pools):
        matched_pools = set()
        if pools:
            # Get the real pools from the backend storage
            status, backend_pools = self._get_context('StoragePool').get_all()
            if status != constants.STATUS_OK:
                message = (_("Failed to get storage pool information. "
                             "Reason: %s") % backend_pools)
                LOG.error(message)
                raise exception.EMCVmaxXMLAPIError(err=message)

            real_pools = set([item for item in backend_pools])
            conf_pools = set([item.strip() for item in pools])
            matched_pools, unmatched_pools = vmax_utils.do_match_any(
                real_pools, conf_pools)

            if not matched_pools:
                msg = (_("None of the specified storage pools to be managed "
                         "exist. Please check your configuration "
                         "emc_nas_pool_names in manila.conf. "
                         "The available pools in the backend are %s.") %
                       ",".join(real_pools))
                raise exception.InvalidParameterValue(err=msg)

            LOG.info(_LI("Storage pools: %s will be managed."),
                     ",".join(matched_pools))
        else:
            LOG.debug("No storage pool is specified, so all pools "
                      "in storage system will be managed.")
        return matched_pools