def get_gluster_version(self):
    """Retrieve GlusterFS version.

    Runs 'gluster --version' through the manager's executor and parses
    the leading "glusterfs X.Y[.Z...]" token of the output.

    :returns: version (as a list of strings, example:
              ['3', '6', '0beta2']) -- note: despite older docs claiming
              a tuple, a list is what split('.') yields and is returned.
    :raises GlusterfsException: if the command fails or the output
                                cannot be parsed.
    """
    try:
        out, err = self.gluster_call('--version')
    except exception.ProcessExecutionError as exc:
        # NOTE(review): 'exc.message' is a Python 2-era attribute;
        # on Python 3 this would need six.text_type(exc) -- confirm
        # against the target runtime.
        raise exception.GlusterfsException(
            _("'gluster version' failed on server "
              "%(server)s: %(message)s") % {
                'server': self.host,
                'message': exc.message})
    try:
        owords = out.split()
        if owords[0] != 'glusterfs':
            raise RuntimeError
        vers = owords[1].split('.')
        # provoke an exception if vers does not start with two numerals
        int(vers[0])
        int(vers[1])
    except Exception:
        raise exception.GlusterfsException(
            _("Cannot parse version info obtained from server "
              "%(server)s, version info: %(info)s") % {
                'server': self.host,
                'info': out})
    return vers
def do_setup(self, context):
    """Prepare the backend and the appropriate NAS helpers.

    Validates the configured target, builds the Gluster manager,
    verifies the GlusterFS version, checks mount.glusterfs availability,
    enables quota support on the volume and mounts it.
    """
    target = self.configuration.glusterfs_target
    if not target:
        raise exception.GlusterfsException(
            _('glusterfs_target configuration that specifies the GlusterFS'
              ' volume to be mounted on the Manila host is not set.'))
    self.gluster_manager = self._glustermanager(target)
    self.gluster_manager.check_gluster_version(
        self.driver.GLUSTERFS_VERSION_MIN)
    self._check_mount_glusterfs()

    # Shares of a specific size rely on the volume's quota support, so
    # turn quota on for the backing GlusterFS volume.
    quota_args = ('volume', 'quota', self.gluster_manager.volume, 'enable')
    try:
        self.gluster_manager.gluster_call(*quota_args)
    except exception.ProcessExecutionError as exc:
        quota_state = self.gluster_manager.get_gluster_vol_option(
            'features.quota')
        # A failure is tolerated when quota is already enabled.
        if quota_state != 'on':
            LOG.error(_LE("Error in tuning GlusterFS volume to enable "
                          "creation of shares of specific size: %s"),
                      exc.stderr)
            raise exception.GlusterfsException(exc)

    self._ensure_gluster_vol_mounted()
def delete_snapshot(self, context, snapshot, share_server=None):
    """Deletes a snapshot.

    :param snapshot: snapshot model; its share's export location selects
                     the GlusterFS volume manager to operate through.
    :raises GlusterfsException: if the snapshot delete command fails or
                                its XML response reports an error.
    """
    vol = snapshot['share']['export_location']
    gluster_mgr = self.gluster_used_vols_dict[vol]
    args = ('--xml', 'snapshot', 'delete', snapshot['id'],
            '--mode=script')
    try:
        out, err = gluster_mgr.gluster_call(*args)
    except exception.ProcessExecutionError as exc:
        # The previous messages were copy-pasted from the volume-info
        # code path; they now correctly describe a snapshot delete.
        LOG.error(_LE("Error deleting snapshot: %s"), exc.stderr)
        raise exception.GlusterfsException(
            _("gluster %s failed") % ' '.join(args))
    if not out:
        raise exception.GlusterfsException(
            _('gluster snapshot delete %s: no data received') %
            gluster_mgr.volume)
    outxml = etree.fromstring(out)
    opret = int(outxml.find('opRet').text)
    operrno = int(outxml.find('opErrno').text)
    operrstr = outxml.find('opErrstr').text
    if opret:
        raise exception.GlusterfsException(
            _("Deleting snapshot %(snap_id)s of share %(share_id)s failed "
              "with %(errno)d: %(errstr)s") % {
                'snap_id': snapshot['id'],
                'share_id': snapshot['share_id'],
                'errno': operrno,
                'errstr': operrstr})
def _get_gluster_vol_option(self, option):
    """Look up *option* among the options set on the backing volume.

    :param option: name of the GlusterFS volume option
    :returns: the option's value, or None if the option is not set
    :raises GlusterfsException: on command failure or empty output
    :raises InvalidShare: if the volume name matches more than one volume
    """
    try:
        cmd, cmdkw = self.gluster_address.make_gluster_args(
            '--xml', 'volume', 'info', self.gluster_address.volume)
        out, err = self._execute(*cmd, **cmdkw)
    except exception.ProcessExecutionError as exc:
        LOG.error(_LE("Error retrieving volume info: %s"), exc.stderr)
        raise exception.GlusterfsException(exc)

    if not out:
        raise exception.GlusterfsException(
            'Empty answer from gluster command'
        )

    volxml = etree.fromstring(out)
    volcount = int(volxml.find('./volInfo/volumes/count').text)
    if volcount != 1:
        raise exception.InvalidShare('Volume name ambiguity')
    for optelem in volxml.findall(".//option"):
        name, value = [optelem.find(tag).text for tag in ('name', 'value')]
        if name == option:
            return value
def _restart_gluster_vol(gluster_mgr):
    """Restart a GlusterFS volume through its manager.

    :param gluster_mgr: GlusterManager instance
    :raises GlusterfsException: if either the stop or the start fails
    """
    # TODO(csaba): '--mode=script' forces the Gluster CLI into script
    # mode. That should be redundant here (non-tty stdin normally implies
    # non-interactive mode), but in practice the CLI behaves otherwise;
    # this contrary behavior still needs investigation.
    try:
        gluster_mgr.gluster_call('volume', 'stop', gluster_mgr.volume,
                                 '--mode=script')
    except exception.ProcessExecutionError as perr:
        msg = (_("Error stopping gluster volume. "
                 "Volume: %(volname)s, Error: %(error)s") %
               {'volname': gluster_mgr.volume, 'error': perr.stderr})
        LOG.error(msg)
        raise exception.GlusterfsException(msg)

    try:
        gluster_mgr.gluster_call('volume', 'start', gluster_mgr.volume)
    except exception.ProcessExecutionError as perr:
        msg = (_("Error starting gluster volume. "
                 "Volume: %(volname)s, Error: %(error)s") %
               {'volname': gluster_mgr.volume, 'error': perr.stderr})
        LOG.error(msg)
        raise exception.GlusterfsException(msg)
def do_setup(self, context):
    """Setup the GlusterFS volumes.

    Surveys all configured servers for their GlusterFS version, rejects
    the setup if any server is unreachable or below the driver's minimum
    version, then verifies that at least one volume matches the
    configured volume pattern and that mount.glusterfs is available.
    """
    # Phase 1: collect per-server version info; remember failures
    # instead of aborting on the first one so all of them get logged.
    glusterfs_versions, exceptions = {}, {}
    for srvaddr in self.configuration.glusterfs_servers:
        try:
            glusterfs_versions[srvaddr] = self._glustermanager(
                srvaddr, False).get_gluster_version()
        except exception.GlusterfsException as exc:
            exceptions[srvaddr] = six.text_type(exc)
    if exceptions:
        for srvaddr, excmsg in six.iteritems(exceptions):
            LOG.error(_LE("'gluster version' failed on server "
                          "%(server)s with: %(message)s"),
                      {'server': srvaddr, 'message': excmsg})
        raise exception.GlusterfsException(
            _("'gluster version' failed on servers %s") %
            (','.join(exceptions.keys())))

    # Phase 2: enforce the driver's minimum GlusterFS version on every
    # server, again reporting all offenders before raising.
    notsupp_servers = []
    for srvaddr, vers in six.iteritems(glusterfs_versions):
        if common.GlusterManager.numreduct(
                vers) < self.driver.GLUSTERFS_VERSION_MIN:
            notsupp_servers.append(srvaddr)
    if notsupp_servers:
        gluster_version_min_str = '.'.join(
            six.text_type(c) for c in self.driver.GLUSTERFS_VERSION_MIN)
        for srvaddr in notsupp_servers:
            LOG.error(_LE("GlusterFS version %(version)s on server "
                          "%(server)s is not supported, "
                          "minimum requirement: %(minvers)s"),
                      {'server': srvaddr,
                       'version': '.'.join(glusterfs_versions[srvaddr]),
                       'minvers': gluster_version_min_str})
        raise exception.GlusterfsException(
            _("Unsupported GlusterFS version on servers %(servers)s, "
              "minimum requirement: %(minvers)s") %
            {'servers': ','.join(notsupp_servers),
             'minvers': gluster_version_min_str})
    self.glusterfs_versions = glusterfs_versions

    # Phase 3: make sure the backend actually offers usable volumes.
    gluster_volumes_initial = set(
        self._fetch_gluster_volumes(filter_used=False))
    if not gluster_volumes_initial:
        # No suitable volumes are found on the Gluster end.
        # Raise exception.
        msg = (_("Gluster backend does not provide any volume "
                 "matching pattern %s") %
               self.configuration.glusterfs_volume_pattern)
        LOG.error(msg)
        raise exception.GlusterfsException(msg)
    LOG.info(_LI("Found %d Gluster volumes allocated for Manila."),
             len(gluster_volumes_initial))
    self._check_mount_glusterfs()
def _setup_via_manager(self, share_manager, share_manager_parent=None):
    """Tune the share's volume for export and return its export location.

    :param share_manager: dict with 'share' and 'manager' keys
    :param share_manager_parent: like share_manager, for the parent share
                                 (unused here, accepted for interface
                                 compatibility)
    """
    gluster_manager = share_manager['manager']
    # TODO(csaba): This should be refactored into proper dispatch to helper
    if self.nfs_helper == GlusterNFSHelper and not gluster_manager.path:
        # Volume layout with Gluster-NFS: the whole volume is exported,
        # so nfs.export-volumes must remain enabled ('on' is the default
        # when the option is unset) and access is curbed via auth.reject.
        export_vol = gluster_manager.get_gluster_vol_option(
            NFS_EXPORT_VOL) or 'on'
        if export_vol.lower() not in ('on', '1', 'true', 'yes', 'enable'):
            raise exception.GlusterfsException(
                _("Gluster-NFS with volume layout should be used "
                  "with `nfs.export-volumes = on`"))
        volume_setting = [NFS_RPC_AUTH_REJECT, '*']
    else:
        # gluster-nfs export of the whole volume must be prohibited
        # to not to defeat access control
        volume_setting = [NFS_EXPORT_VOL, 'off']

    set_args = ['volume', 'set', gluster_manager.volume] + volume_setting
    try:
        gluster_manager.gluster_call(*set_args)
    except exception.ProcessExecutionError as exc:
        LOG.error(_LE("Error in tuning GlusterFS volume to prevent "
                      "exporting the entire volume: %s"), exc.stderr)
        raise exception.GlusterfsException("gluster %s failed" %
                                           ' '.join(set_args))

    helper = self.nfs_helper(self._execute, self.configuration,
                             gluster_manager=gluster_manager)
    return helper.get_export(share_manager['share'])
def delete_snapshot(self, context, snapshot, share_server=None):
    """Remove the backend snapshot that belongs to *snapshot*.

    :raises GlusterfsException: if the delete command fails, yields no
                                output, or reports an error in its XML.
    """
    gluster_mgr = self._share_manager(snapshot['share'])
    snap_name = self._find_actual_backend_snapshot_name(gluster_mgr,
                                                        snapshot)
    delete_args = ('--xml', 'snapshot', 'delete', snap_name,
                   '--mode=script')
    try:
        out, err = gluster_mgr.gluster_call(*delete_args)
    except exception.ProcessExecutionError as exc:
        LOG.error(_LE("Error deleting snapshot: %s"), exc.stderr)
        raise exception.GlusterfsException(
            _("gluster %s failed") % ' '.join(delete_args))

    if not out:
        raise exception.GlusterfsException(
            _('gluster snapshot delete %s: no data received') %
            gluster_mgr.volume)

    xml_root = etree.fromstring(out)
    opret = int(xml_root.find('opRet').text)
    operrno = int(xml_root.find('opErrno').text)
    operrstr = xml_root.find('opErrstr').text
    if not opret:
        return
    raise exception.GlusterfsException(
        _("Deleting snapshot %(snap_id)s of share %(share_id)s failed "
          "with %(errno)d: %(errstr)s") % {
            'snap_id': snapshot['id'],
            'share_id': snapshot['share_id'],
            'errno': operrno,
            'errstr': operrstr})
def _restart_gluster_vol(self, gluster_addr):
    """Stop, then start, the GlusterFS volume behind gluster_addr.

    :param gluster_addr: GlusterAddress of the volume to restart
    :raises GlusterfsException: if either the stop or the start fails
    """
    gargs, gkw = gluster_addr.make_gluster_args('volume', 'stop',
                                                gluster_addr.volume,
                                                '--mode=script')
    try:
        self._execute(*gargs, **gkw)
    except exception.ProcessExecutionError as exc:
        # Interpolate with % -- previously the format string and its
        # parameters were packed into a tuple, so the logged/raised
        # message was never formatted.
        msg = (_("Error stopping gluster volume. "
                 "Volume: %(volname)s, Error: %(error)s") %
               {'volname': gluster_addr.volume, 'error': exc.stderr})
        LOG.error(msg)
        raise exception.GlusterfsException(msg)

    gargs, gkw = gluster_addr.make_gluster_args('volume', 'start',
                                                gluster_addr.volume)
    try:
        self._execute(*gargs, **gkw)
    except exception.ProcessExecutionError as exc:
        msg = (_("Error starting gluster volume. "
                 "Volume: %(volname)s, Error: %(error)s") %
               {'volname': gluster_addr.volume, 'error': exc.stderr})
        LOG.error(msg)
        raise exception.GlusterfsException(msg)
def __init__(self, address, execf=None, path_to_private_key=None,
             remote_server_password=None, requires=None):
    """Initialize a GlusterManager instance.

    :param address: the Gluster URI (either string of
        [<user>@]<host>[:/<volume>[/<path>]] format or component
        dict with "user", "host", "volume", "path" keys).
    :param execf: executor function for management commands.
    :param path_to_private_key: path to private ssh key of remote server.
    :param remote_server_password: ssh password for remote server.
    :param requires: a dict mapping some of the component names to
        either True or False; having it specified, respectively, the
        presence or absence of the given component in the uri will be
        enforced.
    """
    # 'requires' previously defaulted to a shared mutable {}; the None
    # sentinel avoids the mutable-default-argument pitfall while staying
    # call-compatible.
    if requires is None:
        requires = {}
    if isinstance(address, dict):
        # Reassemble the address string from its components so a single
        # parse path handles both input forms.
        tmp_addr = ""
        if address.get('user') is not None:
            tmp_addr = address.get('user') + '@'
        if address.get('host') is not None:
            tmp_addr += address.get('host')
        if address.get('volume') is not None:
            tmp_addr += ':/' + address.get('volume')
        if address.get('path') is not None:
            tmp_addr += address.get('path')
        self.components = self.parse(tmp_addr)
        # Verify that the original dictionary matches the parsed
        # dictionary. This will flag typos such as {'volume': 'vol/err'}
        # in the original dictionary as errors. Additionally,
        # extra keys will need to be flagged as an error.
        sanitized_address = {key: None for key in self.scheme.groupindex}
        sanitized_address.update(address)
        if sanitized_address != self.components:
            raise exception.GlusterfsException(
                _('Invalid gluster address %s.') % address)
    else:
        self.components = self.parse(address)

    for k, v in requires.items():
        if v is None:
            continue
        if (self.components.get(k) is not None) != v:
            raise exception.GlusterfsException(
                _('Invalid gluster address %s.') % address)

    self.path_to_private_key = path_to_private_key
    self.remote_server_password = remote_server_password
    if execf:
        self.gluster_call = self.make_gluster_call(execf)
def _setup_gluster_vols(self):
    """Restrict all unused gluster volumes to SSL-only access.

    For each unused volume: disable whole-volume NFS export, enable
    client and server SSL, then restart the volume so the options take
    effect.

    :raises GlusterfsException: if any 'volume set' call fails.
    """
    for gluster_addr in six.itervalues(self.gluster_unused_vols_dict):
        # The former copy-pasted triplication of the same try/except is
        # folded into one loop over (option, value) pairs.
        for option, value in ((NFS_EXPORT_VOL, 'off'),
                              (CLIENT_SSL, 'on'),
                              (SERVER_SSL, 'on')):
            gargs, gkw = gluster_addr.make_gluster_args(
                'volume', 'set', gluster_addr.volume, option, value)
            try:
                self._execute(*gargs, **gkw)
            except exception.ProcessExecutionError as exc:
                # Interpolate with % -- previously the format string and
                # its parameters were packed into a tuple, so the message
                # was never formatted.
                msg = (_("Error in gluster volume set during volume "
                         "setup. "
                         "Volume: %(volname)s, Option: %(option)s, "
                         "Error: %(error)s") %
                       {'volname': gluster_addr.volume,
                        'option': option,
                        'error': exc.stderr})
                LOG.error(msg)
                raise exception.GlusterfsException(msg)
        # TODO(deepakcs) Remove this once ssl options can be
        # set dynamically.
        self._restart_gluster_vol(gluster_addr)
def do_setup(self, context):
    """Setup the GlusterFS volumes.

    Validates the configured volume list, verifies mount.glusterfs is
    installed, classifies the volumes into used/unused via the DB, and
    tunes the unused ones for use as shares.
    """
    super(GlusterfsNativeShareDriver, self).do_setup(context)

    # We don't use a service mount as its not necessary for us.
    # Do some sanity checks.
    if not self.configuration.glusterfs_targets:
        # No volumes specified in the config file. Raise exception.
        msg = (_("glusterfs_targets list seems to be empty! "
                 "Add one or more gluster volumes to work "
                 "with in the glusterfs_targets configuration "
                 "parameter."))
        LOG.error(msg)
        raise exception.GlusterfsException(msg)

    LOG.info(_LI("Number of gluster volumes read from config: "
                 "%(numvols)s"),
             {'numvols': len(self.configuration.glusterfs_targets)})

    try:
        self._execute('mount.glusterfs', check_exit_code=False)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            msg = (_("mount.glusterfs is not installed."))
            LOG.error(msg)
            raise exception.GlusterfsException(msg)
        else:
            msg = (_("Error running mount.glusterfs."))
            LOG.error(msg)
            raise

    # Update gluster_unused_vols_dict, gluster_used_vols_dict by walking
    # through the DB.
    self._update_gluster_vols_dict(context)
    if not self.gluster_unused_vols_dict:
        # No volumes available for use as share. Warn user.
        msg = (_("No unused gluster volumes available for use as share! "
                 "Create share won't be supported unless existing shares "
                 "are deleted or add one or more gluster volumes to work "
                 "with in the glusterfs_targets configuration parameter."))
        # LOG.warn is deprecated in favor of LOG.warning.
        LOG.warning(msg)
    else:
        LOG.info(_LI("Number of gluster volumes in use: "
                     "%(inuse-numvols)s. Number of gluster volumes "
                     "available for use as share: %(unused-numvols)s"),
                 {'inuse-numvols': len(self.gluster_used_vols_dict),
                  'unused-numvols': len(self.gluster_unused_vols_dict)})

    self._setup_gluster_vols()
def create_snapshot(self, context, snapshot, share_server=None):
    """Creates a snapshot.

    :raises ShareSnapshotNotSupported: if the backing volume is known
        (or reported by GlusterFS >= 3.7) not to support snapshots.
    :raises GlusterfsException: on any other snapshot creation failure.
    """
    gluster_mgr = self._share_manager(snapshot['share'])
    if gluster_mgr.qualified in self.gluster_nosnap_vols_dict:
        # Volume already known not to support snapshots; synthesize the
        # cached error instead of calling out to GlusterFS again.
        opret, operrno = -1, 0
        operrstr = self.gluster_nosnap_vols_dict[gluster_mgr.qualified]
    else:
        args = ('--xml', 'snapshot', 'create', 'manila-' + snapshot['id'],
                gluster_mgr.volume)
        try:
            out, err = gluster_mgr.gluster_call(*args)
        except exception.ProcessExecutionError as exc:
            # The previous messages were copy-pasted from the
            # volume-info code path; they now describe snapshot create.
            LOG.error(_LE("Error creating snapshot: %s"), exc.stderr)
            raise exception.GlusterfsException(
                _("gluster %s failed") % ' '.join(args))
        if not out:
            raise exception.GlusterfsException(
                _('gluster snapshot create %s: no data received') %
                gluster_mgr.volume)
        outxml = etree.fromstring(out)
        opret = int(outxml.find('opRet').text)
        operrno = int(outxml.find('opErrno').text)
        operrstr = outxml.find('opErrstr').text

    if opret == -1:
        vers = self.glusterfs_versions[gluster_mgr.host_access]
        if common.GlusterManager.numreduct(vers) > (3, 6):
            # This logic has not yet been implemented in GlusterFS 3.6
            if operrno == 0:
                self.gluster_nosnap_vols_dict[
                    gluster_mgr.qualified] = operrstr
                msg = _("Share %(share_id)s does not support snapshots: "
                        "%(errstr)s.") % {
                    'share_id': snapshot['share_id'],
                    'errstr': operrstr}
                LOG.error(msg)
                raise exception.ShareSnapshotNotSupported(msg)
        raise exception.GlusterfsException(
            _("Creating snapshot for share %(share_id)s failed "
              "with %(errno)d: %(errstr)s") % {
                'share_id': snapshot['share_id'],
                'errno': operrno,
                'errstr': operrstr})
def xml_response_check(self, xmlout, command, countpath=None):
    """Sanity check for GlusterFS XML response.

    :param xmlout: parsed XML response of a gluster command
    :param command: the command (sequence of words) that produced it
    :param countpath: optional path of a record-count element that must
                      read '1'
    :raises GlusterfsException: on the generic failure response
    :raises InvalidShare: on any other non-success or ambiguous response
    """
    commandstr = ' '.join(command)
    ret = {field: int(volxml_get(xmlout, field))
           for field in ('opRet', 'opErrno')}

    # opRet == -1 with opErrno == 0 is GlusterFS's generic failure.
    if ret == {'opRet': -1, 'opErrno': 0}:
        raise exception.GlusterfsException(_(
            'GlusterFS command %(command)s on volume %(volume)s failed'
        ) % {'volume': self.volume, 'command': command})

    if (ret['opRet'], ret['opErrno']) != (0, 0):
        errdct = {'volume': self.volume, 'command': commandstr,
                  'opErrstr': volxml_get(xmlout, 'opErrstr', default=None)}
        errdct.update(ret)
        raise exception.InvalidShare(_(
            'GlusterFS command %(command)s on volume %(volume)s got '
            'unexpected response: '
            'opRet=%(opRet)s, opErrno=%(opErrno)s, opErrstr=%(opErrstr)s'
        ) % errdct)

    if not countpath:
        return
    count = volxml_get(xmlout, countpath)
    if count != '1':
        raise exception.InvalidShare(
            _('GlusterFS command %(command)s on volume %(volume)s got '
              'ambiguous response: '
              '%(count)s records') % {'volume': self.volume,
                                      'command': commandstr,
                                      'count': count})
def create_share_from_snapshot(self, context, share, snapshot, share_server=None): old_gmgr = self._share_manager(snapshot['share_instance']) # Snapshot clone feature in GlusterFS server essential to support this # API is available in GlusterFS server versions 3.7 and higher. So do # a version check. vers = self.glusterfs_versions[old_gmgr.host_access] minvers = (3, 7) if common.GlusterManager.numreduct(vers) < minvers: minvers_str = '.'.join(six.text_type(c) for c in minvers) vers_str = '.'.join(vers) msg = (_("GlusterFS version %(version)s on server %(server)s does " "not support creation of shares from snapshot. " "minimum requirement: %(minversion)s") % { 'version': vers_str, 'server': old_gmgr.host, 'minversion': minvers_str }) LOG.error(msg) raise exception.GlusterfsException(msg) # Clone the snapshot. The snapshot clone, a new GlusterFS volume # would serve as a share. backend_snapshot_name = self._find_actual_backend_snapshot_name( old_gmgr, snapshot) volume = ''.join(['manila-', share['id']]) args_tuple = (('snapshot', 'activate', backend_snapshot_name, 'force', '--mode=script'), ('snapshot', 'clone', volume, backend_snapshot_name)) for args in args_tuple: out, err = old_gmgr.gluster_call( *args, log=_LE("Creating share from snapshot")) # Get a manager for the the new volume/share. comp_vol = old_gmgr.components.copy() comp_vol.update({'volume': volume}) gmgr = self._glustermanager(comp_vol) export = self.driver._setup_via_manager( { 'share': share, 'manager': gmgr }, { 'share': snapshot['share_instance'], 'manager': old_gmgr }) argseq = (('set', [USER_CLONED_FROM, snapshot['share_id']]), ('set', [USER_MANILA_SHARE, share['id']]), ('start', [])) for op, opargs in argseq: args = ['volume', op, gmgr.volume] + opargs gmgr.gluster_call(*args, log=_LE("Creating share from snapshot")) self.gluster_used_vols.add(gmgr.qualified) self.private_storage.update(share['id'], {'volume': gmgr.qualified}) return export
def deny_access(self, context, share, access, share_server=None):
    """Deny access to a share that's using cert based auth.

    Remove the SSL CN (Common Name) that's allowed to access the server.

    :raises InvalidShareAccess: for any access type other than 'cert'
    :raises GlusterfsException: if the volume reset fails
    """
    if access['access_type'] != ACCESS_TYPE_CERT:
        raise exception.InvalidShareAccess(_("Only 'cert' access type "
                                             "allowed for access "
                                             "removal."))
    exp_locn = share.get('export_location', None)
    gluster_addr = self.gluster_used_vols_dict.get(exp_locn)
    gargs, gkw = gluster_addr.make_gluster_args('volume', 'reset',
                                                gluster_addr.volume,
                                                AUTH_SSL_ALLOW)
    try:
        self._execute(*gargs, **gkw)
    except exception.ProcessExecutionError as exc:
        # Interpolate with % -- previously the format string and its
        # parameters were packed into a tuple, so the message was
        # never formatted.
        msg = (_("Error in gluster volume reset during deny access. "
                 "Volume: %(volname)s, Option: %(option)s, "
                 "Error: %(error)s") %
               {'volname': gluster_addr.volume,
                'option': AUTH_SSL_ALLOW,
                'error': exc.stderr})
        LOG.error(msg)
        raise exception.GlusterfsException(msg)
    # TODO(deepakcs) Remove this once ssl options can be
    # set dynamically.
    self._restart_gluster_vol(gluster_addr)
def _update_share_stats(self):
    """Refresh capacity/capability stats from the GlusterFS mount."""
    # Sanity check for the gluster control mount: if the mount point
    # sits on the same device as its base directory, nothing is mounted.
    base_stat = os.stat(self.configuration.glusterfs_mount_point_base)
    mount_stat = os.stat(self._get_mount_point_for_gluster_vol())
    if base_stat.st_dev == mount_stat.st_dev:
        raise exception.GlusterfsException(
            _("GlusterFS control mount is not available")
        )

    vfs_stat = os.statvfs(self._get_mount_point_for_gluster_vol())
    LOG.debug("Updating share stats")

    frsize = vfs_stat.f_frsize
    self._stats = {
        "share_backend_name": self.backend_name,
        "vendor_name": 'Red Hat',
        "driver_version": '1.0',
        "storage_protocol": 'NFS',
        'reserved_percentage':
            self.configuration.reserved_share_percentage,
        'QoS_support': False,
        # Shift by 30 converts bytes to GiB.
        'total_capacity_gb': (vfs_stat.f_blocks * frsize) >> 30,
        'free_capacity_gb': (vfs_stat.f_bavail * frsize) >> 30,
    }
def __init__(self, address, execf=None, path_to_private_key=None,
             remote_server_password=None, requires=None):
    """Initialize a GlusterManager instance.

    :param address: the Gluster URI (either string of
        [<user>@]<host>[:/<volume>[/<path>]] format or component
        dict with "user", "host", "volume", "path" keys).
    :param execf: executor function for management commands.
    :param path_to_private_key: path to private ssh key of remote server.
    :param remote_server_password: ssh password for remote server.
    :param requires: a dict mapping some of the component names to
        either True or False; having it specified, respectively, the
        presence or absence of the given component in the uri will be
        enforced.
    """
    # 'requires' previously defaulted to a shared mutable {}; the None
    # sentinel avoids the mutable-default-argument pitfall while staying
    # call-compatible.
    if requires is None:
        requires = {}
    self.components = (address if isinstance(address, dict) else
                       self.parse(address))
    for k, v in six.iteritems(requires):
        if v is None:
            continue
        if (self.components.get(k) is not None) != v:
            raise exception.GlusterfsException(
                _('Invalid gluster address %s.') % address)
    self.path_to_private_key = path_to_private_key
    self.remote_server_password = remote_server_password
    if execf:
        self.gluster_call = self.make_gluster_call(execf)
def do_setup(self, context):
    """Native mount the GlusterFS volume and tune it."""
    super(GlusterfsShareDriver, self).do_setup(context)
    self.gluster_address = GlusterAddress(
        self._read_gluster_vol_from_config()
    )
    try:
        self._execute('mount.glusterfs', check_exit_code=False)
    except OSError as exc:
        if exc.errno != errno.ENOENT:
            raise
        raise exception.GlusterfsException(
            _('mount.glusterfs is not installed'))

    self._ensure_gluster_vol_mounted()

    # exporting the whole volume must be prohibited
    # to not to defeat access control
    set_args, set_kw = self.gluster_address.make_gluster_args(
        'volume', 'set', self.gluster_address.volume,
        NFS_EXPORT_VOL, 'off')
    try:
        self._execute(*set_args, **set_kw)
    except exception.ProcessExecutionError as exc:
        LOG.error(_("Error in gluster volume set: %s") % exc.stderr)
        raise
def parse(cls, address):
    """Split a gluster address string into its component dict.

    :param address: gluster URI string
    :returns: dict of the named groups matched by ``cls.scheme``
    :raises GlusterfsException: if the address does not match the scheme
    """
    match = cls.scheme.search(address)
    if match is None:
        raise exception.GlusterfsException(
            _('Invalid gluster address %s.') % address)
    return match.groupdict()
def __init__(self, *args, **kwargs):
    """Pick and instantiate the share layout, then probe its features."""
    super(GlusterfsShareDriverBase, self).__init__(*args, **kwargs)

    self.configuration.append_config_values(glusterfs_share_layout_opts)

    # Fall back to the first supported layout when none is configured.
    layout_name = (self.configuration.glusterfs_share_layout or
                   self.supported_layouts[0])
    if layout_name not in self.supported_layouts:
        raise exception.GlusterfsException(
            _('driver %(driver)s does not support %(layout)s layout') %
            {'driver': type(self).__name__, 'layout': layout_name})

    self.layout = importutils.import_object(
        '.'.join((self.LAYOUT_PREFIX, layout_name)), self, **kwargs)
    # Snapshot support is determined here in our own scope because:
    # 1) the parent-method redefinition based calculation does not work
    #    for us -- concrete glusterfs driver classes subclass *this*
    #    class, not driver.ShareDriver, and they do not redefine the
    #    snapshot methods themselves;
    # 2) snapshot support depends on the choice of layout.
    self._snapshots_are_supported = getattr(self.layout,
                                            '_snapshots_are_supported',
                                            False)
def create_share(self, ctx, share, share_server=None):
    """Create a sub-directory/share in the GlusterFS volume."""
    size_spec = six.text_type(share['size']) + 'GB'
    subdir = '/' + share['name']
    local_share_path = self._get_local_share_path(share)
    mkdir_cmd = ['mkdir', local_share_path]
    quota_args = ('volume', 'quota', self.gluster_manager.volume,
                  'limit-usage', subdir, size_spec)
    try:
        # Create the backing directory, then cap its size with a hard
        # quota limit.
        self.driver._execute(*mkdir_cmd, run_as_root=True)
        self.gluster_manager.gluster_call(*quota_args)
    except Exception as exc:
        # Coerce executor failures to the driver's exception type, and
        # undo the partially created share for any GlusterFS failure.
        if isinstance(exc, exception.ProcessExecutionError):
            exc = exception.GlusterfsException(exc)
        if isinstance(exc, exception.GlusterfsException):
            self._cleanup_create_share(local_share_path, share['name'])
            LOG.error(_LE('Unable to create share %s'), share['name'])
        raise exc

    comp_share = self.gluster_manager.components.copy()
    comp_share['path'] = '/' + share['name']
    return self.driver._setup_via_manager(
        {'share': share, 'manager': self._glustermanager(comp_share)})
def _gluster_call(*args, **kwargs):
    """Run a 'gluster' CLI command with configurable error handling.

    NOTE(review): this appears to be a closure over 'gluster_execf'
    from an enclosing factory (e.g. make_gluster_call) -- confirm in
    the full file.

    Recognized keyword arguments (popped before execution):
    - log: message prefix logged when a failure is coerced
    - error_policy: one of
        'raw'      -- re-raise ProcessExecutionError untouched
        'coerce'   -- wrap failures into GlusterfsException (default)
        'suppress' -- swallow failures, return None
      or a sequence of ints: exit codes to suppress (others coerced;
      note only the first element's type is validated)
    """
    logmsg = kwargs.pop('log', None)
    error_policy = kwargs.pop('error_policy', 'coerce')
    if (error_policy not in ('raw', 'coerce', 'suppress') and
            not isinstance(error_policy[0], int)):
        raise TypeError(_("undefined error_policy %s") %
                        repr(error_policy))
    try:
        return gluster_execf(*(('gluster', ) + args), **kwargs)
    except exception.ProcessExecutionError as exc:
        if error_policy == 'raw':
            raise
        elif error_policy == 'coerce':
            pass
        elif (error_policy == 'suppress' or
              exc.exit_code in error_policy):
            return
        # Reached on 'coerce', or on an int-sequence policy whose codes
        # did not match: convert into the driver's exception type.
        if logmsg:
            LOG.error("%s: GlusterFS instrumentation failed.",
                      logmsg)
        raise exception.GlusterfsException(
            _("GlusterFS management command '%(cmd)s' failed "
              "with details as follows:\n%(details)s.") % {
                'cmd': ' '.join(args),
                'details': exc})
def delete_share(self, context, share, share_server=None):
    """Delete a share on the GlusterFS volume.

    1 Manila share = 1 GlusterFS volume. Put the gluster
    volume back in the available list.

    :raises GlusterfsException: if wiping/recycling the volume or
                                clearing its ownership tag fails.
    """
    gmgr = self._share_manager(share)
    try:
        self._wipe_gluster_vol(gmgr)
        self._push_gluster_vol(gmgr.qualified)
    except exception.GlusterfsException:
        # Pass the interpolation dict as a LOG.error argument --
        # previously message and parameters were packed into a tuple,
        # so the message was logged unformatted.
        LOG.error(_LE("Error during delete_share request for "
                      "share %(share_id)s"), {'share_id': share['id']})
        raise

    self.private_storage.delete(share['id'])

    # Clear the ownership tag on the recycled volume.
    args = ('volume', 'set', gmgr.volume, USER_MANILA_SHARE, 'NONE')
    try:
        gmgr.gluster_call(*args)
    except exception.ProcessExecutionError:
        raise exception.GlusterfsException(
            _("gluster %(cmd)s failed on %(vol)s") %
            {'cmd': ' '.join(args), 'vol': gmgr.qualified})
def _get_local_share_path(self, share):
    """Determine mount path of the GlusterFS volume in the Manila host.

    :param share: share model; only its 'name' is used
    :returns: path of the share's directory under the local mount
    :raises GlusterfsException: if the local mount path is not readable
    """
    local_vol_path = self._get_mount_point_for_gluster_vol()
    if not os.access(local_vol_path, os.R_OK):
        # Wrapped in _() for translation, consistent with the other
        # error messages in this module.
        raise exception.GlusterfsException(
            _('share path %s does not exist') % local_vol_path)
    return os.path.join(local_vol_path, share['name'])
def _push_gluster_vol(self, exp_locn):
    """Return a volume to the pool by dropping it from the used set."""
    if exp_locn not in self.gluster_used_vols:
        msg = _("Couldn't find the share in used list.")
        LOG.error(msg)
        raise exception.GlusterfsException(msg)
    self.gluster_used_vols.remove(exp_locn)
def _setup_gluster_vol(self, vol):
    """Lock a gluster volume down to SSL-only access and restart it.

    :param vol: volume address/identifier accepted by _glustermanager
    :returns: the GlusterManager for the tuned volume
    """
    gluster_mgr = self._glustermanager(vol)
    ssl_allow_opt = gluster_mgr.get_gluster_vol_option(AUTH_SSL_ALLOW)
    if not ssl_allow_opt:
        # An unset AUTH_SSL_ALLOW is a problematic edge case:
        # - GlusterFS 3.6 then allows access to no one, including
        #   intra-service access, which breaks GlusterFS internally;
        # - GlusterFS 3.7 then disables access control altogether,
        #   defeating the purpose of this driver.
        # To avoid both possibilities, refuse to proceed.
        msg = (_("Option %(option)s is not defined on gluster volume. "
                 "Volume: %(volname)s") %
               {'volname': gluster_mgr.volume,
                'option': AUTH_SSL_ALLOW})
        LOG.error(msg)
        raise exception.GlusterfsException(msg)

    for option, value in six.iteritems({NFS_EXPORT_VOL: 'off',
                                        CLIENT_SSL: 'on',
                                        SERVER_SSL: 'on'}):
        try:
            gluster_mgr.gluster_call('volume', 'set', gluster_mgr.volume,
                                     option, value)
        except exception.ProcessExecutionError as exc:
            msg = (_("Error in gluster volume set during volume setup. "
                     "volume: %(volname)s, option: %(option)s, "
                     "value: %(value)s, error: %(error)s") %
                   {'volname': gluster_mgr.volume,
                    'option': option,
                    'value': value,
                    'error': exc.stderr})
            LOG.error(msg)
            raise exception.GlusterfsException(msg)

    # TODO(deepakcs) Remove this once ssl options can be
    # set dynamically.
    self._restart_gluster_vol(gluster_mgr)
    return gluster_mgr
def _push_gluster_vol(self, exp_locn):
    """Move a volume from the used dict back into the unused dict."""
    if exp_locn not in self.gluster_used_vols_dict:
        msg = _("Couldn't find the share in used list.")
        LOG.error(msg)
        raise exception.GlusterfsException(msg)
    gaddr = self.gluster_used_vols_dict.pop(exp_locn)
    self.gluster_unused_vols_dict[exp_locn] = gaddr
def _check_mount_glusterfs(self):
    """Checks if mount.glusterfs(8) is available."""
    try:
        self.driver._execute('mount.glusterfs', check_exit_code=False)
    except OSError as exc:
        # Anything other than "command not found" is unexpected;
        # propagate it unchanged.
        if exc.errno != errno.ENOENT:
            raise
        raise exception.GlusterfsException(
            _('mount.glusterfs is not installed.'))
def __init__(self, address):
    """Initialize from an address string matched against ``self.scheme``.

    Captures the user, host and volume groups and derives the
    host:/volume export string.
    """
    matched = self.scheme.search(address)
    if matched is None:
        raise exception.GlusterfsException('invalid gluster address ' +
                                           address)
    self.remote_user = matched.group('user')
    self.host = matched.group('host')
    self.volume = matched.group('vol')
    self.qualified = address
    self.export = ':/'.join([self.host, self.volume])