def check_for_setup_error(self):
    """Return an error if the prerequisites are not met."""
    conf = self.configuration
    # The MapR client nodes are mandatory; without them nothing works.
    if not conf.maprfs_clinode_ip:
        msg = _(
            'MapR cluster has not been specified in the configuration. '
            'Add the ip or list of ip of nodes with mapr-core installed '
            'in the "maprfs_clinode_ip" configuration parameter.')
        LOG.error(msg)
        raise exception.MapRFSException(msg=msg)
    # CLDB / Zookeeper nodes are optional but recommended; warn only.
    if not conf.maprfs_cldb_ip:
        LOG.warning(_LW('CLDB nodes are not specified!'))
    if not conf.maprfs_zookeeper_ip:
        LOG.warning(_LW('Zookeeper nodes are not specified!'))
    if not self._check_maprfs_state():
        msg = _('MapR-FS is not in healthy state.')
        LOG.error(msg)
        raise exception.MapRFSException(msg=msg)
    # Probe the base volume directory to validate the configured name.
    try:
        self._maprfs_util.maprfs_ls(
            os.path.join(self._base_volume_dir, ''))
    except exception.ProcessExecutionError:
        msg = _('Invalid "maprfs_base_volume_name". No such directory.')
        LOG.exception(msg)
        raise exception.MapRFSException(msg=msg)
def get_access_rules(self, server, share_name):
    """Translate the share's ACL entries into Manila access rules."""
    raw_acls = self._get_acls(server, share_name)
    acls = []
    for entry in raw_acls:
        account = entry["AccountName"]
        level = self._WIN_ACL_MAP[entry["AccessRight"]]
        if entry["AccessControlType"] != self._WIN_ACL_ALLOW:
            # Deny ACEs are never created by Manila itself.
            if account.lower() == "everyone" and len(raw_acls) == 1:
                LOG.debug("No access rules are set yet for share %s",
                          share_name)
            else:
                LOG.warning(
                    _LW("Found explicit deny ACE rule that was not "
                        "created by Manila and will be ignored: %s"),
                    entry)
            continue
        if level == self._ACCESS_LEVEL_CUSTOM:
            LOG.warning(
                _LW("Found 'custom' ACE rule that will be ignored: %s"),
                entry)
            continue
        acls.append(dict(access_to=account,
                         access_level=level,
                         access_type="user"))
    return acls
def get_access_rules(self, server, share_name):
    """Build the list of Manila access rules from the backend ACLs."""
    raw_acls = self._get_acls(server, share_name)
    single_acl = len(raw_acls) == 1
    rules = []
    for raw_acl in raw_acls:
        account_name = raw_acl['AccountName']
        acl_level = self._WIN_ACL_MAP[raw_acl['AccessRight']]
        allowed = raw_acl["AccessControlType"] == self._WIN_ACL_ALLOW
        if not allowed:
            # A lone deny-everyone ACE just means no rules configured yet.
            if single_acl and account_name.lower() == 'everyone':
                LOG.debug("No access rules are set yet for share %s",
                          share_name)
            else:
                LOG.warning(
                    _LW("Found explicit deny ACE rule that was not "
                        "created by Manila and will be ignored: %s"),
                    raw_acl)
            continue
        if acl_level == self._ACCESS_LEVEL_CUSTOM:
            LOG.warning(
                _LW("Found 'custom' ACE rule that will be ignored: %s"),
                raw_acl)
            continue
        rules.append(dict(access_to=account_name,
                          access_level=acl_level,
                          access_type='user'))
    return rules
def deny_access(self, share, access, share_server=None):
    """Deny access to share."""
    share_proto = share['share_proto']
    share_name = share['name']
    url_type = self.helper._get_share_url_type(share_proto)
    client_type = self.helper._get_share_client_type(share_proto)
    access_type = access['access_type']
    # Only one access type is supported per protocol; skip anything else.
    if share_proto == 'NFS' and access_type != 'ip':
        LOG.warning(_LW('Only IP access type is allowed for NFS shares.'))
        return
    if share_proto == 'CIFS' and access_type != 'user':
        LOG.warning(_LW('Only USER access type is allowed for'
                        ' CIFS shares.'))
        return
    backend_share = self.helper._get_share_by_name(share_name, url_type)
    if not backend_share:
        LOG.warning(_LW('Can not get share. share_name: %s'), share_name)
        return
    access_id = self.helper._get_access_from_share(
        backend_share['ID'], access['access_to'], client_type)
    if not access_id:
        LOG.warning(_LW('Can not get access id from share. '
                        'share_name: %s'), share_name)
        return
    self.helper._remove_access_from_share(access_id, client_type)
def _deny_access(self, share_name, access, share_proto):
    """Deny access to share.

    Only 'ip' rules are honored for NFS shares and only 'user' rules
    for CIFS shares; anything else is logged and skipped.
    """
    share_type = self._get_share_type(share_proto)
    share_client_type = self._get_share_client_type(share_proto)
    access_type = access['access_type']
    if share_proto == 'NFS' and access_type != 'ip':
        # LOG.warn is deprecated in favor of LOG.warning.
        LOG.warning(_LW('Only ip access type allowed.'))
        return
    if share_proto == 'CIFS' and access_type != 'user':
        LOG.warning(_LW('Only user access type allowed.'))
        return
    access_to = access['access_to']
    share = self._get_share_by_name(share_name, share_type)
    if not share:
        LOG.warning(_LW('Can not get share. share_name: %s'), share_name)
        return
    access_id = self._get_access_from_share(share['ID'], access_to,
                                            share_client_type)
    if not access_id:
        LOG.warning(_LW('Can not get access id from share. share_name: %s'),
                    share_name)
        return
    self._remove_access_from_share(access_id, share_client_type)
def call(self, method_name, user_parameters):
    """Issue a JSON-RPC call to the Quobyte backend, with retries.

    :param method_name: name of the remote JSON-RPC method
    :param user_parameters: dict of call parameters merged over defaults
    :returns: result of the call, checked for application-level errors
    :raises exception.QBException: on SSL, HTTP or connection failure
    """
    parameters = {'retry': 'INFINITELY'}  # Backend specific setting
    if user_parameters:
        parameters.update(user_parameters)
    call_body = {'jsonrpc': '2.0',
                 'method': method_name,
                 'params': parameters,
                 'id': six.text_type(self._id)}
    self.call_counter = 0
    self._connection.connect()  # prevents httplib timing issue
    while self.call_counter < CONNECTION_RETRIES:
        self.call_counter += 1
        try:
            self._id += 1
            call_body['id'] = six.text_type(self._id)
            LOG.debug("Posting to Quobyte backend: %s",
                      jsonutils.dumps(call_body))
            self._connection.request(
                "POST", self._url + '/',
                jsonutils.dumps(call_body),
                dict(Authorization=(self._credentials.
                                    get_authorization_header())))
            response = self._connection.getresponse()
            self._throw_on_http_error(response)
            result = jsonutils.loads(response.read())
            LOG.debug("Retrieved data from Quobyte backend: %s", result)
            return self._checked_for_application_error(result)
        except ssl.SSLError as e:
            # Generic catch because OpenSSL does not return
            # meaningful errors.
            if (not self._disabled_cert_verification
                    and not self._require_cert_verify):
                LOG.warning(_LW(
                    "Could not verify server certificate of "
                    "API service against CA."))
                self._connection.close()
                # Core HTTPSConnection does no certificate verification.
                self._connection = httplib.HTTPSConnection(self._netloc)
                self._disabled_cert_verification = True
            else:
                raise exception.QBException(_(
                    "Client SSL subsystem returned error: %s") % e)
        except httplib.BadStatusLine as e:
            # FIX: this message was a single string literal that had been
            # broken across a physical line (a syntax error); rejoined.
            raise exception.QBException(_(
                "If SSL is enabled for the API service, the URL must"
                " start with 'https://' for the URL. Failed to parse"
                " status code from server response. Error was %s") % e)
        except (httplib.HTTPException, socket.error) as e:
            if self._fail_fast:
                raise exception.QBException(msg=six.text_type(e))
            else:
                LOG.warning(_LW("Encountered error, retrying: %s"),
                            six.text_type(e))
                time.sleep(1)
    raise exception.QBException("Unable to connect to backend after "
                                "%s retries" %
                                six.text_type(CONNECTION_RETRIES))
def check_node(tt, nodename, required_attrs=None, optional_attrs=None,
               allowed_children=None, allow_pcdata=False):
    """Check static local constraints on a single node.

    The node must have the given name. The required attrs must be
    present, and the optional attrs may be. If allowed_children is not
    None, the node may have children of the given types. It can be []
    for nodes that may not have any children. If it's None, it is
    assumed the children are validated in some other way. If
    allow_pcdata is true, then non-whitespace text children are allowed.
    (Whitespace text nodes are always allowed.)
    """
    # FIX: LOG.warn is deprecated; replaced with LOG.warning throughout.
    if not optional_attrs:
        optional_attrs = []
    if not required_attrs:
        required_attrs = []
    if name(tt) != nodename:
        LOG.warning(_LW("Expected node type %(expected)s, not %(actual)s."),
                    {"expected": nodename, "actual": name(tt)})
    # Check we have all the required attributes, and no unexpected ones
    tt_attrs = {}
    if attrs(tt) is not None:
        tt_attrs = attrs(tt).copy()
    for attr in required_attrs:
        if attr not in tt_attrs:
            LOG.warning(_LW("Expected %(attr)s attribute on %(node)s node,"
                            " but only have %(attrs)s."),
                        {"attr": attr, "node": name(tt),
                         "attrs": attrs(tt).keys()})
        else:
            del tt_attrs[attr]
    for attr in optional_attrs:
        if attr in tt_attrs:
            del tt_attrs[attr]
    if len(tt_attrs.keys()) > 0:
        LOG.warning(_LW("Invalid extra attributes %s."), tt_attrs.keys())
    if allowed_children is not None:
        for c in kids(tt):
            if name(c) not in allowed_children:
                LOG.warning(_LW("Unexpected node %(node)s under %(parent)s;"
                                " wanted %(expected)s."),
                            {"node": name(c), "parent": name(tt),
                             "expected": allowed_children})
    if not allow_pcdata:
        for c in tt[2]:
            if isinstance(c, types.StringTypes):
                if c.lstrip(" \t\n") != "":
                    LOG.warning(_LW("Unexpected non-blank pcdata node "
                                    "%(node)s under %(parent)s."),
                                {"node": repr(c), "parent": name(tt)})
def _load_extensions(self):
    """Load extensions specified on the command line."""
    extensions = list(self.cls_list)
    # NOTE(thingee): Backwards compat for the old extension loader path.
    # We can drop this post-grizzly in the H release.
    deprecated_path = ('manila.api.openstack.share.contrib.'
                       'standard_extensions')
    current_path = 'manila.api.contrib.standard_extensions'
    if deprecated_path in extensions:
        LOG.warning(_LW('osapi_share_extension is set to deprecated path: '
                        '%s.'), deprecated_path)
        LOG.warning(_LW('Please set your flag or manila.conf settings for '
                        'osapi_share_extension to: %s.'), current_path)
        extensions = [ext.replace(deprecated_path, current_path)
                      for ext in extensions]
    for factory in extensions:
        try:
            self.load_extension(factory)
        except Exception as exc:
            LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
                            '%(exc)s.'),
                        {"ext_factory": factory, "exc": exc})
def delete_share(self, share, share_server=None):
    """Delete share."""
    share_name = share['name']
    url_type = self.helper._get_share_url_type(share['share_proto'])
    backend_share = self.helper._get_share_by_name(share_name, url_type)
    if not backend_share:
        # Share record missing on the backend; try to clean up the
        # filesystem directly by name.
        LOG.warning(_LW('The share was not found. Share name:%s'),
                    share_name)
        fsid = self.helper._get_fsid_by_name(share_name)
        if fsid:
            self.helper._delete_fs(fsid)
            return
        LOG.warning(_LW('The filesystem was not found.'))
        return
    share_id = backend_share['ID']
    share_fs_id = backend_share['FSID']
    if share_id:
        self.helper._delete_share_by_id(share_id, url_type)
    if share_fs_id:
        self.helper._delete_fs(share_fs_id)
    return backend_share
def _load_extensions(self):
    """Load extensions specified on the command line."""
    loadable = list(self.cls_list)
    # NOTE(thingee): Backwards compat for the old extension loader path.
    # We can drop this post-grizzly in the H release.
    old_contrib_path = ('manila.api.openstack.share.contrib.'
                        'standard_extensions')
    new_contrib_path = 'manila.api.contrib.standard_extensions'
    if old_contrib_path in loadable:
        LOG.warning(_LW('osapi_share_extension is set to deprecated path: '
                        '%s.'), old_contrib_path)
        LOG.warning(_LW('Please set your flag or manila.conf settings for '
                        'osapi_share_extension to: %s.'), new_contrib_path)
        loadable = [entry.replace(old_contrib_path, new_contrib_path)
                    for entry in loadable]
    for ext_factory in loadable:
        try:
            self.load_extension(ext_factory)
        except Exception as exc:
            # A broken extension must not prevent the others loading.
            LOG.warning(_LW('Failed to load extension %(ext_factory)s: '
                            '%(exc)s.'),
                        {"ext_factory": ext_factory, "exc": exc})
def _deallocate_container(self, share_name, vdm_name, vdm_id=None):
    """Delete underneath objects of the share."""
    # FIX: LOG.warn is deprecated; replaced with LOG.warning.
    # Delete mount point
    name = share_name
    path = '/' + name
    if vdm_id is None:
        vdm = self.get_vdm_by_name(vdm_name, allow_absence=True)
        vdm_id = vdm['id'] if vdm else None
    if vdm_id is not None:
        status, out = self._XMLAPI_helper.delete_mount_point(
            vdm_id, path, 'true')
        if constants.STATUS_OK != status:
            if self._XMLAPI_helper._is_mount_point_unexist_error(out):
                LOG.warning(_LW("Mount point %(path)s on %(vdm)s not "
                                "found."),
                            {'path': path, 'vdm': vdm_name})
            else:
                LOG.warning(_LW("Deleting mount point %(path)s on "
                                "%(mover_name)s failed. Reason: %(err)s"),
                            {'path': path,
                             'mover_name': vdm_name,
                             'err': out})
    else:
        LOG.warning(_LW("Failed to find the VDM. Try to "
                        "delete the file system"))
    self._delete_filesystem(name)
def delete_snapshot(self, emc_share_driver, context, snapshot,
                    share_server=None):
    """Remove share's snapshot."""
    # FIX: LOG.warn is deprecated -> LOG.warning, and the second warning
    # eagerly interpolated with '%' instead of passing lazy log args.
    ckpt_name = snapshot['name']
    status, ckpt = self._XMLAPI_helper.get_check_point_by_name(ckpt_name)
    if constants.STATUS_OK != status:
        LOG.warning(_LW("Check point not found. Reason: %s."), status)
        return
    if ckpt['id'] == '':
        LOG.warning(_LW("Snapshot: %(name)s not found. "
                        "Skip the deletion."),
                    {'name': snapshot['name']})
        return
    status, out = self._XMLAPI_helper.delete_check_point(ckpt['id'])
    if constants.STATUS_OK != status:
        message = (_("Could not delete check point. Reason: %s.")
                   % out['info'])
        LOG.error(message)
        raise exception.EMCVnxXMLAPIError(err=message)
def _deallocate_container(self, share_name, vdm_name, vdm_id=None):
    """Delete underneath objects of the share."""
    # FIX: LOG.warn is deprecated; replaced with LOG.warning.
    # Delete mount point
    name = share_name
    path = '/' + name
    if vdm_id is None:
        vdm = self._get_vdm_by_name(vdm_name, allow_absence=True)
        vdm_id = vdm['id'] if vdm else None
    if vdm_id is not None:
        status, out = self._XMLAPI_helper.delete_mount_point(
            vdm_id, path, 'true')
        if constants.STATUS_OK != status:
            if self._XMLAPI_helper.is_mount_point_nonexistent(out):
                LOG.warning(_LW("Mount point %(path)s on %(vdm)s not "
                                "found."),
                            {'path': path, 'vdm': vdm_name})
            else:
                LOG.warning(_LW("Deleting mount point %(path)s on "
                                "%(mover_name)s failed. Reason: %(err)s"),
                            {'path': path,
                             'mover_name': vdm_name,
                             'err': out})
    else:
        LOG.warning(_LW("Failed to find the VDM. Try to "
                        "delete the file system"))
    self._delete_filesystem(name)
def _build_createfshare_kwargs(self, protocol, fpg, fstore, readonly,
                               sharedir, extra_specs, comment):
    """Assemble the keyword arguments for a createfshare backend call."""
    kwargs = dict(fpg=fpg,
                  fstore=fstore,
                  sharedir=sharedir,
                  comment=comment)
    if 'hp3par_flash_cache' in extra_specs:
        LOG.warning(_LW("hp3par_flash_cache is deprecated. Use "
                        "hpe3par_flash_cache instead."))
    if protocol == 'nfs':
        kwargs['clientip'] = '127.0.0.1'
        kwargs['options'] = self._get_nfs_options(extra_specs, readonly)
    else:
        kwargs['allowip'] = '127.0.0.1'
        for smb_opt in (ACCESS_BASED_ENUM, CONTINUOUS_AVAIL, CACHE):
            opt_value = extra_specs.get('hpe3par:smb_%s' % smb_opt)
            if opt_value is None:
                # Fall back to the deprecated 'hp3par:' prefix.
                opt_value = extra_specs.get('hp3par:smb_%s' % smb_opt)
                if opt_value:
                    LOG.warning(_LW("hp3par:smb_* is deprecated. Use "
                                    "hpe3par:smb_* instead."))
            if opt_value:
                kwargs[SMB_EXTRA_SPECS_MAP[smb_opt]] = opt_value
    return kwargs
def one_child(tt, acceptable):
    """Parse children of a node with exactly one child node.

    PCData is ignored.
    """
    # FIX: LOG.warn is deprecated; replaced with LOG.warning.
    k = kids(tt)
    if len(k) != 1:
        LOG.warning(_LW('Expected just one %(item)s, got %(more)s.'),
                    {'item': acceptable,
                     'more': " ".join([t[0] for t in k])})
    child = k[0]
    if name(child) not in acceptable:
        LOG.warning(_LW('Expected one of %(item)s, got %(child)s '
                        'under %(parent)s.'),
                    {'item': acceptable,
                     'child': name(child),
                     'parent': name(tt)})
    return parse_any(child)
def get_access_rules(self, server, share_name):
    """Collect Manila-style access rules from the share's raw ACLs."""
    raw_acls = self._get_acls(server, share_name)
    acls = []
    for raw_acl in raw_acls:
        account = raw_acl["AccountName"]
        right = raw_acl["AccessRight"]
        level = self._WIN_ACL_MAP[right]
        if raw_acl["AccessControlType"] != self._WIN_ACL_ALLOW:
            # A single deny-everyone entry means no rules exist yet.
            if account.lower() == "everyone" and len(raw_acls) == 1:
                LOG.debug("No access rules are set yet for share %s",
                          share_name)
            else:
                LOG.warning(
                    _LW("Found explicit deny ACE rule that was not "
                        "created by Manila and will be ignored: %s"),
                    raw_acl)
            continue
        if level == self._ACCESS_LEVEL_CUSTOM:
            LOG.warning(
                _LW("Found 'custom' ACE rule that will be ignored: %s"),
                raw_acl)
            continue
        if right == self._WIN_ACCESS_RIGHT_FULL:
            LOG.warning(
                _LW("Account '%(access_to)s' was given full access "
                    "right on share %(share_name)s. Manila only "
                    "grants 'change' access."),
                {"access_to": account, "share_name": share_name})
        acls.append({"access_to": account,
                     "access_level": level,
                     "access_type": "user"})
    return acls
def deny_access(self, share, access, share_server=None):
    """Deny access to share."""
    proto = share["share_proto"]
    name = share["name"]
    url_type = self.helper._get_share_url_type(proto)
    client_type = self.helper._get_share_client_type(proto)
    rule_type = access["access_type"]
    if proto == "NFS" and rule_type != "ip":
        LOG.warning(_LW("Only IP access type is allowed for NFS shares."))
        return
    if proto == "CIFS" and rule_type != "user":
        LOG.warning(_LW("Only USER access type is allowed for"
                        " CIFS shares."))
        return
    backend_share = self.helper._get_share_by_name(name, url_type)
    if not backend_share:
        LOG.warning(_LW("Can not get share. share_name: %s"), name)
        return
    access_id = self.helper._get_access_from_share(
        backend_share["ID"], access["access_to"], client_type)
    if not access_id:
        LOG.warning(_LW("Can not get access id from share. "
                        "share_name: %s"), name)
        return
    self.helper._remove_access_from_share(access_id, client_type)
def teardown_server(self, server_details, security_services=None):
    """Teardown share server."""
    vserver = None
    if server_details:
        vserver = server_details.get("vserver_name")
    if not vserver:
        # No Vserver to tear down; the DB record is removed regardless.
        LOG.warning(_LW("Vserver not specified for share server being "
                        "deleted. Deletion of share server record will "
                        "proceed anyway."))
        return
    if not self._client.vserver_exists(vserver):
        LOG.warning(_LW("Could not find Vserver for share server being "
                        "deleted: %s. Deletion of share server "
                        "record will proceed anyway."), vserver)
        return
    self._delete_vserver(vserver, security_services=security_services)
def delete_share(self, share, share_server=None):
    """Delete share."""
    name = share["name"]
    url_type = self.helper._get_share_url_type(share["share_proto"])
    found = self.helper._get_share_by_name(name, url_type)
    if not found:
        LOG.warning(_LW("The share was not found. Share name:%s"), name)
        # Fall back to removing the filesystem by name when the share
        # record is missing on the backend.
        fsid = self.helper._get_fsid_by_name(name)
        if fsid:
            self.helper._delete_fs(fsid)
        else:
            LOG.warning(_LW("The filesystem was not found."))
        return
    if found["ID"]:
        self.helper._delete_share_by_id(found["ID"], url_type)
    if found["FSID"]:
        self.helper._delete_fs(found["FSID"])
    return found
def root_app_factory(loader, global_conf, **local_conf):
    """Paste factory for the API root; warns about deprecated options."""
    if CONF.enable_v1_api:
        LOG.warning(_LW('The config option enable_v1_api is deprecated, is '
                        'not used, and will be removed in a future '
                        'release.'))
    if CONF.enable_v2_api:
        LOG.warning(_LW('The config option enable_v2_api is deprecated, is '
                        'not used, and will be removed in a future '
                        'release.'))
    return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
def root_app_factory(loader, global_conf, **local_conf):
    """Create the root WSGI app, logging deprecated-option warnings."""
    if CONF.enable_v1_api:
        LOG.warning(
            _LW('The config option enable_v1_api is deprecated, is '
                'not used, and will be removed in a future release.'))
    if CONF.enable_v2_api:
        LOG.warning(
            _LW('The config option enable_v2_api is deprecated, is '
                'not used, and will be removed in a future release.'))
    return paste.urlmap.urlmap_factory(loader, global_conf, **local_conf)
def _check_filter_function(self, stats):
    """Checks if a share passes a host's filter function.

    Returns the boolean result of the host's filter function, or True
    when no filter function is set, or False when the backend name
    check or the evaluator fails.

    FIX: the previous docstring claimed a (filter_passing,
    filter_invalid) tuple was returned; the code returns a single
    boolean.
    """
    host_stats = stats['host_stats']
    extra_specs = stats['extra_specs']
    # Check that the share types match
    if extra_specs is None or 'share_backend_name' not in extra_specs:
        LOG.warning(
            _LW("No 'share_backend_name' key in extra_specs. "
                "Skipping share backend name check."))
    elif (extra_specs['share_backend_name'] !=
          host_stats['share_backend_name']):
        LOG.warning(
            _LW("Share backend names do not match: '%(target)s' "
                "vs '%(current)s' :: Skipping."),
            {'target': extra_specs['share_backend_name'],
             'current': host_stats['share_backend_name']})
        return False
    if stats['filter_function'] is None:
        LOG.warning(_LW("Filter function not set :: passing host."))
        return True
    try:
        filter_result = self._run_evaluator(stats['filter_function'],
                                            stats)
    except Exception as ex:
        # Warn the admin for now that there is an error in the
        # filter function.
        LOG.warning(
            _LW("Error in filtering function "
                "'%(function)s' : '%(error)s' :: failing host."),
            {'function': stats['filter_function'], 'error': ex})
        return False
    msg = _LI("Filter function result for host %(host)s: %(result)s.")
    args = {'host': stats['host_stats']['host'],
            'result': six.text_type(filter_result)}
    LOG.info(msg, args)
    return filter_result
def _check_share_access(self, share_proto, access_type):
    """Validate that the access type is supported for the protocol.

    :raises exception.InvalidShareAccess: for unsupported combinations
        or unknown protocols.
    """
    if share_proto == 'CIFS' and access_type != 'user':
        reason = _LW('Only USER access type is allowed for '
                     'CIFS shares.')
        LOG.warning(reason)
        raise exception.InvalidShareAccess(reason=reason)
    elif share_proto == 'NFS' and access_type not in ('ip', 'user'):
        reason = _LW('Only IP or USER access types are allowed for '
                     'NFS shares.')
        LOG.warning(reason)
        raise exception.InvalidShareAccess(reason=reason)
    elif share_proto not in ('NFS', 'CIFS'):
        reason = _LW('Unsupported protocol \"%s\" specified for '
                     'access rule.') % share_proto
        # FIX: this branch raised without logging, unlike the other two;
        # log for consistency before raising.
        LOG.warning(reason)
        raise exception.InvalidShareAccess(reason=reason)
def cifs_deny_access(self, name, user, is_snapshot=False):
    """Revoke a user's CIFS access to a share or snapshot."""
    entity_type = "snapshot" if is_snapshot else "share"
    command = ['cifs-saa', 'delete', '--target-label', self.fs_name,
               name, user]
    try:
        self._execute(command)
    except processutils.ProcessExecutionError as e:
        # Treat "already removed" as success; anything else is fatal.
        already_removed = ('not listed as a user' in e.stderr
                           or 'Could not delete user/group' in e.stderr)
        if already_removed:
            LOG.warning(_LW('User %(user)s already not allowed to access '
                            '%(entity_type)s %(name)s.'),
                        {'entity_type': entity_type,
                         'user': user,
                         'name': name})
        else:
            msg = _("Could not delete access of user %(user)s to "
                    "%(entity_type)s %(name)s.") % {
                'user': user,
                'name': name,
                'entity_type': entity_type,
            }
            LOG.exception(msg)
            raise exception.HNASBackendException(msg=msg)
def _deny_access_via_manager(self, gluster_mgr, context, share, access,
                             share_server=None):
    """Deny access to a share that's using cert based auth.

    Remove the SSL CN (Common Name) that's allowed to access the
    server.
    """
    if access['access_type'] != ACCESS_TYPE_CERT:
        raise exception.InvalidShareAccess(_("Only 'cert' access type "
                                             "allowed for access "
                                             "removal."))
    allowed_cns = re.split('[ ,]',
                           gluster_mgr.get_vol_option(AUTH_SSL_ALLOW))
    access_to = access['access_to']
    if access_to not in allowed_cns:
        LOG.warning(_LW("Access to %(share)s at %(export)s is already "
                        "denied for %(access_to)s. GlusterFS volume "
                        "options might have been changed externally."),
                    {'share': share['id'],
                     'export': gluster_mgr.qualified,
                     'access_to': access_to})
        return
    allowed_cns.remove(access_to)
    gluster_mgr.set_vol_option(AUTH_SSL_ALLOW, ','.join(allowed_cns))
    dynauth = gluster_mgr.get_vol_option(DYNAMIC_AUTH) or 'off'
    # TODO(csaba): boolean option processing shoud be done in common
    if dynauth.lower() not in ('on', '1', 'true', 'yes', 'enable'):
        # Without dynamic auth the volume must be restarted for the
        # option change to take effect.
        common._restart_gluster_vol(gluster_mgr)
def plug(self, device_name, port_id, mac_address, bridge=None,
         namespace=None, prefix=None):
    """Plug in the interface."""
    # FIX: LOG.warn is deprecated; replaced with LOG.warning.
    if not bridge:
        bridge = self.conf.ovs_integration_bridge
    self.check_bridge_exists(bridge)
    ip = ip_lib.IPWrapper()
    ns_dev = ip.device(device_name)
    if not ip_lib.device_exists(device_name, namespace=namespace):
        tap_name = self._get_tap_name(device_name)
        self._ovs_add_port(bridge, tap_name, port_id, mac_address)
        ns_dev.link.set_address(mac_address)
        # Add an interface created by ovs to the namespace.
        if namespace:
            namespace_obj = ip.ensure_namespace(namespace)
            namespace_obj.add_device_to_namespace(ns_dev)
    else:
        LOG.warning(_LW("Device %s already exists"), device_name)
    ns_dev.link.set_up()
def delete_cgsnapshot(self, context, snap_dict, share_server=None):
    """Deletes a consistency group snapshot."""
    try:
        vserver, vserver_client = self._get_vserver(
            share_server=share_server)
    except (exception.InvalidInput,
            exception.VserverNotSpecified,
            exception.VserverNotFound) as error:
        # Best-effort deletion: the DB record goes away regardless.
        LOG.warning(_LW("Could not determine share server for CG snapshot "
                        "being deleted: %(snap)s. Deletion of CG snapshot "
                        "record will proceed anyway. Error: %(error)s"),
                    {'snap': snap_dict['id'], 'error': error})
        return None, None
    members = snap_dict.get('cgsnapshot_members', [])
    share_names = [self._get_valid_share_name(member['share_id'])
                   for member in members]
    snapshot_name = self._get_valid_cg_snapshot_name(snap_dict['id'])
    for share_name in share_names:
        try:
            self._handle_busy_snapshot(vserver_client, share_name,
                                       snapshot_name)
        except exception.SnapshotNotFound:
            LOG.info(_LI("Snapshot %(snap)s does not exist for share "
                         "%(share)s."),
                     {'snap': snapshot_name, 'share': share_name})
            continue
        LOG.debug("Deleting snapshot %(snap)s for share %(share)s.",
                  {'snap': snapshot_name, 'share': share_name})
        vserver_client.delete_snapshot(share_name, snapshot_name)
    return None, None
def _update_host_state_map(self, context):
    """Refresh the scheduler's host state cache from the service table.

    FIX: the warning/info calls eagerly interpolated the message with
    '%' instead of passing lazy log arguments; that defeats logging's
    deferred formatting and can raise on bad format args.
    """
    # Get resource usage across the available share nodes:
    topic = CONF.share_topic
    share_services = db.service_get_all_by_topic(context, topic)
    for service in share_services:
        host = service['host']
        # Warn about down services and remove them from host_state_map
        if not utils.service_is_up(service) or service['disabled']:
            LOG.warning(_LW("Share service is down. (host: %s)."), host)
            if self.host_state_map.pop(host, None):
                LOG.info(_LI("Removing non-active host: %s from "
                             "scheduler cache."), host)
            continue
        # Create and register host_state if not in host_state_map
        capabilities = self.service_states.get(host, None)
        host_state = self.host_state_map.get(host)
        if not host_state:
            host_state = self.host_state_cls(
                host,
                capabilities=capabilities,
                service=dict(six.iteritems(service)))
            self.host_state_map[host] = host_state
        # Update capabilities and attributes in host_state
        host_state.update_from_share_capability(
            capabilities, service=dict(six.iteritems(service)))
def kill(self):
    """Destroy the service object in the datastore."""
    # FIX: LOG.warn is deprecated; replaced with LOG.warning.
    self.stop()
    try:
        db.service_destroy(context.get_admin_context(), self.service_id)
    except exception.NotFound:
        LOG.warning(_LW('Service killed that has no database entry'))
def execute_with_retry(self, *cmd, **kwargs):
    """Retry wrapper over common shell interface."""
    # NOTE(review): despite the name, no retry loop is visible here —
    # the failure is logged and re-raised; retry is presumably applied
    # by a decorator or the caller. Confirm against the call sites.
    try:
        return self.execute(*cmd, **kwargs)
    except exception.ProcessExecutionError as e:
        LOG.warning(_LW("Failed to run command, got error: %s"), e)
        raise
def deny_access(self, context, share, access, share_server=None):
    """Deny access to a share.

    Currently only IP based access control is supported.
    """
    # FIX: LOG.warn is deprecated; replaced with LOG.warning.
    if access['access_type'] != 'ip':
        LOG.warning(_LW('Only ip access type allowed.'))
        return
    httpclient = httplib2.Http(disable_ssl_certificate_validation=True,
                               timeout=None)
    sop_share_id = self._get_share_id_by_name(httpclient, share['id'])
    payload = {
        'action': 'delete-access-rule',
        'name': '%s-%s' % (share['id'], access['access_to']),
    }
    sopuri = '/shares/' + sop_share_id
    headers = dict(Authorization=self.get_sop_auth_header())
    uri = self.sop_target + '/sopapi' + sopuri
    resp_headers, resp_content = httpclient.request(
        uri, 'POST', body=json.dumps(payload), headers=headers)
    resp_code = int(resp_headers['status'])
    if resp_code == 202:
        # Rule deletion is asynchronous; poll the returned job.
        job_loc = resp_headers['location']
        self._wait_for_job_completion(httpclient, job_loc)
    else:
        raise exception.SopAPIError(err=_('received error: %s') %
                                    resp_headers['status'])
def __init__(self, user_id, project_id, is_admin=None, read_deleted="no",
             roles=None, remote_address=None, timestamp=None,
             request_id=None, auth_token=None, overwrite=True,
             quota_class=None, service_catalog=None, **kwargs):
    """Initialize RequestContext.

    :param read_deleted: 'no' indicates deleted records are hidden,
        'yes' indicates deleted records are visible, 'only' indicates
        that *only* deleted records are visible.
    :param overwrite: Set to False to ensure that the greenthread local
        copy of the index is not overwritten.
    :param kwargs: Extra arguments that might be present, but we ignore
        because they possibly came in from older rpc messages.
    """
    # FIX: LOG.warn is deprecated; replaced with LOG.warning.
    if kwargs:
        LOG.warning(_LW('Arguments dropped when creating context: %s'),
                    str(kwargs))
    self.user_id = user_id
    self.project_id = project_id
    self.roles = roles or []
    self.is_admin = is_admin
    if self.is_admin is None:
        self.is_admin = policy.check_is_admin(self.roles)
    elif self.is_admin and 'admin' not in self.roles:
        self.roles.append('admin')
    self.read_deleted = read_deleted
    self.remote_address = remote_address
    if not timestamp:
        timestamp = timeutils.utcnow()
    if isinstance(timestamp, six.string_types):
        timestamp = timeutils.parse_strtime(timestamp)
    self.timestamp = timestamp
    if service_catalog:
        # Only the entries relevant to compute/volume are retained.
        self.service_catalog = [
            s for s in service_catalog
            if s.get('type') in ('compute', 'volume')
        ]
    else:
        self.service_catalog = []
    if not request_id:
        request_id = common_context.generate_request_id()
    self.request_id = request_id
    self.auth_token = auth_token
    self.quota_class = quota_class
    if overwrite or not hasattr(local.store, 'context'):
        self.update_store()
def _cifs_deny_access(self, share, access, share_server):
    """Deny access to CIFS share."""
    vdm_name = self._get_share_server_name(share_server)
    share_name = share['id']
    if access['access_type'] != 'user':
        LOG.warning(_LW("Only user access type allowed for CIFS share."))
        return
    user_name = access['access_to']
    # Map Manila access levels onto the backend's CIFS ACLs.
    if access['access_level'] == const.ACCESS_LEVEL_RW:
        cifs_access = constants.CIFS_ACL_FULLCONTROL
    else:
        cifs_access = constants.CIFS_ACL_READ
    # Check if CIFS server exists.
    server_name = vdm_name
    status, server = self._get_context('CIFSServer').get(server_name,
                                                         vdm_name)
    if status != constants.STATUS_OK:
        message = (_("CIFS server %s not found.") % server_name)
        LOG.error(message)
        raise exception.EMCVmaxXMLAPIError(err=message)
    self._get_context('CIFSShare').deny_share_access(vdm_name,
                                                     share_name,
                                                     user_name,
                                                     server['domain'],
                                                     access=cifs_access)
def delete_snapshot(self, context, snapshot, share_server=None):
    """Delete a snapshot."""
    LOG.debug('Deleting a snapshot: %(shr_name)s@%(snap_name)s.',
              {'shr_name': snapshot['share_name'],
               'snap_name': snapshot['name']})
    fs_path = PATH_DELIMITER.join((self.fs_prefix,
                                   snapshot['share_name']))
    url = ('storage/pools/%(pool)s/filesystems/%(fs)s/snapshots/'
           '%(snap)s') % {'pool': self.pool_name,
                          'fs': fs_path,
                          'snap': snapshot['name']}
    try:
        self.nef.delete(url)
    except exception.NexentaException as e:
        # A missing snapshot is not an error for a delete operation.
        if e.kwargs['code'] != 'ENOENT':
            raise
        LOG.warning(_LW('snapshot %(name)s not found, response: %(msg)s'),
                    {'name': snapshot['name'], 'msg': e.msg})
def cleanup_new_instance(self, new_instance):
    """Best-effort removal of the new instance; failures only warn."""
    try:
        self.delete_instance_and_wait(new_instance)
    except Exception:
        LOG.warning(_LW("Failed to cleanup new instance during generic"
                        " migration for share %s."),
                    self.share['id'])
def _mount_device_with_lock():
    """Mount the volume on the service VM unless it is already mounted."""
    mount_path = self._get_mount_path(share)
    log_data = {
        'dev': volume['mountpoint'],
        'path': mount_path,
        'server': server_details['instance_id'],
    }
    try:
        if self._is_device_mounted(mount_path, server_details, volume):
            LOG.warning(_LW("Mount point '%(path)s' already exists on "
                            "server '%(server)s'."), log_data)
        else:
            LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
                      "server '%(server)s'.", log_data)
            commands = ['sudo', 'mkdir', '-p', mount_path, '&&',
                        'sudo', 'mount', volume['mountpoint'],
                        mount_path,
                        '&&', 'sudo', 'chmod', '777', mount_path]
            self._ssh_exec(server_details, commands)
            # Add mount permanently
            self._sync_mount_temp_and_perm_files(server_details)
    except exception.ProcessExecutionError as e:
        raise exception.ShareBackendException(msg=six.text_type(e))
def _deny_access(self, context, share, access, share_server=None):
    """Remove a cephx client's access and evict it from the share."""
    if access["access_type"] != CEPHX_ACCESS_TYPE:
        LOG.warning(_LW("Invalid access type '%(type)s', "
                        "ignoring in deny."),
                    {"type": access["access_type"]})
        return
    path = self._share_path(share)
    self.volume_client.deauthorize(path, access["access_to"])
    # Drop any live client sessions so revocation takes effect now.
    self.volume_client.evict(access["access_to"], volume_path=path)
def allow_migration_access(self, access):
    """Allow the migration helper's access rule on the share.

    Tolerates the rule already existing; returns the access reference
    (or the matching existing rule) once the update is confirmed.

    FIX: the warning eagerly interpolated with '%' instead of passing
    lazy log arguments.
    """
    allowed = False
    access_ref = None
    try:
        access_ref = self.api.allow_access(self.context, self.share,
                                           access['access_type'],
                                           access['access_to'])
        allowed = True
    except exception.ShareAccessExists:
        LOG.warning(_LW("Access rule already allowed. "
                        "Access %(access_to)s - Share "
                        "%(share_id)s"),
                    {'access_to': access['access_to'],
                     'share_id': self.share['id']})
        access_list = self.api.access_get_all(self.context, self.share)
        for access_item in access_list:
            if access_item['access_to'] == access['access_to']:
                access_ref = access_item
    if access_ref and allowed:
        self.wait_for_access_update(self.share.instance)
    return access_ref
def do_setup(self, context):
    """Any initialization the generic driver does while starting."""
    super(GenericShareDriver, self).do_setup(context)
    self.compute_api = compute.API()
    self.volume_api = volume.API()
    self._setup_helpers()

    sv_fetch_retry_interval = 5
    common_sv_available = False
    share_server = None
    # When the driver does not handle share servers itself, it cannot
    # start without a reachable common service VM: block and retry.
    while not (common_sv_available or self.driver_handles_share_servers):
        try:
            # Verify availability of common server
            share_server = (
                self.service_instance_manager.get_common_server())
            common_sv_available = self._is_share_server_active(
                context, share_server)
        except Exception as ex:
            LOG.error(ex)

        if not common_sv_available:
            time.sleep(sv_fetch_retry_interval)
            LOG.warning(
                _LW("Waiting for the common service VM to become "
                    "available. "
                    "Driver is currently uninitialized. "
                    "Share server: %(share_server)s "
                    "Retry interval: %(retry_interval)s"),
                dict(share_server=share_server,
                     retry_interval=sv_fetch_retry_interval))
def do_detach():
    """Detach the share's volume from the service instance, if attached.

    Looks up the volume by share id; a missing volume is tolerated
    since it may already have been deleted. After requesting the
    detach, polls the volume status until it reaches an
    available/error state or the configured timeout expires, in which
    case a ManilaException is raised.
    """
    attached_volumes = [
        vol.id for vol in self.compute_api.instance_volumes_list(
            self.admin_context, instance_id)
    ]
    try:
        volume = self._get_volume(context, share['id'])
    except exception.VolumeNotFound:
        # Best effort: nothing to detach if the volume is gone.
        LOG.warning(
            _LW("Volume not found for share %s. "
                "Possibly already deleted."), share['id'])
        volume = None
    if volume and volume['id'] in attached_volumes:
        self.compute_api.instance_volume_detach(
            self.admin_context, instance_id, volume['id'])
        t = time.time()
        while time.time() - t < self.configuration.max_time_to_attach:
            # Re-fetch to observe the detach progressing.
            volume = self.volume_api.get(context, volume['id'])
            if volume['status'] in (const.STATUS_AVAILABLE,
                                    const.STATUS_ERROR):
                break
            time.sleep(1)
        else:
            # while/else: the loop timed out without hitting `break`.
            err_msg = {
                'volume_id': volume['id'],
                'max_time': self.configuration.max_time_to_attach
            }
            raise exception.ManilaException(
                _('Volume %(volume_id)s has not been detached in '
                  '%(max_time)ss. Giving up.') % err_msg)
def cleanup_access_rules(self, share_instance, share_server):
    """Best-effort restore of access rules after a failed migration.

    Errors from the revert are logged and swallowed so the rest of
    the migration cleanup can proceed.
    """
    try:
        self.revert_access_rules(share_instance, share_server)
    except Exception:
        LOG.warning(
            _LW("Failed to cleanup access rules during generic"
                " migration for share %s."),
            self.share['id'])
def _mount_device_with_lock():
    """Mount the share's block device on the service server.

    Idempotent: when the device is already mounted only a warning is
    logged. The command tokens (including the '&&' separators) are
    presumably joined into one shell line by _ssh_exec — TODO confirm.
    SSH-level failures are re-raised as ShareBackendException.
    """
    mount_path = self._get_mount_path(share)
    log_data = {
        'dev': volume['mountpoint'],
        'path': mount_path,
        'server': server_details['instance_id'],
    }
    try:
        if not self._is_device_mounted(mount_path, server_details,
                                       volume):
            LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
                      "server '%(server)s'.", log_data)
            # Chain mkdir/mount/chmod so a single remote invocation
            # creates the mount point, mounts, and opens permissions.
            mount_cmd = ['sudo mkdir -p', mount_path, '&&']
            mount_cmd.extend(['sudo mount', volume['mountpoint'],
                              mount_path])
            mount_cmd.extend(['&& sudo chmod 777', mount_path])
            self._ssh_exec(server_details, mount_cmd)

            # Add mount permanently
            self._sync_mount_temp_and_perm_files(server_details)
        else:
            LOG.warning(_LW("Mount point '%(path)s' already exists on "
                            "server '%(server)s'."), log_data)
    except exception.ProcessExecutionError as e:
        raise exception.ShareBackendException(msg=six.text_type(e))
def delete_share(self, context, share, share_server=None):
    """Is called to remove a share."""
    pool_name = self.private_storage.get(share['id'], 'pool_name')
    dataset_name = self.private_storage.get(share['id'], 'dataset_name')
    if not dataset_name:
        dataset_name = self._get_dataset_name(share)

    out, err = self.zfs('list', '-r', pool_name)
    matches = [datum for datum in self.parse_zfs_answer(out)
               if datum['NAME'] == dataset_name]
    if matches:
        # Delete dataset's snapshots first
        out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
        full_snapshot_prefix = (
            dataset_name + '@' + self.replica_snapshot_prefix)
        for snap in self.parse_zfs_answer(out):
            if full_snapshot_prefix in snap['NAME']:
                self._delete_dataset_or_snapshot_with_retry(snap['NAME'])

        self._get_share_helper(
            share['share_proto']).remove_exports(dataset_name)
        self._delete_dataset_or_snapshot_with_retry(dataset_name)
    else:
        LOG.warning(
            _LW("Share with '%(id)s' ID and '%(name)s' NAME is "
                "absent on backend. Nothing has been deleted."),
            {'id': share['id'], 'name': dataset_name})

    self.private_storage.delete(share['id'])
def delete_snapshot(self, context, snapshot, share_server=None):
    """Deletes a snapshot of a share."""
    try:
        vserver_name, client = self._get_vserver(
            share_server=share_server)
    except (exception.InvalidInput,
            exception.VserverNotSpecified,
            exception.VserverNotFound) as error:
        # The backend-side snapshot is unreachable; still let the
        # snapshot DB record go away.
        LOG.warning(_LW("Could not determine share server for snapshot "
                        "being deleted: %(snap)s. Deletion of snapshot "
                        "record will proceed anyway. Error: %(error)s"),
                    {'snap': snapshot['id'], 'error': error})
        return

    flexvol_name = self._get_valid_share_name(snapshot['share_id'])
    snap_name = self._get_valid_snapshot_name(snapshot['id'])

    try:
        self._handle_busy_snapshot(client, flexvol_name, snap_name)
    except exception.SnapshotNotFound:
        LOG.info(_LI("Snapshot %s does not exist."), snap_name)
        return

    LOG.debug('Deleting snapshot %(snap)s for share %(share)s.',
              {'snap': snap_name, 'share': flexvol_name})
    client.delete_snapshot(flexvol_name, snap_name)
def delete_replica(self, context, replica_list, replica_snapshots,
                   replica, share_server=None):
    """Deletes a replica. This is called on the destination backend."""
    pool_name = self.private_storage.get(replica['id'], 'pool_name')
    dataset_name = self.private_storage.get(replica['id'], 'dataset_name')
    if not dataset_name:
        dataset_name = self._get_dataset_name(replica)

    # Delete dataset's snapshots first
    out, err = self.zfs('list', '-r', '-t', 'snapshot', pool_name)
    for datum in self.parse_zfs_answer(out):
        if dataset_name in datum['NAME']:
            self._delete_dataset_or_snapshot_with_retry(datum['NAME'])

    # Now we delete dataset itself
    out, err = self.zfs('list', '-r', pool_name)
    matches = [datum for datum in self.parse_zfs_answer(out)
               if datum['NAME'] == dataset_name]
    if matches:
        helper = self._get_share_helper(replica['share_proto'])
        helper.remove_exports(dataset_name)
        self._delete_dataset_or_snapshot_with_retry(dataset_name)
    else:
        LOG.warning(
            _LW("Share replica with '%(id)s' ID and '%(name)s' NAME is "
                "absent on backend. Nothing has been deleted."),
            {'id': replica['id'], 'name': dataset_name})

    self.private_storage.delete(replica['id'])
def delete_share(self, context, share, share_server=None):
    """Delete a share on the GlusterFS volume.

    1 Manila share = 1 GlusterFS volume. Put the gluster
    volume back in the available list.
    """
    exp_locn = share.get('export_location', None)
    try:
        # Get the gluster address associated with the export.
        gmgr = self.gluster_used_vols_dict[exp_locn]
    except KeyError:
        # FIX: the message and its substitution dict used to be packed
        # into one tuple and passed as a single argument, so the
        # '%(share_id)s' placeholder was never interpolated and the
        # log showed a raw tuple. Pass them as separate lazy logger
        # arguments. Also use warning() instead of the deprecated
        # warn() alias.
        LOG.warning(_LW("Invalid request. Ignoring delete_share request "
                        "for share %(share_id)s"),
                    {'share_id': share['id']})
        return

    try:
        self._wipe_gluster_vol(gmgr)
        self._push_gluster_vol(exp_locn)
    except exception.GlusterfsException:
        # FIX: same tuple-packing defect as above.
        LOG.error(_LE("Error during delete_share request for "
                      "share %(share_id)s"),
                  {'share_id': share['id']})
        raise
def remove_storage(self, share):
    """Unmount and remove the logical volume backing *share*.

    Both the umount and the lvremove are best-effort: failures are
    logged (see bug 1621784 for the lvremove jamming issue) but never
    raised, so teardown of the container share can continue.
    """
    to_remove = self._get_lv_device(share)
    try:
        self._execute("umount", to_remove, run_as_root=True)
    except exception.ProcessExecutionError as e:
        LOG.warning(_LW("Failed to umount helper directory %s."),
                    to_remove)
        LOG.error(e)

    # (aovchinnikov): bug 1621784 manifests itself in jamming logical
    # volumes, so try removing once and issue warning until it is fixed.
    try:
        self._execute("lvremove", "-f", "--autobackup", "n",
                      to_remove, run_as_root=True)
    except exception.ProcessExecutionError as e:
        # FIX: interpolate lazily via logger args (was an eager '%'),
        # consistent with the umount warning above.
        LOG.warning(_LW("Failed to remove logical volume %s."),
                    to_remove)
        LOG.error(e)
def _allow_access_via_manager(self, gluster_mgr, context, share, access,
                              share_server=None):
    """Allow access to a share using certs.

    Add the SSL CN (Common Name)
    that's allowed to access the server.
    """
    if access['access_type'] != ACCESS_TYPE_CERT:
        raise exception.InvalidShareAccess(_("Only 'cert' access type "
                                             "allowed"))

    ssl_allow_opt = gluster_mgr.get_vol_option(AUTH_SSL_ALLOW)
    # wrt. GlusterFS' parsing of auth.ssl-allow, please see code from
    # https://github.com/gluster/glusterfs/blob/v3.6.2/
    # xlators/protocol/auth/login/src/login.c#L80
    # until end of gf_auth() function
    entries = re.split('[ ,]', ssl_allow_opt)
    cn = access['access_to']

    if cn in entries:
        LOG.warning(_LW("Access to %(share)s at %(export)s is already "
                        "granted for %(access_to)s. GlusterFS volume "
                        "options might have been changed externally."),
                    {'share': share['id'],
                     'export': gluster_mgr.qualified,
                     'access_to': cn})
        return

    entries.append(cn)
    gluster_mgr.set_vol_option(AUTH_SSL_ALLOW, ','.join(entries))
def plug(self, device_name, port_id, mac_address, bridge=None,
         namespace=None, prefix=None):
    """Plugin the interface."""
    ip = ip_lib.IPWrapper()

    # Derive the root-namespace tap device name from the veth name.
    if prefix:
        tap_name = device_name.replace(prefix, 'tap')
    else:
        tap_name = device_name.replace(self.DEV_NAME_PREFIX, 'tap')

    if not ip_lib.device_exists(device_name, namespace=namespace):
        # Create ns_veth in a namespace if one is configured.
        root_veth, ns_veth = ip.add_veth(tap_name, device_name,
                                         namespace2=namespace)
        ns_veth.link.set_address(mac_address)
    else:
        ns_veth = ip.device(device_name)
        root_veth = ip.device(tap_name)
        # FIX: Logger.warn() is a deprecated alias; use warning().
        LOG.warning(_LW("Device %s already exists"), device_name)

    root_veth.link.set_up()
    ns_veth.link.set_up()
def __init__(self, share_driver=None, service_name=None, *args, **kwargs):
    """Load the driver from args, or from flags."""
    self.configuration = manila.share.configuration.Configuration(
        share_manager_opts, config_group=service_name)
    self._verify_unused_share_server_cleanup_interval()
    super(ShareManager, self).__init__(service_name='share',
                                       *args, **kwargs)

    # Fall back to the configured driver path when none was passed in.
    if not share_driver:
        share_driver = self.configuration.share_driver
    # MAPPING translates deprecated driver paths to their replacements;
    # warn the operator but keep working with the new path.
    if share_driver in MAPPING:
        msg_args = {'old': share_driver, 'new': MAPPING[share_driver]}
        LOG.warning(_LW("Driver path %(old)s is deprecated, update your "
                        "configuration to the new path %(new)s"),
                    msg_args)
        share_driver = MAPPING[share_driver]

    ctxt = context.get_admin_context()
    # Per-backend private storage handed to the driver for its own
    # bookkeeping (keyed on this host / config group).
    private_storage = drivers_private_data.DriverPrivateData(
        context=ctxt, backend_host=self.host,
        config_group=self.configuration.config_group
    )
    self.driver = importutils.import_object(
        share_driver, private_storage=private_storage,
        configuration=self.configuration
    )
def host_passes(self, host_state, filter_properties):
    """Return True if host has sufficient capacity."""
    volume_size = filter_properties.get('size')

    free_space = host_state.free_capacity_gb
    if free_space is None:
        # Fail Safe
        LOG.error(_LE("Free capacity not set: "
                      "volume node info collection broken."))
        return False

    if free_space in ('infinite', 'unknown'):
        # NOTE(zhiteng) for those back-ends cannot report actual
        # available capacity, we assume it is able to serve the
        # request. Even if it was not, the retry mechanism is
        # able to handle the failure by rescheduling
        return True

    # Honour the backend's reserved percentage before comparing.
    reserved = float(host_state.reserved_percentage) / 100
    free = math.floor(free_space * (1 - reserved))
    if free < volume_size:
        LOG.warning(_LW("Insufficient free space for volume creation "
                        "(requested / avail): "
                        "%(requested)s/%(available)s"),
                    {'requested': volume_size, 'available': free})
    return free >= volume_size