def NodeUnstageVolume(self, request, context):
    """CSI NodeUnstageVolume: unmount the staging path, remove its leftover
    bind file or directory, and detach the NVMesh volume from this node."""
    Utils.validate_params_exists(request, ['volume_id', 'staging_target_path'])
    self.logger.debug(
        'NodeUnstageVolume called with request: {}'.format(MessageToJson(request)))

    stage_path = request.staging_target_path
    volume_name = request.volume_id

    # Unmount first (only if the path still exists), then clean up whatever
    # artifact was used for staging: a bind file or a directory.
    if os.path.exists(stage_path):
        FileSystemManager.umount(target=stage_path)

    if os.path.isfile(stage_path):
        self.logger.debug(
            'NodeUnstageVolume removing stage bind file: {}'.format(stage_path))
        os.remove(stage_path)
    elif os.path.isdir(stage_path):
        self.logger.debug(
            'NodeUnstageVolume removing stage dir: {}'.format(stage_path))
        FileSystemManager.remove_dir(stage_path)
    else:
        self.logger.warning(
            'NodeUnstageVolume - mount path {} not found.'.format(stage_path))

    # Detach even if the staging path was already gone.
    Utils.nvmesh_detach_volume(volume_name)
    return NodeUnstageVolumeResponse()
def ControllerUnpublishVolume(self, request, context):
    """CSI ControllerUnpublishVolume: validate that the volume and node
    exist, then acknowledge. No controller-side state is modified here."""
    Utils.validate_params_exists(request, ['node_id', 'volume_id'])

    volume_name = request.volume_id
    node_id = request.node_id

    # Validation only — order matters for which error a caller sees first.
    self._validate_volume_exists(volume_name)
    self._validate_node_exists(node_id)

    return ControllerUnpublishVolumeResponse()
def NodeStageVolume(self, request, context):
    """CSI NodeStageVolume: attach the NVMesh volume locally, wait for IO,
    and for MOUNT-type volumes format the block device and mount it at
    request.staging_target_path.

    BLOCK-type volumes are deliberately not mounted here: NodePublishVolume
    bind-mounts the device straight to the publish path, because Kubernetes
    automatically creates a directory in the staging_path.

    Raises whatever the underlying Utils / FileSystemManager helpers raise
    on attach, format, or mount failure.
    """
    Utils.validate_params_exists(
        request, ['volume_id', 'staging_target_path', 'volume_capability'])

    volume_id = request.volume_id
    staging_target_path = request.staging_target_path
    volume_capability = request.volume_capability

    reqJson = MessageToJson(request)
    self.logger.debug(
        'NodeStageVolume called with request: {}'.format(reqJson))

    access_mode = volume_capability.access_mode.mode
    access_type = self._get_block_or_mount_volume(request)
    nvmesh_volume_name = volume_id
    block_device_path = Utils.get_nvmesh_block_device_path(
        nvmesh_volume_name)

    # run nvmesh attach locally
    requested_nvmesh_access_mode = Consts.AccessMode.to_nvmesh(access_mode)
    Utils.nvmesh_attach_volume(nvmesh_volume_name,
                               requested_nvmesh_access_mode)
    Utils.wait_for_volume_io_enabled(nvmesh_volume_name)

    if access_type == Consts.VolumeAccessType.MOUNT:
        mount_request = volume_capability.mount
        self.logger.info(
            'Requested Mounted FileSystem Volume with fs_type={}'.format(
                mount_request.fs_type))
        fs_type = mount_request.fs_type or 'ext4'

        # NOTE(review): mount_flags is parsed here but never passed to
        # FileSystemManager.mount() below — presumably an omission; kept
        # as-is to preserve behavior. Assumes mount_flags is a
        # space-separated string — TODO confirm against the CSI proto.
        mount_flags = (mount_request.mount_flags.split(' ')
                       if mount_request.mount_flags else [])

        FileSystemManager.format_block_device(block_device_path, fs_type)

        if FileSystemManager.is_mounted(staging_target_path):
            self.logger.warning(
                'path {} is already mounted'.format(staging_target_path))

        FileSystemManager.mount(source=block_device_path,
                                target=staging_target_path)
    elif access_type == Consts.VolumeAccessType.BLOCK:
        self.logger.info('Requested Block Volume')
        # We do not mount here, NodePublishVolume will mount directly from the block device to the publish_path
        # This is because Kubernetes automatically creates a directory in the staging_path
    else:
        # BUG FIX: was self.logger.Info(...) (capital I), which would raise
        # AttributeError instead of logging the unknown access type.
        self.logger.info('Unknown AccessType {}'.format(access_type))

    return NodeStageVolumeResponse()
def ValidateVolumeCapabilities(self, request, context):
    """CSI ValidateVolumeCapabilities: unconditionally confirm the
    requested capabilities by echoing them back — no per-capability
    checks are performed."""
    Utils.validate_params_exists(request,
                                 ['volume_id', 'volume_capabilities'])

    # always return True
    return ValidateVolumeCapabilitiesResponse(
        confirmed=ValidateVolumeCapabilitiesResponse.Confirmed(
            volume_capabilities=request.volume_capabilities))
def NodePublishVolume(self, request, context):
    """CSI NodePublishVolume: bind-mount the staged volume (or, for block
    access, the raw NVMesh block device) onto the pod's target path."""
    Utils.validate_params_exists(request, ['volume_id', 'target_path'])

    self.logger.debug(
        'NodePublishVolume called with request: {}'.format(MessageToJson(request)))

    vol_name = request.volume_id
    stage_path = request.staging_target_path
    publish_path = request.target_path
    capability = request.volume_capability
    access_type = self._get_block_or_mount_volume(request)
    device_path = Utils.get_nvmesh_block_device_path(vol_name)

    if not Utils.is_nvmesh_volume_attached(vol_name):
        raise DriverError(
            StatusCode.NOT_FOUND,
            'nvmesh volume {} was not found under /dev/nvmesh/'.format(vol_name))

    # K8s Bug Workaround: readonly flag is not sent to CSI, so we try to also infer from the AccessMode
    read_only = (request.readonly
                 or capability.access_mode.mode == Consts.AccessMode.MULTI_NODE_READER_ONLY)
    flags = ['-o ro'] if read_only else []

    if access_type == Consts.VolumeAccessType.BLOCK:
        # create an empty file for bind mount
        with open(publish_path, 'w'):
            pass
        # bind directly from block device to publish_path
        self.logger.debug(
            'NodePublishVolume trying to bind mount as block device {} to {}'
            .format(device_path, publish_path))
        FileSystemManager.bind_mount(source=device_path,
                                     target=publish_path,
                                     flags=flags)
    else:
        self.logger.debug(
            'NodePublishVolume trying to bind mount {} to {}'.format(
                stage_path, publish_path))
        FileSystemManager.bind_mount(source=stage_path,
                                     target=publish_path,
                                     flags=flags)

    return NodePublishVolumeResponse()
def NodeUnpublishVolume(self, request, context):
    """CSI NodeUnpublishVolume: unmount the pod's publish (target) path and
    remove the leftover bind file/directory, verifying each removal.

    Raises:
        DriverError(NOT_FOUND): target_path does not exist at all.
        DriverError(INTERNAL): a removal was attempted but the path remains.
    """
    Utils.validate_params_exists(request, ['volume_id', 'target_path'])
    target_path = request.target_path
    reqJson = MessageToJson(request)
    self.logger.debug(
        'NodeUnpublishVolume called with request: {}'.format(reqJson))

    # A missing target path is a hard error for this RPC (unlike unstage).
    if not os.path.exists(target_path):
        raise DriverError(StatusCode.NOT_FOUND,
                          'mount path {} not found'.format(target_path))

    if not FileSystemManager.is_mounted(mount_path=target_path):
        self.logger.debug(
            'NodeUnpublishVolume: {} is already not mounted'.format(
                target_path))
    else:
        FileSystemManager.umount(target=target_path)

    # Block volumes are published via a bind file at <target_path>/mount;
    # remove it first and verify the removal actually took effect.
    block_device_publish_path = target_path + '/mount'
    if os.path.isfile(block_device_publish_path):
        self.logger.debug(
            'NodeUnpublishVolume removing publish bind file: {}'.format(
                block_device_publish_path))
        os.remove(block_device_publish_path)
        if os.path.isfile(block_device_publish_path):
            raise DriverError(StatusCode.INTERNAL,
                              'node-driver unable to delete publish path')

    # Then remove the publish path itself: a directory (filesystem volume)
    # or a file (block volume bind target), again verifying the delete.
    if os.path.isdir(target_path):
        self.logger.debug(
            'NodeUnpublishVolume removing publish dir: {}'.format(
                target_path))
        FileSystemManager.remove_dir(target_path)
        if os.path.isdir(target_path):
            raise DriverError(
                StatusCode.INTERNAL,
                'node-driver unable to delete publish directory')
    elif os.path.isfile(target_path):
        self.logger.debug(
            'NodeUnpublishVolume removing publish file: {}'.format(
                target_path))
        os.remove(target_path)

    self.logger.debug(
        'NodeUnpublishVolume finished successfully for request: {}'.format(
            reqJson))
    return NodeUnpublishVolumeResponse()
def NodeStageVolume(self, request, context):
    """CSI NodeStageVolume (zone-aware variant): attach the NVMesh volume if
    needed, wait for IO, and for MOUNT-type volumes format + mount it at the
    staging path; on any staging failure, best-effort unmount/detach and
    re-raise the original error.

    BLOCK-type volumes are not mounted here — NodePublishVolume bind-mounts
    the device directly to the publish path.
    """
    Utils.validate_params_exists(
        request, ['volume_id', 'staging_target_path', 'volume_capability'])
    # volume_id here is a CO (container-orchestrator) id that encodes both
    # the zone and the NVMesh volume name.
    zone, nvmesh_volume_name = Utils.zone_and_vol_name_from_co_id(
        request.volume_id)
    staging_target_path = request.staging_target_path
    volume_capability = request.volume_capability
    secrets = request.secrets  # NOTE(review): unused below
    publish_context = request.publish_context  # NOTE(review): unused below
    volume_context = request.volume_context
    reqJson = MessageToJson(request)
    self.logger.debug(
        'NodeStageVolume called with request: {}'.format(reqJson))
    access_mode = volume_capability.access_mode.mode
    access_type = self._get_block_or_mount_volume(request)
    block_device_path = Utils.get_nvmesh_block_device_path(
        nvmesh_volume_name)

    # Attach only if not already attached (idempotent staging).
    if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
        # run nvmesh attach locally
        requested_nvmesh_access_mode = Consts.AccessMode.to_nvmesh(
            access_mode)
        Utils.nvmesh_attach_volume(nvmesh_volume_name,
                                   requested_nvmesh_access_mode)

    # Everything after the attach is wrapped so a failure can trigger
    # cleanup + detach before the error propagates.
    try:
        Utils.wait_for_volume_io_enabled(nvmesh_volume_name)
        if access_type == Consts.VolumeAccessType.MOUNT:
            mount_request = volume_capability.mount
            self.logger.info(
                'Requested Mounted FileSystem Volume with fs_type={}'.
                format(mount_request.fs_type))
            fs_type = mount_request.fs_type or Consts.FSType.EXT4
            mount_permissions, mount_options = self._parse_mount_options(
                mount_request)
            # mkfs options come from the StorageClass via volume_context.
            mkfs_options = volume_context.get('mkfsOptions', '')
            FileSystemManager.format_block_device(block_device_path,
                                                  fs_type, mkfs_options)
            if FileSystemManager.is_mounted(staging_target_path):
                self.logger.warning('path {} is already mounted'.format(
                    staging_target_path))
            FileSystemManager.mount(source=block_device_path,
                                    target=staging_target_path,
                                    mount_options=mount_options)
            FileSystemManager.chmod(
                mount_permissions or Consts.DEFAULT_MOUNT_PERMISSIONS,
                staging_target_path)
        elif access_type == Consts.VolumeAccessType.BLOCK:
            self.logger.info('Requested Block Volume')
            # We do not mount here, NodePublishVolume will mount directly from the block device to the publish_path
            # This is because Kubernetes automatically creates a directory in the staging_path
        else:
            self.logger.info('Unknown AccessType {}'.format(access_type))
    except Exception as staging_err:
        # Cleanup - un-mount and detach the volume
        try:
            if FileSystemManager.is_mounted(staging_target_path):
                FileSystemManager.umount(staging_target_path)
            Utils.nvmesh_detach_volume(nvmesh_volume_name)
        except Exception as cleanup_err:
            # Cleanup failure is only logged — the original error wins.
            self.logger.warning(
                'Failed to cleanup and detach device after attached and staging failed. Error: %s'
                % cleanup_err)
        # Re-raise the initial exception
        raise staging_err

    self.logger.debug(
        'NodeStageVolume finished successfully for request: {}'.format(
            reqJson))
    return NodeStageVolumeResponse()
def NodePublishVolume(self, request, context):
    # NodePublishVolume: This method is called to mount the volume from staging to target path.
    """CSI NodePublishVolume (zone-aware variant): bind-mount the staged
    volume — or, for block access, the raw NVMesh device — onto the pod's
    target path, applying read-only mount options and permissions.

    Raises:
        DriverError(NOT_FOUND): the NVMesh device is not attached locally.
    """
    Utils.validate_params_exists(request, ['volume_id', 'target_path'])
    # volume_id is a CO id encoding both zone and NVMesh volume name.
    zone, nvmesh_volume_name = Utils.zone_and_vol_name_from_co_id(
        request.volume_id)
    staging_target_path = request.staging_target_path
    publish_path = request.target_path
    volume_capability = request.volume_capability
    access_mode = volume_capability.access_mode.mode
    readonly = request.readonly
    access_type = self._get_block_or_mount_volume(request)
    volume_context = request.volume_context
    podInfo = self._extract_pod_info_from_volume_context(volume_context)

    # K8s Bug Workaround: readonly flag is not sent to CSI, so we try to also infer from the AccessMode
    is_readonly = readonly or access_mode == Consts.AccessMode.MULTI_NODE_READER_ONLY

    block_device_path = Utils.get_nvmesh_block_device_path(
        nvmesh_volume_name)
    reqJson = MessageToJson(request)
    self.logger.debug(
        'NodePublishVolume called with request: {}'.format(reqJson))
    self.logger.debug('NodePublishVolume podInfo: {}'.format(podInfo))

    if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
        raise DriverError(
            StatusCode.NOT_FOUND,
            'nvmesh volume {} was not found under /dev/nvmesh/'.format(
                nvmesh_volume_name))

    # NOTE(review): volume_capability.mount is parsed even for BLOCK access,
    # where it is presumably an empty message — confirm against the proto.
    requested_mount_permissions, mount_options = self._parse_mount_options(
        volume_capability.mount)
    if is_readonly:
        mount_options.append('ro')

    if access_type == Consts.VolumeAccessType.BLOCK:
        # create an empty file for bind mount of a block device
        with open(publish_path, 'w'):
            pass
        # bind directly from block device to publish_path
        self.logger.debug(
            'NodePublishVolume trying to bind mount as block device {} to {}'
            .format(block_device_path, publish_path))
        FileSystemManager.bind_mount(source=block_device_path,
                                     target=publish_path,
                                     mount_options=mount_options)
    else:
        self.logger.debug(
            'NodePublishVolume creating directory for bind mount at {}'.
            format(publish_path))
        # create an empty dir for bind mount of a file system
        if not os.path.isdir(publish_path):
            os.makedirs(publish_path)
        self.logger.debug(
            'NodePublishVolume trying to bind mount {} to {}'.format(
                staging_target_path, publish_path))
        FileSystemManager.bind_mount(source=staging_target_path,
                                     target=publish_path,
                                     mount_options=mount_options)
        # NOTE(review): chmod placed inside the filesystem branch, matching
        # the chmod in NodeStageVolume's MOUNT branch — the collapsed source
        # is ambiguous about this nesting; confirm against upstream.
        if not is_readonly:
            FileSystemManager.chmod(
                requested_mount_permissions
                or Consts.DEFAULT_MOUNT_PERMISSIONS, publish_path)

    self.logger.debug(
        'NodePublishVolume finished successfully for request: {}'.format(
            reqJson))
    return NodePublishVolumeResponse()