def ControllerExpandVolume(self, request, context):
    """Resize an NVMesh volume to the capacity requested by the CO.

    Updates the volume's capacity through the zone's management API and
    reports whether the CO must follow up with NodeExpandVolume: only
    file-system volumes need an on-node resize; raw block volumes do not.

    Raises:
        DriverError: with StatusCode.NOT_FOUND if the management API
            returns an error from the update call.
    """
    capacity_in_bytes = request.capacity_range.required_bytes
    zone, nvmesh_vol_name = Utils.zone_and_vol_name_from_co_id(
        request.volume_id)
    log = self.logger.getChild('ExpandVolume-%s' % nvmesh_vol_name)
    volume_api = VolumeAPIPool.get_volume_api_for_zone(zone, log)
    volume = self.get_nvmesh_volume(volume_api, nvmesh_vol_name)

    # Call Node Expansion Method to Expand a FileSystem
    # For a Block Device there is no need to do anything on the node.
    # (Direct boolean instead of the redundant `True if … else False`.)
    node_expansion_required = 'fsType' in volume.csi_metadata

    # Extend Volume
    volume.capacity = capacity_in_bytes

    # Use the per-request child logger consistently (the original mixed
    # `self.logger` with the `log` child created above).
    log.debug("ControllerExpandVolume volume={}".format(str(volume)))

    err, out = volume_api.update([volume])
    if err:
        raise DriverError(StatusCode.NOT_FOUND, err)

    log.debug(
        "ControllerExpandVolumeResponse: capacity_in_bytes={}, node_expansion_required={}"
        .format(capacity_in_bytes, node_expansion_required))

    return ControllerExpandVolumeResponse(
        capacity_bytes=capacity_in_bytes,
        node_expansion_required=node_expansion_required)
def DeleteVolume(self, request, context):
    """Delete an NVMesh volume identified by the CO volume_id.

    Idempotent per the CSI spec: attempting to delete a volume that no
    longer exists is treated as success. The local volume-to-zone
    mapping entry is removed in every successful path.

    Raises:
        DriverError: INTERNAL on a transport/API-level error,
            FAILED_PRECONDITION when the backend rejects the deletion
            for any reason other than the volume being absent.
    """
    Utils.validate_param_exists(request, 'volume_id')
    volume_id = request.volume_id
    log = self.logger.getChild("DeleteVolume-%s" % volume_id)
    log.debug('delete called')
    zone, nvmesh_vol_name = Utils.zone_and_vol_name_from_co_id(volume_id)

    volume_api = VolumeAPIPool.get_volume_api_for_zone(zone, log)
    err, out = volume_api.delete([NVMeshVolume(_id=nvmesh_vol_name)])
    if err:
        log.error(err)
        raise DriverError(StatusCode.INTERNAL, err)

    log.debug(out)

    if not out[0]['success']:
        err = out[0]['error']
        # Idempotency - Trying to remove a Volume that doesn't exists,
        # perhaps already deleted - should return success.
        # (Dead `pass` after the debug log removed.)
        if err == "Couldn't find the specified volume" or err.startswith(
                "Failed to find marked volume"):
            log.debug("Volume already deleted")
        else:
            raise DriverError(StatusCode.FAILED_PRECONDITION, err)
    else:
        log.debug("Volume deleted successfully from zone %s" % zone)

    self.volume_to_zone_mapping.remove(nvmesh_vol_name)
    return DeleteVolumeResponse()
def NodeExpandVolume(self, request, context):
    """Expand the file system on an attached NVMesh volume.

    If this function was called, assume the Controller checked that this
    volume is a FileSystem (mounted) volume, so the file system is
    resized here to fill the already-grown block device.

    The resize is retried for a bounded number of attempts because the
    resize tool may report 'Nothing to do!' while the device's new size
    is not yet visible to the node.

    Raises:
        DriverError: INTERNAL when the file system still is not resized
            after all attempts (back-off exhausted).
    """
    # Unused locals from the original (volume_id, volume_path,
    # capacity_range) removed - only volume_id is needed, read directly
    # from the request below.
    reqJson = MessageToJson(request)
    self.logger.debug(
        'NodeExpandVolume called with request: {}'.format(reqJson))

    zone, nvmesh_vol_name = Utils.zone_and_vol_name_from_co_id(
        request.volume_id)
    block_device_path = Utils.get_nvmesh_block_device_path(nvmesh_vol_name)
    self.logger.debug(
        'NodeExpandVolume zone: {} nvmesh_vol_name: {} block_device_path: {}'
        .format(zone, nvmesh_vol_name, block_device_path))

    fs_type = FileSystemManager.get_fs_type(block_device_path)
    self.logger.debug('fs_type={}'.format(fs_type))

    attempts_left = 20
    resized = False
    while not resized and attempts_left:
        exit_code, stdout, stderr = FileSystemManager.expand_file_system(
            block_device_path, fs_type)
        if 'Nothing to do!' in stderr:
            # The resize tool saw no size change yet - wait and retry.
            block_device_size = FileSystemManager.get_block_device_size(
                block_device_path)
            self.logger.warning(
                'File System not resized. block device size is {}'.format(
                    block_device_size))
            attempts_left -= 1
            Utils.interruptable_sleep(2)
        else:
            resized = True

    if not attempts_left:
        raise DriverError(
            StatusCode.INTERNAL,
            'Back-Off trying to expand {} FileSystem on volume {}'.format(
                fs_type, block_device_path))

    self.logger.debug(
        'Finished Expanding File System of type {} on volume {}'.format(
            fs_type, block_device_path))
    return NodeExpandVolumeResponse()
def NodeUnstageVolume(self, request, context):
    """Unmount the volume from the staging path and detach it.

    Reverses NodeStageVolume: unmounts the staging target, removes the
    stage bind file (block volumes) or stage directory (file-system
    volumes), and detaches the NVMesh volume from this node.
    """
    Utils.validate_params_exists(request,
                                 ['volume_id', 'staging_target_path'])

    reqJson = MessageToJson(request)
    self.logger.debug(
        'NodeUnstageVolume called with request: {}'.format(reqJson))

    staging_target_path = request.staging_target_path
    zone, nvmesh_volume_name = Utils.zone_and_vol_name_from_co_id(
        request.volume_id)

    if os.path.exists(staging_target_path):
        FileSystemManager.umount(target=staging_target_path)

        # A regular file at the staging path was a bind-mount anchor for
        # a block volume; a directory was the mount point for a
        # file-system volume.
        if os.path.isfile(staging_target_path):
            self.logger.debug(
                'NodeUnstageVolume removing stage bind file: {}'.format(
                    staging_target_path))
            os.remove(staging_target_path)
        elif os.path.isdir(staging_target_path):
            self.logger.debug(
                'NodeUnstageVolume removing stage dir: {}'.format(
                    staging_target_path))
            FileSystemManager.remove_dir(staging_target_path)
    else:
        # Idempotency: nothing mounted at the staging path - log and
        # continue to detach anyway.
        self.logger.warning(
            'NodeUnstageVolume - mount path {} not found.'.format(
                staging_target_path))

    # Detach runs regardless of whether the staging path existed.
    Utils.nvmesh_detach_volume(nvmesh_volume_name)

    self.logger.debug(
        'NodeUnstageVolume finished successfully for request: {}'.format(
            reqJson))
    return NodeUnstageVolumeResponse()
def NodeStageVolume(self, request, context):
    """Attach the NVMesh volume to this node and stage it.

    For MOUNT access: formats the block device (if needed), mounts it on
    the staging path and applies mount permissions. For BLOCK access: no
    mount happens here - NodePublishVolume bind-mounts the block device
    directly to the publish path. On any staging failure the volume is
    unmounted (if mounted) and detached before the error is re-raised.
    """
    Utils.validate_params_exists(
        request, ['volume_id', 'staging_target_path', 'volume_capability'])

    zone, nvmesh_volume_name = Utils.zone_and_vol_name_from_co_id(
        request.volume_id)
    staging_target_path = request.staging_target_path
    volume_capability = request.volume_capability
    # secrets/publish_context are read but not used below - presumably
    # kept for interface completeness; verify before removing.
    secrets = request.secrets
    publish_context = request.publish_context
    volume_context = request.volume_context

    reqJson = MessageToJson(request)
    self.logger.debug(
        'NodeStageVolume called with request: {}'.format(reqJson))

    access_mode = volume_capability.access_mode.mode
    access_type = self._get_block_or_mount_volume(request)
    block_device_path = Utils.get_nvmesh_block_device_path(
        nvmesh_volume_name)

    if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
        # run nvmesh attach locally
        requested_nvmesh_access_mode = Consts.AccessMode.to_nvmesh(
            access_mode)
        Utils.nvmesh_attach_volume(nvmesh_volume_name,
                                   requested_nvmesh_access_mode)

    try:
        # Block until the attached volume reports IO enabled.
        Utils.wait_for_volume_io_enabled(nvmesh_volume_name)

        if access_type == Consts.VolumeAccessType.MOUNT:
            mount_request = volume_capability.mount
            self.logger.info(
                'Requested Mounted FileSystem Volume with fs_type={}'.
                format(mount_request.fs_type))
            # Default to ext4 when the CO did not specify a fs_type.
            fs_type = mount_request.fs_type or Consts.FSType.EXT4
            mount_permissions, mount_options = self._parse_mount_options(
                mount_request)
            mkfs_options = volume_context.get('mkfsOptions', '')

            FileSystemManager.format_block_device(block_device_path,
                                                  fs_type, mkfs_options)

            if FileSystemManager.is_mounted(staging_target_path):
                # Logged only - the mount below is still attempted.
                self.logger.warning('path {} is already mounted'.format(
                    staging_target_path))

            FileSystemManager.mount(source=block_device_path,
                                    target=staging_target_path,
                                    mount_options=mount_options)

            FileSystemManager.chmod(
                mount_permissions or Consts.DEFAULT_MOUNT_PERMISSIONS,
                staging_target_path)
        elif access_type == Consts.VolumeAccessType.BLOCK:
            self.logger.info('Requested Block Volume')
            # We do not mount here, NodePublishVolume will mount directly
            # from the block device to the publish_path
            # This is because Kubernetes automatically creates a directory
            # in the staging_path
        else:
            # Unknown access type is logged, not raised - staging still
            # returns success below.
            self.logger.info('Unknown AccessType {}'.format(access_type))
    except Exception as staging_err:
        # Cleanup - un-mount and detach the volume
        try:
            if FileSystemManager.is_mounted(staging_target_path):
                FileSystemManager.umount(staging_target_path)

            Utils.nvmesh_detach_volume(nvmesh_volume_name)
        except Exception as cleanup_err:
            # Best-effort cleanup: a failure here must not mask the
            # original staging error.
            self.logger.warning(
                'Failed to cleanup and detach device after attached and staging failed. Error: %s'
                % cleanup_err)

        # Re-raise the initial exception
        raise staging_err

    self.logger.debug(
        'NodeStageVolume finished successfully for request: {}'.format(
            reqJson))
    return NodeStageVolumeResponse()
def NodePublishVolume(self, request, context):
    # NodePublishVolume: This method is called to mount the volume from
    # staging to target path.
    """Bind-mount the staged volume to the pod's target path.

    BLOCK volumes are bind-mounted from the raw block device onto an
    empty file created at the target path; MOUNT volumes are bind-mounted
    from the staging directory onto a directory at the target path.
    Read-only is honored both from the request flag and (workaround) from
    the access mode.
    """
    Utils.validate_params_exists(request, ['volume_id', 'target_path'])

    zone, nvmesh_volume_name = Utils.zone_and_vol_name_from_co_id(
        request.volume_id)
    staging_target_path = request.staging_target_path
    publish_path = request.target_path
    volume_capability = request.volume_capability
    access_mode = volume_capability.access_mode.mode
    readonly = request.readonly
    access_type = self._get_block_or_mount_volume(request)
    volume_context = request.volume_context
    podInfo = self._extract_pod_info_from_volume_context(volume_context)

    # K8s Bug Workaround: readonly flag is not sent to CSI, so we try to
    # also infer from the AccessMode
    is_readonly = readonly or access_mode == Consts.AccessMode.MULTI_NODE_READER_ONLY

    block_device_path = Utils.get_nvmesh_block_device_path(
        nvmesh_volume_name)

    reqJson = MessageToJson(request)
    self.logger.debug(
        'NodePublishVolume called with request: {}'.format(reqJson))
    self.logger.debug('NodePublishVolume podInfo: {}'.format(podInfo))

    if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
        raise DriverError(
            StatusCode.NOT_FOUND,
            'nvmesh volume {} was not found under /dev/nvmesh/'.format(
                nvmesh_volume_name))

    # NOTE(review): for BLOCK access volume_capability.mount is presumably
    # an empty/default message here - confirm _parse_mount_options
    # tolerates it.
    requested_mount_permissions, mount_options = self._parse_mount_options(
        volume_capability.mount)

    if is_readonly:
        mount_options.append('ro')

    if access_type == Consts.VolumeAccessType.BLOCK:
        # create an empty file for bind mount of a block device
        with open(publish_path, 'w'):
            pass

        # bind directly from block device to publish_path
        self.logger.debug(
            'NodePublishVolume trying to bind mount as block device {} to {}'
            .format(block_device_path, publish_path))
        FileSystemManager.bind_mount(source=block_device_path,
                                     target=publish_path,
                                     mount_options=mount_options)
    else:
        self.logger.debug(
            'NodePublishVolume creating directory for bind mount at {}'.
            format(publish_path))
        # create an empty dir for bind mount of a file system
        if not os.path.isdir(publish_path):
            os.makedirs(publish_path)

        self.logger.debug(
            'NodePublishVolume trying to bind mount {} to {}'.format(
                staging_target_path, publish_path))
        FileSystemManager.bind_mount(source=staging_target_path,
                                     target=publish_path,
                                     mount_options=mount_options)

    if not is_readonly:
        # Apply requested permissions on the published path (writable
        # volumes only).
        FileSystemManager.chmod(
            requested_mount_permissions or Consts.DEFAULT_MOUNT_PERMISSIONS,
            publish_path)

    self.logger.debug(
        'NodePublishVolume finished successfully for request: {}'.format(
            reqJson))
    return NodePublishVolumeResponse()