def DeleteVolume(self, request, context):
    Utils.validate_param_exists(request, 'volume_id')
    volume_id = request.volume_id
    log = self.logger.getChild("DeleteVolume-%s" % volume_id)
    log.debug('delete called')
    zone, nvmesh_vol_name = Utils.zone_and_vol_name_from_co_id(volume_id)
    #secrets = request.secrets

    volume_api = VolumeAPIPool.get_volume_api_for_zone(zone, log)
    err, out = volume_api.delete([NVMeshVolume(_id=nvmesh_vol_name)])
    if err:
        log.error(err)
        raise DriverError(StatusCode.INTERNAL, err)

    log.debug(out)

    if not out[0]['success']:
        err = out[0]['error']
        if err == "Couldn't find the specified volume" or err.startswith("Failed to find marked volume"):
            # Idempotency - trying to remove a volume that doesn't exist, perhaps already deleted,
            # should return success
            log.debug("Volume already deleted")
            pass
        else:
            raise DriverError(StatusCode.FAILED_PRECONDITION, err)
    else:
        log.debug("Volume deleted successfully from zone %s" % zone)

    self.volume_to_zone_mapping.remove(nvmesh_vol_name)
    return DeleteVolumeResponse()
def DeleteVolume(self, request, context):
    Utils.validate_param_exists(request, 'volume_id')
    volume_id = request.volume_id
    nvmesh_vol_name = volume_id
    #secrets = request.secrets

    err, out = VolumeAPI().delete([NVMeshVolume(_id=nvmesh_vol_name)])
    if err:
        self.logger.error(err)
        raise DriverError(StatusCode.INTERNAL, err)

    self.logger.debug(out)

    if not out[0]['success']:
        err = out[0]['error']
        if err == "Couldn't find the specified volume":
            # Idempotency - trying to remove a volume that doesn't exist, perhaps already deleted,
            # should return success
            pass
        else:
            raise DriverError(StatusCode.FAILED_PRECONDITION, err)

    return DeleteVolumeResponse()
def create_volume_on_a_valid_zone(self, volume, zones, log):
    zones_left = set(zones)
    while True:
        selected_zone = ZoneSelectionManager.pick_zone(list(zones_left))
        zones_left.remove(selected_zone)
        try:
            is_zone_disabled = self.topology_service.topology.is_zone_disabled(selected_zone)
            if is_zone_disabled:
                raise DriverError(
                    StatusCode.RESOURCE_EXHAUSTED,
                    'Zone {} is disabled. Skipping this zone'.format(selected_zone))

            self.create_volume_in_zone(volume, selected_zone, log)
            return selected_zone
        except DriverError as ex:
            if ex.code != StatusCode.RESOURCE_EXHAUSTED:
                raise

            log.warning(ex.message)
            if len(zones_left):
                log.info('retrying volume creation for {} on zones: {}'.format(
                    volume.name, ','.join(list(zones_left))))
            else:
                raise DriverError(
                    StatusCode.RESOURCE_EXHAUSTED,
                    'Failed to create volume on all zones ({})'.format(', '.join(zones)))
def expand_file_system(block_device_path, fs_type):
    fs_type = fs_type.strip()

    if fs_type == 'devtmpfs':
        raise DriverError(
            StatusCode.INVALID_ARGUMENT,
            'Device not formatted with a FileSystem, found fs type {}'.format(fs_type))
    elif fs_type.startswith('ext'):
        cmd = 'resize2fs {}'.format(block_device_path)
    elif fs_type == 'xfs':
        cmd = 'xfs_growfs {}'.format(block_device_path)
    else:
        raise DriverError(StatusCode.INVALID_ARGUMENT, 'unknown fs_type {}'.format(fs_type))

    exit_code, stdout, stderr = Utils.run_command(cmd)
    logger.debug("resize file-system finished {} {} {}".format(exit_code, stdout, stderr))

    if exit_code != 0:
        raise DriverError(
            StatusCode.INTERNAL,
            'Error expanding File System {} on block device {}'.format(fs_type, block_device_path))

    return exit_code, stdout, stderr
def NodeUnpublishVolume(self, request, context):
    Utils.validate_params_exists(request, ['volume_id', 'target_path'])

    target_path = request.target_path

    reqJson = MessageToJson(request)
    self.logger.debug('NodeUnpublishVolume called with request: {}'.format(reqJson))

    if not os.path.exists(target_path):
        raise DriverError(StatusCode.NOT_FOUND, 'mount path {} not found'.format(target_path))

    if not FileSystemManager.is_mounted(mount_path=target_path):
        self.logger.debug('NodeUnpublishVolume: {} is already not mounted'.format(target_path))
    else:
        FileSystemManager.umount(target=target_path)

    block_device_publish_path = target_path + '/mount'

    if os.path.isfile(block_device_publish_path):
        self.logger.debug('NodeUnpublishVolume removing publish bind file: {}'.format(block_device_publish_path))
        os.remove(block_device_publish_path)
        if os.path.isfile(block_device_publish_path):
            raise DriverError(StatusCode.INTERNAL, 'node-driver unable to delete publish path')

    if os.path.isdir(target_path):
        self.logger.debug('NodeUnpublishVolume removing publish dir: {}'.format(target_path))
        FileSystemManager.remove_dir(target_path)
        if os.path.isdir(target_path):
            raise DriverError(StatusCode.INTERNAL, 'node-driver unable to delete publish directory')
    elif os.path.isfile(target_path):
        self.logger.debug('NodeUnpublishVolume removing publish file: {}'.format(target_path))
        os.remove(target_path)

    self.logger.debug('NodeUnpublishVolume finished successfully for request: {}'.format(reqJson))
    return NodeUnpublishVolumeResponse()
def _build_metadata_field(self, req_dict):
    capabilities = req_dict['volumeCapabilities']
    csi_metadata = {
        'csi_name': req_dict['name'],
    }

    for param_name in req_dict['parameters'].keys():
        csi_metadata[param_name] = req_dict['parameters'][param_name]

    for capability in capabilities:
        if 'mount' in capability:
            csi_metadata['fsType'] = capability['mount'].get('fsType', Consts.FSType.EXT4)
            csi_metadata['volumeMode'] = 'mount'
        elif 'block' in capability:
            # mark the block request so the contradiction check below can detect it
            csi_metadata['block'] = True
            csi_metadata['volumeMode'] = 'block'

        if 'accessMode' in capability:
            access_mode = capability['accessMode']['mode']
            if Consts.AccessMode.fromCsiString(access_mode) not in Consts.AccessMode.allowed_access_modes():
                self.logger.warning(
                    'Requested mode {} is not enforced by NVMesh Storage backend'.format(access_mode))

    if 'fsType' in csi_metadata and 'block' in csi_metadata:
        raise DriverError(
            StatusCode.INVALID_ARGUMENT,
            'Error: Contradicting capabilities - both Block Volume and FileSystem Volume were requested for volume {}. request: {}'.format(
                req_dict['name'], req_dict))

    csi_metadata = Utils.sanitize_json_object_keys(csi_metadata)
    return csi_metadata
def get_zone_from_topology(logger, topology_requirements):
    if Config.TOPOLOGY_TYPE == consts.TopologyType.SINGLE_ZONE_CLUSTER:
        return consts.SINGLE_CLUSTER_ZONE_NAME

    # The provisioner sidecar container should have the --strict-topology flag set.
    # If volumeBindingMode is Immediate - the entire cluster topology will be received.
    # If volumeBindingMode is WaitForFirstConsumer - only the topology of the node to which the pod is scheduled will be given.
    try:
        topology_key = TopologyUtils.get_topology_key()
        preferred_topologies = topology_requirements.get('preferred')
        if len(preferred_topologies) == 1:
            selected_zone = preferred_topologies[0]['segments'][topology_key]
        else:
            zones = map(lambda t: t['segments'][topology_key], preferred_topologies)
            selected_zone = ZoneSelectionManager.pick_zone(zones)
    except Exception as ex:
        raise ValueError('Failed to get zone from topology. Error: %s' % ex)

    if not selected_zone:
        raise DriverError(StatusCode.INVALID_ARGUMENT, 'Failed to get zone from topology')

    logger.debug('_get_zone_from_topology selected zone is {}'.format(selected_zone))
    return selected_zone
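For reference, a hedged sketch of the dict shape this function expects: the CSI TopologyRequirement converted to a dict, with 'preferred' entries each carrying a 'segments' map keyed by the driver's topology key. The topology key and zone names below are illustrative placeholders, not values taken from the driver.

# Illustrative only - assumed shape of `topology_requirements`; key and zone names are hypothetical.
example_topology_requirements = {
    'requisite': [
        {'segments': {'nvmesh-csi.example.com/zone': 'zone_1'}},
        {'segments': {'nvmesh-csi.example.com/zone': 'zone_2'}},
    ],
    'preferred': [
        {'segments': {'nvmesh-csi.example.com/zone': 'zone_1'}},
    ],
}
# With a single 'preferred' entry, get_zone_from_topology returns that zone directly;
# with several, ZoneSelectionManager.pick_zone chooses among them.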
def CreateVolume(self, request, context):
    request_uuid = str(uuid.uuid4())[:8]
    Utils.validate_param_exists(request, 'name')
    request_name = request.name
    nvmesh_vol_name = Utils.volume_id_to_nvmesh_name(request_name)
    log = self.logger.getChild("CreateVolume:%s(request:%s)" % (request_name, request_uuid))

    volume_cache = self.volume_to_zone_mapping.get_or_create_new(nvmesh_vol_name)
    if volume_cache.lock.locked():
        log.debug("volume already has a request in progress, waiting for lock to be released")

    with volume_cache.lock:
        log.debug("processing request")
        if volume_cache.csi_volume:
            if volume_cache.csi_volume.capacity_bytes != self._parse_required_capacity(request.capacity_range):
                raise DriverError(StatusCode.FAILED_PRECONDITION, 'Volume already exists with different capacity')

            log.info('Returning volume from cache')
            return CreateVolumeResponse(volume=volume_cache.csi_volume)

        csiVolume = self.do_create_volume(log, nvmesh_vol_name, request)
        volume_cache.csi_volume = csiVolume
        return CreateVolumeResponse(volume=csiVolume)
def ControllerExpandVolume(self, request, context):
    capacity_in_bytes = request.capacity_range.required_bytes
    zone, nvmesh_vol_name = Utils.zone_and_vol_name_from_co_id(request.volume_id)
    log = self.logger.getChild('ExpandVolume-%s' % nvmesh_vol_name)
    volume_api = VolumeAPIPool.get_volume_api_for_zone(zone, log)
    volume = self.get_nvmesh_volume(volume_api, nvmesh_vol_name)

    # Call the Node Expansion method only to expand a FileSystem.
    # For a Block Device there is no need to do anything on the node.
    node_expansion_required = True if 'fsType' in volume.csi_metadata else False

    # Extend the volume
    volume.capacity = capacity_in_bytes

    self.logger.debug("ControllerExpandVolume volume={}".format(str(volume)))
    err, out = volume_api.update([volume])
    if err:
        raise DriverError(StatusCode.NOT_FOUND, err)

    self.logger.debug(
        "ControllerExpandVolumeResponse: capacity_in_bytes={}, node_expansion_required={}".format(
            capacity_in_bytes, node_expansion_required))

    return ControllerExpandVolumeResponse(
        capacity_bytes=capacity_in_bytes,
        node_expansion_required=node_expansion_required)
def _validate_node_exists(self, node_id):
    filterObj = [MongoObj(field='node_id', value=node_id)]
    projection = [MongoObj(field='_id', value=1)]

    err, matches = TargetAPI().get(filter=filterObj, projection=projection)

    if err or not len(matches):
        raise DriverError(StatusCode.NOT_FOUND, 'Could not find Node with id {}'.format(node_id))
def _validate_volume_exists(self, nvmesh_vol_name):
    filterObj = [MongoObj(field='_id', value=nvmesh_vol_name)]
    projection = [MongoObj(field='_id', value=1)]

    err, out = VolumeAPI().get(filter=filterObj, projection=projection)

    if err or not len(out):
        raise DriverError(
            StatusCode.NOT_FOUND,
            'Could not find Volume with id {}'.format(nvmesh_vol_name))
def get_nvmesh_volume(self, volume_api, nvmesh_vol_name):
    filterObj = [MongoObj(field='_id', value=nvmesh_vol_name)]
    err, out = volume_api.get(filter=filterObj)
    if err:
        raise DriverError(StatusCode.INTERNAL, err)
    if not isinstance(out, list):
        raise DriverError(StatusCode.INTERNAL, out)
    if not len(out):
        raise DriverError(
            StatusCode.NOT_FOUND,
            'Volume {} Could not be found'.format(nvmesh_vol_name))

    return out[0]
def _handle_create_volume_errors(self, err, data, volume, zone, mgmt_server, log):
    failed_to_create_msg = 'Failed to create volume {vol_name} in zone {zone} ({mgmt})'.format(
        vol_name=volume.name, zone=zone, mgmt=mgmt_server)

    SCHEMA_ERROR = 422

    if err:
        if err.get('code') in [SCHEMA_ERROR]:
            raise DriverError(
                StatusCode.RESOURCE_EXHAUSTED,
                failed_to_create_msg + '. Response: {} Volume Requested: {}'.format(err, str(volume)))
        else:
            # Failed to connect to Management, or another HTTP error
            self.topology_service.topology.disable_zone(zone)
            raise DriverError(
                StatusCode.RESOURCE_EXHAUSTED,
                '{} Error: {}'.format(failed_to_create_msg, err))
    else:
        # Management returned a response
        self.topology_service.topology.make_sure_zone_enabled(zone)

        if not type(data) == list or not data[0].get('success'):
            volume_already_exists = 'Name already Exists' in data[0].get('error') \
                or 'duplicate key error' in json.dumps(data[0].get('error'))

            if volume_already_exists:
                existing_capacity = self._get_nvmesh_volume_capacity(volume.name, log, zone)
                if volume.capacity == existing_capacity:
                    # Idempotency - same Name, same Capacity - return success
                    pass
                else:
                    raise DriverError(
                        StatusCode.ALREADY_EXISTS,
                        'Volume already exists with different capacity. Details: {}'.format(data))
            else:
                raise DriverError(
                    StatusCode.RESOURCE_EXHAUSTED,
                    failed_to_create_msg + '. Response: {} Volume Requested: {}'.format(data, str(volume)))
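A hedged sketch of the (err, data) pair this handler inspects. The shapes are inferred only from the checks in this function and in DeleteVolume above (err as a dict carrying 'code', data as a list of per-volume results with 'success'/'error'); the concrete values are illustrative, not taken from the NVMesh API documentation.

# Illustrative only - response shapes inferred from the checks above.
# Success:
#   err = None
#   data = [{'success': True}]
# Name collision (treated as idempotent success when capacity matches):
#   err = None
#   data = [{'success': False, 'error': 'Name already Exists'}]
# Management connection failure (the zone gets disabled):
#   err = {'code': 503, 'message': 'connection error'}
#   data = None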
def _get_block_or_mount_volume(self, request):
    volume_capability = request.volume_capability

    if volume_capability.HasField('mount'):
        return Consts.VolumeAccessType.MOUNT
    elif volume_capability.HasField('block'):
        return Consts.VolumeAccessType.BLOCK
    else:
        raise DriverError(
            StatusCode.INVALID_ARGUMENT,
            'at least one of volume_capability.block, volume_capability.mount must be set')
def NodePublishVolume(self, request, context):
    # NodePublishVolume: this method is called to mount the volume from the staging path to the target path.
    Utils.validate_params_exists(request, ['volume_id', 'target_path'])

    volume_id = request.volume_id
    nvmesh_volume_name = volume_id
    staging_target_path = request.staging_target_path
    publish_path = request.target_path
    volume_capability = request.volume_capability
    access_mode = volume_capability.access_mode.mode
    readonly = request.readonly
    access_type = self._get_block_or_mount_volume(request)

    block_device_path = Utils.get_nvmesh_block_device_path(nvmesh_volume_name)

    reqJson = MessageToJson(request)
    self.logger.debug('NodePublishVolume called with request: {}'.format(reqJson))

    if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
        raise DriverError(
            StatusCode.NOT_FOUND,
            'nvmesh volume {} was not found under /dev/nvmesh/'.format(nvmesh_volume_name))

    flags = []

    # K8s Bug Workaround: the readonly flag is not sent to CSI, so we also try to infer it from the AccessMode
    if readonly or access_mode == Consts.AccessMode.MULTI_NODE_READER_ONLY:
        flags.append('-o ro')

    if access_type == Consts.VolumeAccessType.BLOCK:
        # create an empty file for the bind mount
        with open(publish_path, 'w'):
            pass

        # bind directly from the block device to publish_path
        self.logger.debug(
            'NodePublishVolume trying to bind mount as block device {} to {}'.format(
                block_device_path, publish_path))
        FileSystemManager.bind_mount(source=block_device_path, target=publish_path, flags=flags)
    else:
        self.logger.debug(
            'NodePublishVolume trying to bind mount {} to {}'.format(staging_target_path, publish_path))
        FileSystemManager.bind_mount(source=staging_target_path, target=publish_path, flags=flags)

    return NodePublishVolumeResponse()
def _get_nvmesh_volume_capacity(self, nvmesh_vol_name):
    filterObj = [MongoObj(field='_id', value=nvmesh_vol_name)]
    projection = [
        MongoObj(field='_id', value=1),
        MongoObj(field='capacity', value=1)
    ]

    err, out = VolumeAPI().get(filter=filterObj, projection=projection)
    if err or not len(out):
        raise DriverError(StatusCode.INTERNAL, err)

    return out[0]['capacity']
def format_block_device(block_device_path, fs_type, mkfs_options):
    # check if the device is already formatted, and whether the existing format matches the request
    current_fs_type = FileSystemManager.get_fs_type(block_device_path)
    logger.debug('current_fs_type={}'.format(current_fs_type))

    if current_fs_type == fs_type:
        logger.debug('{} is already formatted to {}'.format(block_device_path, current_fs_type))
        return

    if current_fs_type != '':
        raise DriverError(
            StatusCode.INVALID_ARGUMENT,
            '{} is formatted to {} but requested {}'.format(block_device_path, current_fs_type, fs_type))

    FileSystemManager.mkfs(fs_type=fs_type, target_path=block_device_path, flags=[mkfs_options])
def ListVolumes(self, request, context):
    max_entries = request.max_entries
    starting_token = request.starting_token

    try:
        page = int(starting_token or 0)
    except ValueError:
        raise DriverError(StatusCode.ABORTED, "Invalid starting_token")

    if starting_token and not max_entries:
        raise DriverError(StatusCode.ABORTED, "Invalid starting_token")

    count = max_entries or 0

    projection = [
        MongoObj(field='_id', value=1),
        MongoObj(field='capacity', value=1)
    ]

    # TODO: we should probably iterate over all management servers and return all volumes
    # while populating the volume.accessible_topology field
    zone = ''
    volume_api = VolumeAPIPool.get_volume_api_for_zone(zone, self.logger)
    err, nvmeshVolumes = volume_api.get(projection=projection, page=page, count=count)

    if err:
        raise DriverError(StatusCode.INTERNAL, err)

    def convertNVMeshVolumeToCSIVolume(volume):
        vol = Volume(volume_id=volume._id, capacity_bytes=volume.capacity)
        return ListVolumesResponse.Entry(volume=vol)

    entries = map(convertNVMeshVolumeToCSIVolume, nvmeshVolumes)
    next_token = str(page + 1)

    if not len(entries):
        raise DriverError(StatusCode.ABORTED, "No more Entries")

    return ListVolumesResponse(entries=entries, next_token=next_token)
def get_node_zone_or_wait(self, node_id):
    attempts_left = 6
    backoff = BackoffDelayWithStopEvent(self.stop_event, initial_delay=2, factor=2, max_delay=60)

    while attempts_left > 0:
        try:
            return TopologyUtils.get_node_zone(node_id)
        except NodeNotFoundInTopology:
            attempts_left = attempts_left - 1
            self.logger.debug(
                'Could not find this node (%s) in the topology. waiting %d seconds before trying again'
                % (node_id, backoff.current_delay))
            stopped_flag = backoff.wait()
            if stopped_flag:
                raise DriverError(StatusCode.INTERNAL, 'Driver stopped')

    raise DriverError(
        StatusCode.INTERNAL,
        'Could not find node %s in any of the zones in the topology. Check nvmesh-csi-topology ConfigMap'
        % node_id)
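BackoffDelayWithStopEvent is not shown in this listing. A minimal sketch of the behavior the retry loop above assumes from it: an exponentially growing delay capped at max_delay, and a wait() that returns True when the stop event fires during the wait. This is an assumption about the helper, not the driver's actual implementation.

# Minimal sketch, assuming the semantics get_node_zone_or_wait relies on.
class BackoffDelayWithStopEvent(object):
    def __init__(self, stop_event, initial_delay=1, factor=2, max_delay=60):
        # stop_event is expected to behave like threading.Event
        self.stop_event = stop_event
        self.current_delay = initial_delay
        self.factor = factor
        self.max_delay = max_delay

    def wait(self):
        # Block up to current_delay seconds; return True if the stop event was
        # set while waiting, False if the full delay elapsed. The delay grows
        # by `factor` on each call, up to `max_delay`.
        stopped = self.stop_event.wait(self.current_delay)
        self.current_delay = min(self.current_delay * self.factor, self.max_delay)
        return stopped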
def _get_nvmesh_volume_capacity(self, nvmesh_vol_name, log, zone=None):
    volume_api = VolumeAPIPool.get_volume_api_for_zone(zone, log)
    filterObj = [MongoObj(field='_id', value=nvmesh_vol_name)]
    projection = [
        MongoObj(field='_id', value=1),
        MongoObj(field='capacity', value=1)
    ]

    err, out = volume_api.get(filter=filterObj, projection=projection)
    if err or not len(out):
        raise DriverError(StatusCode.INTERNAL, err)

    return out[0].capacity
def get_nvmesh_volume(self, nvmesh_vol_name, minimalFields=False):
    filterObj = [MongoObj(field='_id', value=nvmesh_vol_name)]

    projection = None
    if minimalFields:
        projection = [
            MongoObj(field='_id', value=1),
            MongoObj(field='capacity', value=1),
            MongoObj(field='status', value=1),
            MongoObj(field='csi_metadata', value=1)
        ]

    err, out = VolumeAPI().get(filter=filterObj, projection=projection)
    if err:
        raise DriverError(StatusCode.INTERNAL, err)
    if not isinstance(out, list):
        raise DriverError(StatusCode.INTERNAL, out)
    if not len(out):
        raise DriverError(
            StatusCode.NOT_FOUND,
            'Volume {} Could not be found'.format(nvmesh_vol_name))

    return out[0]
def ListVolumes(self, request, context):
    max_entries = request.max_entries
    starting_token = request.starting_token

    try:
        page = int(starting_token or 0)
    except ValueError:
        raise DriverError(StatusCode.ABORTED, "Invalid starting_token")

    if starting_token and not max_entries:
        raise DriverError(StatusCode.ABORTED, "Invalid starting_token")

    count = max_entries or 0

    projection = [
        MongoObj(field='_id', value=1),
        MongoObj(field='capacity', value=1)
    ]

    err, nvmeshVolumes = VolumeAPI().get(projection=projection, page=page, count=count)

    if err:
        raise DriverError(StatusCode.INTERNAL, err)

    def convertNVMeshVolumeToCSIVolume(volume):
        vol = Volume(volume_id=volume._id, capacity_bytes=volume.capacity)
        return ListVolumesResponse.Entry(volume=vol)

    entries = map(convertNVMeshVolumeToCSIVolume, nvmeshVolumes)
    next_token = str(page + 1)

    if not len(entries):
        raise DriverError(StatusCode.ABORTED, "No more Entries")

    return ListVolumesResponse(entries=entries, next_token=next_token)
def NodeExpandVolume(self, request, context):
    # If this function was called, assume the Controller already checked that this volume is a FileSystem mounted volume,
    # so we resize the file system here.
    volume_id = request.volume_id
    volume_path = request.volume_path
    capacity_range = request.capacity_range

    reqJson = MessageToJson(request)
    self.logger.debug('NodeExpandVolume called with request: {}'.format(reqJson))

    zone, nvmesh_vol_name = Utils.zone_and_vol_name_from_co_id(request.volume_id)
    block_device_path = Utils.get_nvmesh_block_device_path(nvmesh_vol_name)
    self.logger.debug(
        'NodeExpandVolume zone: {} nvmesh_vol_name: {} block_device_path: {}'.format(
            zone, nvmesh_vol_name, block_device_path))

    fs_type = FileSystemManager.get_fs_type(block_device_path)
    self.logger.debug('fs_type={}'.format(fs_type))

    attempts_left = 20
    resized = False

    while not resized and attempts_left:
        exit_code, stdout, stderr = FileSystemManager.expand_file_system(block_device_path, fs_type)
        if 'Nothing to do!' in stderr:
            block_device_size = FileSystemManager.get_block_device_size(block_device_path)
            self.logger.warning(
                'File System not resized. block device size is {}'.format(block_device_size))
            attempts_left = attempts_left - 1
            Utils.interruptable_sleep(2)
        else:
            resized = True

    if not attempts_left:
        raise DriverError(
            StatusCode.INTERNAL,
            'Back-Off trying to expand {} FileSystem on volume {}'.format(fs_type, block_device_path))

    self.logger.debug(
        'Finished Expanding File System of type {} on volume {}'.format(fs_type, block_device_path))
    return NodeExpandVolumeResponse()
def get_fs_type(target_path):
    # Returns an empty string for a block device that has no FileSystem on it.
    # An alternative method is `df --output=fstype {target_path} | tail -1`, but that returns
    # "devtmpfs" if the block device has no FileSystem on it.
    cmd = "blkid -o export {}".format(target_path)
    exit_code, stdout, stderr = Utils.run_command(cmd)

    try:
        blkid_output = stdout.strip()
        if blkid_output == '':
            return blkid_output

        for line in blkid_output.split('\n'):
            key, value = line.split('=')
            if key == 'TYPE':
                return value

        raise ValueError('Could not find TYPE key in blkid output')
    except Exception as ex:
        raise DriverError(
            StatusCode.INVALID_ARGUMENT,
            'Could not determine file system type for path {}. Error: {}'.format(target_path, ex))
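For reference, `blkid -o export` prints KEY=VALUE pairs, one per line, and the parsing above looks for the TYPE key. The sample below is hypothetical output for an illustrative device path, showing what the function would return in each case.

# Hypothetical `blkid -o export /dev/nvmesh/vol_1` output for an ext4-formatted device:
#
#   DEVNAME=/dev/nvmesh/vol_1
#   UUID=2d4f0b1a-7d3e-4c2a-9f6b-0a1b2c3d4e5f
#   BLOCK_SIZE=4096
#   TYPE=ext4
#
# get_fs_type would return 'ext4' here, and '' for an unformatted device,
# since blkid prints nothing for a device it cannot identify.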
def _get_topology(self):
    self.logger.debug('_get_topology called TopologyType=%s' % Config.TOPOLOGY_TYPE)
    topology_info = {}

    if Config.TOPOLOGY_TYPE == Consts.TopologyType.MULTIPLE_NVMESH_CLUSTERS:
        zone = self.get_node_zone_or_wait(self.node_id)
        topology_key = TopologyUtils.get_topology_key()
        topology_info[topology_key] = zone
    elif Config.TOPOLOGY_TYPE == Consts.TopologyType.SINGLE_ZONE_CLUSTER:
        topology_key = TopologyUtils.get_topology_key()
        topology_info[topology_key] = Consts.SINGLE_CLUSTER_ZONE_NAME
    else:
        raise DriverError(
            StatusCode.INVALID_ARGUMENT,
            'Unsupported Config.TOPOLOGY_TYPE of %s' % Config.TOPOLOGY_TYPE)

    self.logger.debug('Node topology: %s' % topology_info)
    return Topology(segments=topology_info)
def CreateVolume(self, request, context):
    Utils.validate_param_exists(request, 'name')
    name = request.name
    capacity = self._parse_required_capacity(request.capacity_range)
    parameters = request.parameters
    #UNUSED - secrets = request.secrets
    #UNUSED - volume_content_source = request.volume_content_source
    #UNUSED - accessibility_requirements = request.accessibility_requirements

    reqJson = MessageToJson(request)
    self.logger.debug('create volume request: {}'.format(reqJson))
    reqDict = MessageToDict(request)

    capabilities = reqDict['volumeCapabilities']

    is_file_system = False
    is_block_device = False

    csi_metadata = {'csi_name': name, 'capabilities': capabilities}

    for capability in capabilities:
        if 'mount' in capability:
            is_file_system = True
            csi_metadata['fsType'] = capability['mount']['fsType']
        else:
            is_block_device = True
            csi_metadata['block'] = True

        access_mode = capability['accessMode']['mode']
        if Consts.AccessMode.fromCsiString(access_mode) not in Consts.AccessMode.allowed_access_modes():
            self.logger.warning(
                'Requested mode {} is not enforced by NVMesh Storage backend'.format(access_mode))

    if is_file_system and is_block_device:
        raise DriverError(
            StatusCode.INVALID_ARGUMENT,
            'Error: Contradicting capabilities - both Block Volume and FileSystem Volume were requested for volume {}. request: {}'.format(name, reqJson))

    nvmesh_vol_name = Utils.volume_id_to_nvmesh_name(name)
    nvmesh_params = {}

    self.logger.debug('create volume parameters: {}'.format(parameters))

    if 'vpg' in parameters:
        self.logger.debug('Creating Volume from VPG {}'.format(parameters['vpg']))
        nvmesh_params['VPG'] = parameters['vpg']

        # This is a workaround since the nvmesh create volume api expects a 'RAIDLevel',
        # but if 'VPG' is present the 'RAIDLevel' field will be ignored
        # and the RAIDLevel will be fetched from the VPG.
        nvmesh_params['RAIDLevel'] = RAIDLevels.CONCATENATED
    else:
        self.logger.debug('Creating without VPG')
        for param in parameters:
            nvmesh_params[param] = parameters[param]

        self._handle_non_vpg_params(nvmesh_params)

    self.logger.debug('nvmesh_params = {}'.format(nvmesh_params))

    volume = NVMeshVolume(
        name=nvmesh_vol_name,
        capacity=capacity,
        csi_metadata=csi_metadata,
        **nvmesh_params)

    self.logger.debug('Creating volume: {}'.format(str(volume)))
    err, data = VolumeAPI().save([volume])

    if err:
        raise DriverError(
            StatusCode.RESOURCE_EXHAUSTED,
            'Error: {} Details: {} Volume Requested: {}'.format(err, data, str(volume)))
    elif not type(data) == list or not data[0]['success']:
        if 'Name already Exists' in data[0]['error']:
            existing_capacity = self._get_nvmesh_volume_capacity(nvmesh_vol_name)
            if capacity == existing_capacity:
                # Idempotency - same Name same Capacity - return success
                pass
            else:
                raise DriverError(
                    StatusCode.ALREADY_EXISTS,
                    'Error: {} Details: {}'.format(err, data))
        else:
            raise DriverError(
                StatusCode.RESOURCE_EXHAUSTED,
                'Error: {} Details: {}'.format(err, data))

    err, details = self._wait_for_volume_status(volume._id, NVMeshConsts.VolumeStatuses.ONLINE)

    if err:
        if err == 'Timed out Waiting for Volume to be Online':
            raise DriverError(
                StatusCode.FAILED_PRECONDITION,
                'Error: {} Details: {}'.format(err, details))
        else:
            raise DriverError(StatusCode.INVALID_ARGUMENT, err)
    else:
        self.logger.debug(details)

    # We return the nvmesh_vol_name that we created to the CO.
    # All subsequent requests for this volume will have a volume_id of the nvmesh volume name.
    csiVolume = Volume(volume_id=nvmesh_vol_name, capacity_bytes=capacity)
    return CreateVolumeResponse(volume=csiVolume)
def NodePublishVolume(self, request, context):
    # NodePublishVolume: this method is called to mount the volume from the staging path to the target path.
    Utils.validate_params_exists(request, ['volume_id', 'target_path'])

    zone, nvmesh_volume_name = Utils.zone_and_vol_name_from_co_id(request.volume_id)
    staging_target_path = request.staging_target_path
    publish_path = request.target_path
    volume_capability = request.volume_capability
    access_mode = volume_capability.access_mode.mode
    readonly = request.readonly
    access_type = self._get_block_or_mount_volume(request)
    volume_context = request.volume_context
    podInfo = self._extract_pod_info_from_volume_context(volume_context)

    # K8s Bug Workaround: the readonly flag is not sent to CSI, so we also try to infer it from the AccessMode
    is_readonly = readonly or access_mode == Consts.AccessMode.MULTI_NODE_READER_ONLY

    block_device_path = Utils.get_nvmesh_block_device_path(nvmesh_volume_name)

    reqJson = MessageToJson(request)
    self.logger.debug('NodePublishVolume called with request: {}'.format(reqJson))
    self.logger.debug('NodePublishVolume podInfo: {}'.format(podInfo))

    if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
        raise DriverError(
            StatusCode.NOT_FOUND,
            'nvmesh volume {} was not found under /dev/nvmesh/'.format(nvmesh_volume_name))

    requested_mount_permissions, mount_options = self._parse_mount_options(volume_capability.mount)

    if is_readonly:
        mount_options.append('ro')

    if access_type == Consts.VolumeAccessType.BLOCK:
        # create an empty file for the bind mount of a block device
        with open(publish_path, 'w'):
            pass

        # bind directly from the block device to publish_path
        self.logger.debug(
            'NodePublishVolume trying to bind mount as block device {} to {}'.format(
                block_device_path, publish_path))
        FileSystemManager.bind_mount(source=block_device_path, target=publish_path, mount_options=mount_options)
    else:
        self.logger.debug('NodePublishVolume creating directory for bind mount at {}'.format(publish_path))
        # create an empty dir for the bind mount of a file system
        if not os.path.isdir(publish_path):
            os.makedirs(publish_path)

        self.logger.debug(
            'NodePublishVolume trying to bind mount {} to {}'.format(staging_target_path, publish_path))
        FileSystemManager.bind_mount(source=staging_target_path, target=publish_path, mount_options=mount_options)

    if not is_readonly:
        FileSystemManager.chmod(requested_mount_permissions or Consts.DEFAULT_MOUNT_PERMISSIONS, publish_path)

    self.logger.debug('NodePublishVolume finished successfully for request: {}'.format(reqJson))
    return NodePublishVolumeResponse()