def NodeUnpublishVolume(self, request, context):
    """
    Unmount the volume from the given target path.

    Rejects requests missing a volume_id or target_path with
    INVALID_ARGUMENT; otherwise unmounts target_path and returns
    an empty NodeUnpublishVolumeResponse.
    """
    # TODO: Validation and handle target_path failures
    # Validate mandatory request fields; getattr keeps the checks lazy
    # (each field is only read when its turn comes, as in a guard chain).
    required = (
        ("volume_id", "Volume ID is empty and must be provided"),
        ("target_path", "Target path is empty and must be provided"),
    )
    for field_name, errmsg in required:
        if not getattr(request, field_name):
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodeUnpublishVolumeResponse()

    logging.debug(logf(
        "Received the unmount request",
        volume=request.volume_id,
    ))
    unmount_volume(request.target_path)
    return csi_pb2.NodeUnpublishVolumeResponse()
def NodeUnpublishVolume(self, request, context):
    """
    Unmount the volume from the given target path.

    Returns INVALID_ARGUMENT when volume_id or target_path is
    missing; otherwise unmounts target_path and returns an empty
    NodeUnpublishVolumeResponse.
    """
    # TODO: handle target_path failures
    # NOTE(review): this definition duplicated an earlier, validated
    # NodeUnpublishVolume and shadowed it, dropping validation; the
    # same volume_id/target_path checks are restored here so the
    # effective method always validates its input.
    if not request.volume_id:
        errmsg = "Volume ID is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.NodeUnpublishVolumeResponse()

    if not request.target_path:
        errmsg = "Target path is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.NodeUnpublishVolumeResponse()

    logging.debug(logf(
        "Received the unmount request",
        volume=request.volume_id,
    ))
    unmount_volume(request.target_path)
    return csi_pb2.NodeUnpublishVolumeResponse()
def CreateVolume(self, request, context):
    """
    Provision a new PV, either on an external Gluster volume or on a
    kadalu-managed hosting volume.

    The PV is a subdirectory (PV_TYPE_SUBVOL) by default, or a virtual
    block file (PV_TYPE_VIRTBLOCK) when a SINGLE_NODE_WRITER access
    mode is requested. Returns a CreateVolumeResponse whose
    volume_context carries everything the nodeplugin needs to mount it,
    or an error response (INVALID_ARGUMENT / RESOURCE_EXHAUSTED).
    """
    start_time = time.time()
    logging.debug(logf("Create Volume request", request=request))
    pvsize = request.capacity_range.required_bytes

    # Decide PV type from requested capabilities: any SINGLE_NODE_WRITER
    # capability switches the PV from a subdirectory to a virtblock file.
    pvtype = PV_TYPE_SUBVOL
    for vol_capability in request.volume_capabilities:
        # using getattr to avoid Pylint error
        single_node_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
                                     "SINGLE_NODE_WRITER")
        if vol_capability.access_mode.mode == single_node_writer:
            pvtype = PV_TYPE_VIRTBLOCK

    logging.debug(logf("Found PV type",
                       pvtype=pvtype,
                       capabilities=request.volume_capabilities))

    # TODO: Check the available space under lock

    # Add everything from parameter as filter item
    filters = {}
    for pkey, pvalue in request.parameters.items():
        filters[pkey] = pvalue

    logging.debug(logf("Filters applied to choose storage", **filters))

    # UID is stored at the time of installation in configmap.
    uid = None
    with open(os.path.join(VOLINFO_DIR, "uid")) as uid_file:
        uid = uid_file.read()

    # External path: the storage is a pre-existing (non-kadalu-managed)
    # Gluster volume. NOTE(review): filters['hostvol_type'] raises
    # KeyError if the StorageClass omits the parameter — presumably
    # guaranteed by the operator; confirm.
    ext_volume = None
    if filters['hostvol_type'] == 'External':
        ext_volume = check_external_volume(request)
        if ext_volume:
            mntdir = os.path.join(HOSTVOL_MOUNTDIR, ext_volume['name'])

            # Non-kadalu-format: the whole external volume is handed to
            # the PV as-is; nothing more to create on the controller.
            if not filters.get('kadalu-format', None):
                # No need to keep the mount on controller
                unmount_volume(mntdir)

                logging.info(logf("Volume (External) created",
                                  name=request.name,
                                  size=pvsize,
                                  hostvol=ext_volume['name'],
                                  pvtype=pvtype,
                                  volpath=ext_volume['host'],
                                  duration_seconds=time.time() - start_time))

                send_analytics_tracker("pvc-external", uid)
                return csi_pb2.CreateVolumeResponse(volume={
                    "volume_id": request.name,
                    "capacity_bytes": pvsize,
                    "volume_context": {
                        "type": filters['hostvol_type'],
                        "hostvol": ext_volume['name'],
                        "pvtype": pvtype,
                        "gserver": ext_volume['host'],
                        "fstype": "xfs",
                        "options": ext_volume['options'],
                    }
                })

            # The external volume should be used as kadalu host vol
            # TODO: handle the case where host-volume is full
            # can-be-fixed-by-an-intern
            if pvtype == PV_TYPE_VIRTBLOCK:
                vol = create_virtblock_volume(mntdir, request.name, pvsize)
            else:
                vol = create_subdir_volume(mntdir, request.name, pvsize)

            logging.info(logf("Volume created",
                              name=request.name,
                              size=pvsize,
                              hostvol=ext_volume['name'],
                              pvtype=pvtype,
                              volpath=vol.volpath,
                              duration_seconds=time.time() - start_time))

            send_analytics_tracker("pvc-external-kadalu", uid)
            # Pass required argument to get mount working on
            # nodeplugin through volume_context
            return csi_pb2.CreateVolumeResponse(volume={
                "volume_id": request.name,
                "capacity_bytes": pvsize,
                "volume_context": {
                    "type": filters['hostvol_type'],
                    "hostvol": ext_volume['name'],
                    "pvtype": pvtype,
                    "path": vol.volpath,
                    "gserver": ext_volume['host'],
                    "fstype": "xfs",
                    "options": ext_volume['options'],
                }
            })

        # If external volume not found
        logging.debug(logf("Here as checking external volume failed",
                           external_volume=ext_volume))
        errmsg = "External Storage provided not valid"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    # Internal path: pick a kadalu-managed hosting volume with enough
    # free space and carve the PV out of it.
    host_volumes = get_pv_hosting_volumes(filters)
    logging.debug(logf("Got list of hosting Volumes",
                       volumes=",".join(v['name'] for v in host_volumes)))
    hostvol = mount_and_select_hosting_volume(host_volumes, pvsize)
    if hostvol is None:
        errmsg = "No Hosting Volumes available, add more storage"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
        return csi_pb2.CreateVolumeResponse()

    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
    if pvtype == PV_TYPE_VIRTBLOCK:
        vol = create_virtblock_volume(mntdir, request.name, pvsize)
    else:
        vol = create_subdir_volume(mntdir, request.name, pvsize)

    logging.info(logf("Volume created",
                      name=request.name,
                      size=pvsize,
                      hostvol=hostvol,
                      pvtype=pvtype,
                      volpath=vol.volpath,
                      duration_seconds=time.time() - start_time))

    # Account the new PV against the hosting volume's free space.
    update_free_size(hostvol, -pvsize)

    send_analytics_tracker("pvc-%s" % filters['hostvol_type'], uid)
    return csi_pb2.CreateVolumeResponse(volume={
        "volume_id": request.name,
        "capacity_bytes": pvsize,
        "volume_context": {
            "type": filters['hostvol_type'],
            "hostvol": hostvol,
            "pvtype": pvtype,
            "path": vol.volpath,
            "fstype": "xfs"
        }
    })