def CreateVolume(self, request, context):
    pvsize = request.capacity_range.required_bytes

    # TODO: Check the available space under lock
    host_volumes = get_pv_hosting_volumes()
    hostvol = ""
    for hvol in host_volumes:
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)

        # Try to mount the Host Volume, handle
        # failure if already mounted
        mount_glusterfs(hvol, mntdir)
        if is_space_available(mntdir, pvsize):
            hostvol = hvol
            break

    if hostvol == "":
        raise Exception("no Hosting Volumes available, add more storage")

    pvtype = PV_TYPE_SUBVOL
    for vol_capability in request.volume_capabilities:
        # using getattr to avoid Pylint error
        single_node_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
                                     "SINGLE_NODE_WRITER")

        if vol_capability.access_mode.mode == single_node_writer:
            pvtype = PV_TYPE_VIRTBLOCK

    volpath = os.path.join(HOSTVOL_MOUNTDIR, hostvol, pvtype, request.name)

    if pvtype == PV_TYPE_VIRTBLOCK:
        # Create a file with required size
        os.makedirs(os.path.dirname(volpath), exist_ok=True)
        volpath_fd = os.open(volpath, os.O_CREAT | os.O_RDWR)
        os.close(volpath_fd)
        os.truncate(volpath, pvsize)

        # TODO: Multiple FS support based on volume_capability mount option
        execute(MKFS_XFS_CMD, volpath)
    else:
        # Create a subdir
        os.makedirs(volpath)

    # TODO: Set BackendQuota using RPC to sidecar
    # container of each glusterfsd pod

    return csi_pb2.CreateVolumeResponse(
        volume={
            "volume_id": request.name,
            "capacity_bytes": pvsize,
            "volume_context": {
                "hostvol": hostvol,
                "pvtype": pvtype,
                "fstype": "xfs"
            }
        })
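CreateVolume depends on a few constants and helpers (HOSTVOL_MOUNTDIR, PV_TYPE_SUBVOL, PV_TYPE_VIRTBLOCK, MKFS_XFS_CMD, get_pv_hosting_volumes, mount_glusterfs, is_space_available, execute) that are not shown above. A minimal sketch of what they could look like follows; the constant values, the HOSTING_VOLUMES and GLUSTER_HOST environment variables, and the glusterfs mount invocation are illustrative assumptions, not the driver's actual implementation.

import os
import subprocess

# Assumed constants; names match those used above, values are illustrative
HOSTVOL_MOUNTDIR = "/mnt"          # where Hosting Volumes get mounted
PV_TYPE_SUBVOL = "subvol"          # PV backed by a subdirectory
PV_TYPE_VIRTBLOCK = "virtblock"    # PV backed by a file formatted with a filesystem
MKFS_XFS_CMD = "mkfs.xfs"
MOUNT_CMD = "mount"
GLUSTERFS_CMD = "glusterfs"


def execute(*cmd):
    """Run an external command, raising CalledProcessError on failure."""
    subprocess.run(cmd, check=True, capture_output=True)


def get_pv_hosting_volumes():
    """Hypothetical source of Hosting Volume names (here: an env variable)."""
    return [v for v in os.environ.get("HOSTING_VOLUMES", "").split(",") if v]


def mount_glusterfs(volume, mntdir):
    """Mount the Hosting Volume, doing nothing if it is already mounted."""
    os.makedirs(mntdir, exist_ok=True)
    if os.path.ismount(mntdir):
        return
    server = os.environ.get("GLUSTER_HOST", "localhost")
    execute(GLUSTERFS_CMD, "--volfile-server", server,
            "--volfile-id", volume, mntdir)


def is_space_available(mntdir, required_bytes):
    """Check free space on the mounted Hosting Volume via statvfs."""
    stat = os.statvfs(mntdir)
    return (stat.f_bavail * stat.f_frsize) >= required_bytes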
def DeleteVolume(self, request, context):
    hostvol = request.volume_context.get("hostvol", "")
    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)

    # Try to mount the Host Volume, handle
    # failure if already mounted
    mount_glusterfs(hostvol, mntdir)

    # TODO: get pvtype from storage class
    pvtype = request.volume_context.get("pvtype", "")
    volpath = os.path.join(mntdir, pvtype, request.volume_id)
    if pvtype == PV_TYPE_VIRTBLOCK:
        os.remove(volpath)
    else:
        os.removedirs(volpath)

    return csi_pb2.DeleteVolumeResponse()
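One thing to note: in the CSI v1 spec, DeleteVolumeRequest carries only volume_id (and secrets), so hostvol and pvtype are not actually delivered in volume_context at delete time (the TODO above points in this direction). A hedged alternative is to resolve the PV from volume_id alone by searching the Hosting Volumes, reusing the helpers already referenced. The sketch below is illustrative, not the driver's actual logic.

def delete_volume_by_id(volume_id):
    """Locate and remove a PV by searching every Hosting Volume.
    Returns True when the PV was found and removed."""
    for hvol in get_pv_hosting_volumes():
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)
        mount_glusterfs(hvol, mntdir)

        virtblock_path = os.path.join(mntdir, PV_TYPE_VIRTBLOCK, volume_id)
        if os.path.isfile(virtblock_path):
            os.remove(virtblock_path)
            return True

        subvol_path = os.path.join(mntdir, PV_TYPE_SUBVOL, volume_id)
        if os.path.isdir(subvol_path):
            # Mirrors the os.removedirs() call above; assumes the
            # subdirectory is already empty
            os.removedirs(subvol_path)
            return True

    return False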
def NodePublishVolume(self, request, context):
    volume = request.volume_context.get("hostvol", "")
    mntdir = os.path.join(HOSTVOL_MOUNTDIR, volume)

    # Try to mount the Host Volume, handle
    # failure if already mounted
    mount_glusterfs(volume, mntdir)

    # Mount the PV
    pvtype = request.volume_context.get("pvtype", "")
    pvpath = os.path.join(mntdir, pvtype, request.volume_id)

    # TODO: Handle Volume capability mount flags
    if pvtype == PV_TYPE_VIRTBLOCK:
        execute(MOUNT_CMD, "-t",
                request.volume_context.get("fstype", "xfs"),
                pvpath, request.target_path)
    else:
        # pv type is subdir
        execute(MOUNT_CMD, "--bind", pvpath, request.target_path)

    return csi_pb2.NodePublishVolumeResponse()
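These servicer methods only take effect once they are registered with a gRPC server listening on the CSI endpoint. Below is a minimal sketch of that wiring, assuming the generated csi_pb2_grpc bindings and hypothetical ControllerServer/NodeServer classes that hold the methods above; the socket path is also an assumption.

from concurrent import futures

import grpc

import csi_pb2_grpc


def serve(endpoint="unix:///plugin/csi.sock"):
    """Start the CSI gRPC server on a UNIX socket (path is illustrative)."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))

    # ControllerServer holds CreateVolume/DeleteVolume and NodeServer holds
    # NodePublishVolume; the class names are assumed here. A complete driver
    # also registers an Identity servicer (GetPluginInfo, Probe, ...).
    csi_pb2_grpc.add_ControllerServicer_to_server(ControllerServer(), server)
    csi_pb2_grpc.add_NodeServicer_to_server(NodeServer(), server)

    server.add_insecure_port(endpoint)
    server.start()
    server.wait_for_termination()


if __name__ == "__main__":
    serve()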