def DeleteVolume(self, req, context):
    """DeleteVolume implements csi DeleteVolume.

    Looks up the Cinder volume object by ``req.volume_id`` and deletes it.

    :param req: csi DeleteVolumeRequest (uses ``volume_id``)
    :param context: gRPC servicer context
    :returns: csi_pb2.DeleteVolumeResponse
    """
    # Delete requires a Cinder Volume Object, so do a get
    # first, we're using volume_id here
    vref = self.volume_api.get(self.cctxt, req.volume_id)
    self.volume_api.delete(self.cctxt, vref)
    # Bug fix: DeleteVolume must return DeleteVolumeResponse (the original
    # returned CreateVolumeResponse, the wrong message type for this RPC).
    return csi_pb2.DeleteVolumeResponse()
def CreateVolume(self, request, context):
    """Provision a PV on the first hosting volume with enough free space.

    Picks a hosting volume, then creates either a virtual-block file
    (formatted as XFS) or a plain subdirectory, depending on the
    requested access mode.
    """
    required_bytes = request.capacity_range.required_bytes

    # TODO: Check the available space under lock
    selected = ""
    for candidate in get_pv_hosting_volumes():
        candidate_mnt = os.path.join(HOSTVOL_MOUNTDIR, candidate)
        # Try to mount the Host Volume, handle failure if already mounted
        mount_glusterfs(candidate, candidate_mnt)
        if is_space_available(candidate_mnt, required_bytes):
            selected = candidate
            break

    if not selected:
        raise Exception("no Hosting Volumes available, add more storage")

    # using getattr to avoid Pylint error
    single_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
                            "SINGLE_NODE_WRITER")
    requested_modes = [cap.access_mode.mode
                       for cap in request.volume_capabilities]
    pvtype = (PV_TYPE_VIRTBLOCK if single_writer in requested_modes
              else PV_TYPE_SUBVOL)

    volpath = os.path.join(HOSTVOL_MOUNTDIR, selected, pvtype, request.name)
    if pvtype == PV_TYPE_VIRTBLOCK:
        # Back the block PV with a file of the required size
        os.makedirs(os.path.dirname(volpath), exist_ok=True)
        os.close(os.open(volpath, os.O_CREAT | os.O_RDWR))
        os.truncate(volpath, required_bytes)
        # TODO: Multiple FS support based on volume_capability mount option
        execute(MKFS_XFS_CMD, volpath)
    else:
        # Subdir PV: just create the directory
        os.makedirs(volpath)

    # TODO: Set BackendQuota using RPC to sidecar
    # container of each glusterfsd pod
    return csi_pb2.CreateVolumeResponse(
        volume={
            "volume_id": request.name,
            "capacity_bytes": required_bytes,
            "volume_context": {
                "hostvol": selected,
                "pvtype": pvtype,
                "fstype": "xfs"
            }
        })
def CreateVolume(self, req, context):
    """CreateVolume implements csi CreateVolume.

    Creates a Cinder volume named ``req.name`` (or a generated UUID when
    the request carries no name) and returns its id in the response.

    :param req: csi CreateVolumeRequest
    :param context: gRPC servicer context
    :returns: csi_pb2.CreateVolumeResponse with volume.id set
    """
    # Idiom fix: use truthiness instead of len(req.name) < 1.
    volume_name = req.name if req.name else str(uuid.uuid4())

    # FIXME(jdg): Figure out sizing, should be able to just
    # use a similar pattern as above with name, but there are
    # some details around using "range" etc and the conversion
    # from bytes to GiB
    volume_size_gig = 1

    # NOTE(review): 'type' and 'availability' are read from the request but
    # are not yet passed to volume_api.create below — presumably pending
    # wiring; confirm before removing.
    volume_type_name = req.parameters.get('type', None)
    volume_az = req.parameters.get('availability', None)

    vref = self.volume_api.create(self.cctxt, volume_size_gig,
                                  volume_name, None)

    # FIXME(jdg): This is still wrong, it doesn't serialize correctly for a
    # grpc response, but I haven't figured out what's wrong yet
    csi_create_response = csi_pb2.CreateVolumeResponse()
    csi_create_response.volume.id = vref.id
    csi_create_response.volume.capacity_bytes = 1
    return csi_create_response
def CreateVolume(self, request, context):
    """CSI CreateVolume: provision a PV on kadalu-managed storage.

    Flow: validate the request, detect block vs subdir PV type, pick a
    hosting volume (or an External gluster volume), create the PV, and
    return its volume_context for the nodeplugin mount.  Errors are
    reported via the gRPC context with an empty CreateVolumeResponse.
    """
    start_time = time.time()
    logging.debug(logf("Create Volume request", request=request))

    if not request.name:
        errmsg = "Volume name is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    if not request.volume_capabilities:
        errmsg = "Volume Capabilities is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    # Check for same name and different capacity
    # (idempotent re-create with the same size is allowed to fall through)
    volume = search_volume(request.name)
    if volume:
        if volume.size != request.capacity_range.required_bytes:
            errmsg = "Failed to create volume with same name with different capacity"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.ALREADY_EXISTS)
            return csi_pb2.CreateVolumeResponse()

    pvsize = request.capacity_range.required_bytes

    pvtype = PV_TYPE_SUBVOL
    is_block = False
    storage_options = request.parameters.get("storage_options", "")

    # Mounted BlockVolume is requested via Storage Class.
    # GlusterFS File Volume may not be useful for some workloads
    # they can request for the Virtual Block formated and mounted
    # as default MountVolume.
    if request.parameters.get("pv_type", "").lower() == "block":
        pvtype = PV_TYPE_VIRTBLOCK
        is_block = True

    # RawBlock volume is requested via PVC
    if is_block_request(request):
        pvtype = PV_TYPE_RAWBLOCK
        is_block = True

    if is_block:
        # using getattr to avoid Pylint error
        single_node_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
                                     "SINGLE_NODE_WRITER")

        # Multi node writer is not allowed for PV_TYPE_VIRTBLOCK/PV_TYPE_RAWBLOCK
        if pvc_access_mode(request) != single_node_writer:
            errmsg = "Only SINGLE_NODE_WRITER is allowed for block Volume"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.CreateVolumeResponse()

    logging.debug(logf("Found PV type",
                       pvtype=pvtype,
                       capabilities=request.volume_capabilities))

    # TODO: Check the available space under lock
    # Add everything from parameter as filter item
    filters = {}
    for pkey, pvalue in request.parameters.items():
        filters[pkey] = pvalue

    logging.debug(logf("Filters applied to choose storage", **filters))

    # UID is stored at the time of installation in configmap.
    uid = None
    with open(os.path.join(VOLINFO_DIR, "uid")) as uid_file:
        uid = uid_file.read()

    host_volumes = get_pv_hosting_volumes(filters)
    logging.debug(logf("Got list of hosting Volumes",
                       volumes=",".join(v['name'] for v in host_volumes)))

    hostvol = None
    ext_volume = None
    data = {}
    hostvoltype = filters.get("hostvol_type", None)
    if not hostvoltype:
        # This means, the request came on 'kadalu' storage class type.
        # Randomize the entries so we can issue PV from different storage
        random.shuffle(host_volumes)
        hostvol = mount_and_select_hosting_volume(host_volumes, pvsize)
        if hostvol is None:
            errmsg = "No Hosting Volumes available, add more storage"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
            return csi_pb2.CreateVolumeResponse()

        # The selected pool's metadata (including its type) lives in a
        # per-pool .info file next to the uid file.
        info_file_path = os.path.join(VOLINFO_DIR, "%s.info" % hostvol)
        with open(info_file_path) as info_file:
            data = json.load(info_file)

        hostvoltype = data['type']

    kformat = filters.get('kadalu_format', "native")
    if hostvoltype == 'External':
        ext_volume = check_external_volume(request, host_volumes)
        if ext_volume:
            mntdir = os.path.join(HOSTVOL_MOUNTDIR, ext_volume['name'])

            # By default 'kadalu_format' is set to 'native' as part of CRD
            # definition
            if kformat == 'non-native':
                # If 'kadalu_format' is 'non-native', the request will be
                # considered as to map 1 PV to 1 Gluster volume

                # No need to keep the mount on controller
                unmount_glusterfs(mntdir)

                logging.info(logf("Volume (External) created",
                                  name=request.name,
                                  size=pvsize,
                                  mount=mntdir,
                                  hostvol=ext_volume['g_volname'],
                                  pvtype=pvtype,
                                  volpath=ext_volume['g_host'],
                                  duration_seconds=time.time() - start_time))

                send_analytics_tracker("pvc-external", uid)
                return csi_pb2.CreateVolumeResponse(
                    volume={
                        "volume_id": request.name,
                        "capacity_bytes": pvsize,
                        "volume_context": {
                            "type": hostvoltype,
                            "hostvol": ext_volume['name'],
                            "pvtype": pvtype,
                            "gvolname": ext_volume['g_volname'],
                            "gserver": ext_volume['g_host'],
                            "fstype": "xfs",
                            "options": ext_volume['g_options'],
                            "kformat": kformat,
                        }
                    })

            # The external volume should be used as kadalu host vol
            if not is_hosting_volume_free(ext_volume['name'], pvsize):
                logging.error(logf("Hosting volume is full. Add more storage",
                                   volume=ext_volume['name']))
                errmsg = "External resource is exhausted"
                context.set_details(errmsg)
                context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
                return csi_pb2.CreateVolumeResponse()

            if pvtype in [PV_TYPE_VIRTBLOCK, PV_TYPE_RAWBLOCK]:
                vol = create_block_volume(pvtype, mntdir, request.name,
                                          pvsize)
            else:
                # Subdir PV: optionally enforce the size via gluster
                # directory quota over SSH when the secret is mounted.
                use_gluster_quota = False
                if (os.path.isfile("/etc/secret-volume/ssh-privatekey") \
                        and "SECRET_GLUSTERQUOTA_SSH_USERNAME" in os.environ):
                    use_gluster_quota = True
                    secret_private_key = "/etc/secret-volume/ssh-privatekey"
                    secret_username = os.environ.get(
                        'SECRET_GLUSTERQUOTA_SSH_USERNAME', None)

                hostname = filters.get("gluster_hosts", None)
                gluster_vol_name = filters.get("gluster_volname", None)
                vol = create_subdir_volume(mntdir, request.name, pvsize,
                                           use_gluster_quota)
                quota_size = pvsize
                quota_path = vol.volpath
                if use_gluster_quota is False:
                    logging.debug(logf("Set Quota in the native way"))
                else:
                    logging.debug(
                        logf("Set Quota using gluster directory Quota"))
                    errmsg = execute_gluster_quota_command(
                        secret_private_key, secret_username, hostname,
                        gluster_vol_name, quota_path, quota_size)
                    if errmsg:
                        context.set_details(errmsg)
                        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                        return csi_pb2.CreateVolumeResponse()

            logging.info(logf("Volume created",
                              name=request.name,
                              size=pvsize,
                              hostvol=ext_volume['name'],
                              pvtype=pvtype,
                              volpath=vol.volpath,
                              duration_seconds=time.time() - start_time))

            send_analytics_tracker("pvc-external-kadalu", uid)

            # Pass required argument to get mount working on
            # nodeplugin through volume_context
            return csi_pb2.CreateVolumeResponse(
                volume={
                    "volume_id": request.name,
                    "capacity_bytes": pvsize,
                    "volume_context": {
                        "type": hostvoltype,
                        "hostvol": ext_volume['name'],
                        "pvtype": pvtype,
                        "path": vol.volpath,
                        "gvolname": ext_volume['g_volname'],
                        "gserver": ext_volume['g_host'],
                        "fstype": "xfs",
                        "options": ext_volume['g_options'],
                        "kformat": kformat,
                    }
                })

        # If external volume not found
        logging.debug(logf("Here as checking external volume failed",
                           external_volume=ext_volume))
        errmsg = "External Storage provided not valid"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    # hostvol is still None when hostvol_type was given in the filters
    # (non-External); choose a pool now.
    if not hostvol:
        # Randomize the entries so we can issue PV from different storage
        random.shuffle(host_volumes)
        hostvol = mount_and_select_hosting_volume(host_volumes, pvsize)
        if hostvol is None:
            errmsg = "No Hosting Volumes available, add more storage"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
            return csi_pb2.CreateVolumeResponse()

    if kformat == 'non-native':
        # Then mount the whole volume as PV
        msg = "non-native way of Kadalu mount expected"
        logging.info(msg)
        return csi_pb2.CreateVolumeResponse(
            volume={
                "volume_id": request.name,
                "capacity_bytes": pvsize,
                "volume_context": {
                    "type": hostvoltype,
                    "hostvol": hostvol,
                    "pvtype": pvtype,
                    "fstype": "xfs",
                    "kformat": kformat,
                }
            })

    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
    if pvtype in [PV_TYPE_VIRTBLOCK, PV_TYPE_RAWBLOCK]:
        vol = create_block_volume(pvtype, mntdir, request.name, pvsize)
    else:
        use_gluster_quota = False
        vol = create_subdir_volume(mntdir, request.name, pvsize,
                                   use_gluster_quota)

    logging.info(logf("Volume created",
                      name=request.name,
                      size=pvsize,
                      hostvol=hostvol,
                      pvtype=pvtype,
                      volpath=vol.volpath,
                      duration_seconds=time.time() - start_time))

    # Book-keeping: deduct the new PV's size from the pool's free space.
    update_free_size(hostvol, request.name, -pvsize)

    send_analytics_tracker("pvc-%s" % hostvoltype, uid)
    return csi_pb2.CreateVolumeResponse(
        volume={
            "volume_id": request.name,
            "capacity_bytes": pvsize,
            "volume_context": {
                "type": hostvoltype,
                "hostvol": hostvol,
                "pvtype": pvtype,
                "path": vol.volpath,
                "fstype": "xfs",
                "kformat": kformat,
                "storage_options": storage_options
            }
        })
def ControllerExpandVolume(self, request, context):
    """
    Controller plugin RPC call implementation of EXPAND_VOLUME.

    Validates the request, locates the existing PV, verifies the storage
    pool has room, grows the virtblock file or subdir quota, and returns
    the new capacity. Errors are reported via the gRPC context.
    """
    start_time = time.time()

    # --- request validation: each missing field is INVALID_ARGUMENT ---
    if not request.volume_id:
        errmsg = "Volume ID is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.ControllerExpandVolumeResponse()

    if not request.capacity_range:
        errmsg = "Capacity Range is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.ControllerExpandVolumeResponse()

    if not request.capacity_range.required_bytes:
        errmsg = "Required Bytes is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.ControllerExpandVolumeResponse()

    expansion_requested_pvsize = request.capacity_range.required_bytes

    # Get existing volume
    existing_volume = search_volume(request.volume_id)
    if not existing_volume:
        errmsg = logf("Unable to find volume", volume_id=request.volume_id)
        logging.error(errmsg)
        context.set_details(str(errmsg))
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.ControllerExpandVolumeResponse()

    if existing_volume.extra['kformat'] == 'non-native':
        errmsg = "PV with kadalu_format == non-native doesn't support Expansion"
        logging.error(errmsg)
        # But lets not fail the call, and continue here
        return csi_pb2.ControllerExpandVolumeResponse()

    # Volume size before expansion
    existing_pvsize = existing_volume.size
    pvname = existing_volume.volname

    logging.info(logf("Existing PV size and Expansion requested PV size",
                      existing_pvsize=existing_pvsize,
                      expansion_requested_pvsize=expansion_requested_pvsize))

    pvtype = PV_TYPE_SUBVOL
    # using getattr to avoid Pylint error
    single_node_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
                                 "SINGLE_NODE_WRITER")
    # NOTE(review): this compares the AccessMode message *class* attribute
    # against the enum value, so it appears to always be False and pvtype
    # stays PV_TYPE_SUBVOL. Likely intended:
    # request.volume_capability.access_mode.mode — confirm before changing,
    # since fixing it changes which expansion path runs.
    if request.volume_capability.AccessMode == single_node_writer:
        pvtype = PV_TYPE_VIRTBLOCK

    logging.debug(logf("Found PV type",
                       pvtype=pvtype,
                       capability=request.volume_capability))

    hostvol = existing_volume.hostvol
    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
    use_gluster_quota = False

    # Check free-size in storage-pool before expansion
    if not is_hosting_volume_free(hostvol, expansion_requested_pvsize):
        logging.error(logf("Hosting volume is full. Add more storage",
                           volume=hostvol))
        errmsg = "Host volume resource is exhausted"
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
        # Bug fix: this RPC must return ControllerExpandVolumeResponse;
        # the original returned csi_pb2.CreateVolumeResponse() here.
        return csi_pb2.ControllerExpandVolumeResponse()

    hostvoltype = existing_volume.extra['hostvoltype']

    if pvtype == PV_TYPE_VIRTBLOCK:
        update_virtblock_volume(mntdir, pvname, expansion_requested_pvsize)
        expand_volume(mntdir)
    else:
        update_subdir_volume(mntdir, hostvoltype, pvname,
                             expansion_requested_pvsize)
        if hostvoltype == 'External':
            # Use Gluster quota if set
            if (os.path.isfile("/etc/secret-volume/ssh-privatekey") \
                    and "SECRET_GLUSTERQUOTA_SSH_USERNAME" in os.environ):
                use_gluster_quota = True

    # Can be true only if its 'External'
    if use_gluster_quota:
        secret_private_key = "/etc/secret-volume/ssh-privatekey"
        secret_username = os.environ.get(
            'SECRET_GLUSTERQUOTA_SSH_USERNAME', None)
        logging.debug(logf("Set Quota (expand) using gluster directory Quota"))
        errmsg = execute_gluster_quota_command(
            secret_private_key, secret_username,
            existing_volume.extra['ghost'],
            existing_volume.extra['gvolname'],
            existing_volume.volpath,
            expansion_requested_pvsize)
        if errmsg:
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.ControllerExpandVolumeResponse()

    logging.info(logf("Volume expanded",
                      name=pvname,
                      size=expansion_requested_pvsize,
                      hostvol=hostvol,
                      pvtype=pvtype,
                      volpath=existing_volume.volpath,
                      duration_seconds=time.time() - start_time))

    # sizechanged is the additional change to be
    # subtracted from storage-pool
    sizechange = expansion_requested_pvsize - existing_pvsize
    update_free_size(hostvol, pvname, -sizechange)

    # if not hostvoltype:
    #     hostvoltype = "unknown"
    # send_analytics_tracker("pvc-%s" % hostvoltype, uid)

    return csi_pb2.ControllerExpandVolumeResponse(
        capacity_bytes=int(expansion_requested_pvsize))
def CreateVolume(self, request, context):
    """CSI CreateVolume (older kadalu flow).

    Determines the PV type from the requested access modes (only on
    kadalu <= 0.5.0), then provisions either on an External gluster
    volume or on an internal hosting volume chosen at random.
    """
    start_time = time.time()
    logging.debug(logf("Create Volume request", request=request))
    pvsize = request.capacity_range.required_bytes

    pvtype = PV_TYPE_SUBVOL
    # 'latest' finds a place here, because only till 0.5.0 version
    # we had 'latest' as a separate version. After that, 'latest' is
    # just a link to latest version.
    if KADALU_VERSION in ["0.5.0", "0.4.0", "0.3.0"]:
        for vol_capability in request.volume_capabilities:
            # using getattr to avoid Pylint error
            single_node_writer = getattr(
                csi_pb2.VolumeCapability.AccessMode, "SINGLE_NODE_WRITER")

            if vol_capability.access_mode.mode == single_node_writer:
                pvtype = PV_TYPE_VIRTBLOCK

    logging.debug(logf("Found PV type",
                       pvtype=pvtype,
                       capabilities=request.volume_capabilities))

    # TODO: Check the available space under lock
    # Add everything from parameter as filter item
    filters = {}
    for pkey, pvalue in request.parameters.items():
        filters[pkey] = pvalue

    logging.debug(logf("Filters applied to choose storage", **filters))

    # UID is stored at the time of installation in configmap.
    uid = None
    with open(os.path.join(VOLINFO_DIR, "uid")) as uid_file:
        uid = uid_file.read()

    host_volumes = get_pv_hosting_volumes(filters)
    logging.debug(logf("Got list of hosting Volumes",
                       volumes=",".join(v['name'] for v in host_volumes)))

    ext_volume = None
    hostvoltype = filters.get("hostvol_type", None)
    if hostvoltype == 'External':
        ext_volume = check_external_volume(request, host_volumes)
        if ext_volume:
            mntdir = os.path.join(HOSTVOL_MOUNTDIR, ext_volume['name'])

            if not filters.get('kadalu-format', None):
                # No need to keep the mount on controller
                unmount_glusterfs(mntdir)

                logging.info(logf("Volume (External) created",
                                  name=request.name,
                                  size=pvsize,
                                  mount=mntdir,
                                  hostvol=ext_volume['g_volname'],
                                  pvtype=pvtype,
                                  volpath=ext_volume['g_host'],
                                  duration_seconds=time.time() - start_time))

                send_analytics_tracker("pvc-external", uid)
                return csi_pb2.CreateVolumeResponse(
                    volume={
                        "volume_id": request.name,
                        "capacity_bytes": pvsize,
                        "volume_context": {
                            "type": hostvoltype,
                            "hostvol": ext_volume['name'],
                            "pvtype": pvtype,
                            "gvolname": ext_volume['g_volname'],
                            "gserver": ext_volume['g_host'],
                            "fstype": "xfs",
                            # NOTE(review): this branch reads
                            # ext_volume['options'] while the branch below
                            # reads ext_volume['g_options'] — confirm which
                            # key check_external_volume actually populates.
                            "options": ext_volume['options'],
                        }
                    })

            # The external volume should be used as kadalu host vol
            # TODO: handle the case where host-volume is full
            # can-be-fixed-by-an-intern
            if pvtype == PV_TYPE_VIRTBLOCK:
                vol = create_virtblock_volume(mntdir, request.name, pvsize)
            else:
                vol = create_subdir_volume(mntdir, request.name, pvsize)

            logging.info(logf("Volume created",
                              name=request.name,
                              size=pvsize,
                              hostvol=ext_volume['name'],
                              pvtype=pvtype,
                              volpath=vol.volpath,
                              duration_seconds=time.time() - start_time))

            send_analytics_tracker("pvc-external-kadalu", uid)

            # Pass required argument to get mount working on
            # nodeplugin through volume_context
            return csi_pb2.CreateVolumeResponse(
                volume={
                    "volume_id": request.name,
                    "capacity_bytes": pvsize,
                    "volume_context": {
                        "type": hostvoltype,
                        "hostvol": ext_volume['name'],
                        "pvtype": pvtype,
                        "path": vol.volpath,
                        "gvolname": ext_volume['g_volname'],
                        "gserver": ext_volume['g_host'],
                        "fstype": "xfs",
                        "options": ext_volume['g_options'],
                    }
                })

        # If external volume not found
        logging.debug(logf("Here as checking external volume failed",
                           external_volume=ext_volume))
        errmsg = "External Storage provided not valid"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    # Internal (non-External) path:
    # Randomize the entries so we can issue PV from different storage
    random.shuffle(host_volumes)
    hostvol = mount_and_select_hosting_volume(host_volumes, pvsize)
    if hostvol is None:
        errmsg = "No Hosting Volumes available, add more storage"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
        return csi_pb2.CreateVolumeResponse()

    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
    if pvtype == PV_TYPE_VIRTBLOCK:
        vol = create_virtblock_volume(mntdir, request.name, pvsize)
    else:
        vol = create_subdir_volume(mntdir, request.name, pvsize)

    logging.info(logf("Volume created",
                      name=request.name,
                      size=pvsize,
                      hostvol=hostvol,
                      pvtype=pvtype,
                      volpath=vol.volpath,
                      duration_seconds=time.time() - start_time))

    # Book-keeping: deduct the new PV's size from the pool's free space.
    update_free_size(hostvol, request.name, -pvsize)

    send_analytics_tracker("pvc-%s" % hostvoltype, uid)
    return csi_pb2.CreateVolumeResponse(
        volume={
            "volume_id": request.name,
            "capacity_bytes": pvsize,
            "volume_context": {
                "type": hostvoltype,
                "hostvol": hostvol,
                "pvtype": pvtype,
                "path": vol.volpath,
                "fstype": "xfs"
            }
        })
def ControllerExpandVolume(self, request, context):
    """
    Controller plugin RPC call implementation of EXPAND_VOLUME.

    Validates the request, locates the existing PV, checks pool free
    space, grows the PV, and returns the new capacity.
    """
    start_time = time.time()

    if not request.volume_id:
        errmsg = "Volume ID is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.ControllerExpandVolumeResponse()

    expansion_requested_pvsize = request.capacity_range.required_bytes

    # Get existing volume
    existing_volume = search_volume(request.volume_id)
    # Robustness fix: search_volume may return None; previously this fell
    # through and crashed with AttributeError on existing_volume.size.
    if not existing_volume:
        errmsg = "Unable to find volume"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.ControllerExpandVolumeResponse()

    # Volume size before expansion
    existing_pvsize = existing_volume.size
    pvname = existing_volume.volname

    logging.info(logf("Existing PV size and Expansion requested PV size",
                      existing_pvsize=existing_pvsize,
                      expansion_requested_pvsize=expansion_requested_pvsize))

    pvtype = PV_TYPE_SUBVOL
    # using getattr to avoid Pylint error
    single_node_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
                                 "SINGLE_NODE_WRITER")
    # NOTE(review): this compares the AccessMode message *class* attribute
    # against the enum value, so it appears to always be False. Likely
    # intended: request.volume_capability.access_mode.mode — confirm
    # before changing, since fixing it changes the expansion path.
    if request.volume_capability.AccessMode == single_node_writer:
        pvtype = PV_TYPE_VIRTBLOCK

    logging.debug(logf("Found PV type",
                       pvtype=pvtype,
                       capability=request.volume_capability))

    hostvol = existing_volume.hostvol
    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)

    # Check free-size in storage-pool before expansion
    if not is_hosting_volume_free(hostvol, expansion_requested_pvsize):
        logging.error(logf("Hosting volume is full. Add more storage",
                           volume=hostvol))
        errmsg = "Host volume resource is exhausted"
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
        # Bug fix: this RPC must return ControllerExpandVolumeResponse;
        # the original returned csi_pb2.CreateVolumeResponse() here.
        return csi_pb2.ControllerExpandVolumeResponse()

    if pvtype == PV_TYPE_VIRTBLOCK:
        update_virtblock_volume(mntdir, pvname, expansion_requested_pvsize)
        expand_volume(mntdir)
    else:
        update_subdir_volume(mntdir, pvname, expansion_requested_pvsize)

    logging.info(logf("Volume expanded",
                      name=pvname,
                      size=expansion_requested_pvsize,
                      hostvol=hostvol,
                      pvtype=pvtype,
                      volpath=existing_volume.volpath,
                      duration_seconds=time.time() - start_time))

    # sizechanged is the additional change to be
    # subtracted from storage-pool
    sizechange = expansion_requested_pvsize - existing_pvsize
    update_free_size(hostvol, pvname, -sizechange)

    # if not hostvoltype:
    #     hostvoltype = "unknown"
    # send_analytics_tracker("pvc-%s" % hostvoltype, uid)

    return csi_pb2.ControllerExpandVolumeResponse(
        capacity_bytes=int(expansion_requested_pvsize))
def CreateVolume(self, request, context):
    """Provision a PV: pick a hosting volume by the request's parameter
    filters, then create either a virtblock file or a subdirectory."""
    started = time.time()
    logging.debug(logf("Create Volume request", request=request))
    requested_size = request.capacity_range.required_bytes

    # TODO: Check the available space under lock
    # Add everything from parameter as filter item
    chosen_filters = dict(request.parameters.items())
    logging.debug(logf("Filters applied to choose storage", **chosen_filters))

    candidates = get_pv_hosting_volumes(chosen_filters)
    logging.debug(logf("Got list of hosting Volumes",
                       volumes=",".join(candidates)))

    hostvol = mount_and_select_hosting_volume(candidates, requested_size)
    if hostvol is None:
        errmsg = "No Hosting Volumes available, add more storage"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
        return csi_pb2.CreateVolumeResponse()

    # using getattr to avoid Pylint error
    single_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
                            "SINGLE_NODE_WRITER")
    modes = [cap.access_mode.mode for cap in request.volume_capabilities]
    pvtype = (PV_TYPE_VIRTBLOCK if single_writer in modes
              else PV_TYPE_SUBVOL)

    logging.debug(logf("Found PV type",
                       pvtype=pvtype,
                       capabilities=request.volume_capabilities))

    # Dispatch on PV type; both creators share the same signature.
    creator = (create_virtblock_volume if pvtype == PV_TYPE_VIRTBLOCK
               else create_subdir_volume)
    vol = creator(os.path.join(HOSTVOL_MOUNTDIR, hostvol),
                  request.name, requested_size)

    logging.info(logf("Volume created",
                      name=request.name,
                      size=requested_size,
                      hostvol=hostvol,
                      pvtype=pvtype,
                      volpath=vol.volpath,
                      duration_seconds=time.time() - started))

    return csi_pb2.CreateVolumeResponse(
        volume={
            "volume_id": request.name,
            "capacity_bytes": requested_size,
            "volume_context": {
                "hostvol": hostvol,
                "pvtype": pvtype,
                "path": vol.volpath,
                "fstype": "xfs"
            }
        })
def DeleteVolume(self, req, context):
    """DeleteVolume implements csi DeleteVolume.

    Deletes the volume identified by ``req.volume_id`` through a
    cinderclient session.

    :param req: csi DeleteVolumeRequest (uses ``volume_id``)
    :param context: gRPC servicer context
    :returns: csi_pb2.DeleteVolumeResponse
    """
    cc = self._get_client_session()
    cc.volumes.delete(req.volume_id)
    # Bug fix: DeleteVolume must return DeleteVolumeResponse (the original
    # returned CreateVolumeResponse, the wrong message type for this RPC).
    return csi_pb2.DeleteVolumeResponse()