def main():
    """Entry point for the operator process.

    Loads the in-cluster kube config, installs a default client
    configuration with TLS verification disabled (known client issue,
    see note below), deploys the ConfigMap, CSI pods and StorageClass,
    reports an analytics event, then blocks watching the CRD.
    """
    config.load_incluster_config()

    # Workaround for https://github.com/kubernetes-client/python/issues/254:
    # take a copy of the default configuration, switch off TLS certificate
    # verification on it, and make it the default used by every client
    # created from here on.
    cfg = client.Configuration()
    cfg.verify_ssl = False
    client.Configuration.set_default(cfg)

    v1_api = client.CoreV1Api()
    api_client = client.ApiClient()

    # Deploy the ConfigMap; the returned UID identifies this installation.
    install_uid = deploy_config_map(v1_api)

    # Deploy the CSI pods and the Storage Class.
    deploy_csi_pods(v1_api)
    deploy_storage_class()

    # Send Analytics Tracker.  The information from this analytics is
    # available for developers to understand and build project in a
    # better way.
    send_analytics_tracker("operator", install_uid)

    # Watch the CRD (this call blocks).
    crd_watch(v1_api, api_client)
def start():
    """Start the Gluster Brick Process.

    Reads the brick configuration from the environment, prepares the
    brick directory and volfile, reports an analytics event, and then
    replaces this process with glusterfsd (does not return on success).
    """
    device = os.environ.get("BRICK_DEVICE", None)
    path = os.environ["BRICK_PATH"]

    # A raw device is optional; when one is given, format and mount it
    # at the brick path before use.
    if device:
        fstype = os.environ.get("BRICK_FS", "xfs")
        create_and_mount_brick(device, path, fstype)

    vol_id = os.environ["VOLUME_ID"]
    path_tag = path.strip("/").replace("/", "-")
    volname = os.environ["VOLUME"]
    nodename = os.environ["HOSTNAME"]

    create_brickdir(path)
    verify_brickdir_xattr_support(path)
    set_volume_id_xattr(path, vol_id)

    volfile_id = "%s.%s.%s" % (volname, nodename, path_tag)
    volfile_path = os.path.join(VOLFILES_DIR, "%s.vol" % volfile_id)
    generate_brick_volfile(volfile_path, volname)

    # UID is stored at the time of installation in configmap.
    with open(os.path.join(VOLINFO_DIR, "uid")) as uid_file:
        uid = uid_file.read()

    # Send Analytics Tracker.  The information from this analytics is
    # available for developers to understand and build project in a
    # better way.
    send_analytics_tracker("server", uid)

    # Exec glusterfsd in the foreground, logging to stderr.
    os.execv(
        "/usr/sbin/glusterfsd",
        [
            "/usr/sbin/glusterfsd",
            "-N",
            "--volfile-id", volfile_id,
            "-p", "/var/run/gluster/glusterfsd-%s.pid" % path_tag,
            "-S", "/var/run/gluster/brick.socket",
            "--brick-name", path,
            "-l", "-",  # Log to stderr
            "--xlator-option",
            "*-posix.glusterd-uuid=%s" % os.environ["NODEID"],
            "--process-name", "brick",
            "--brick-port", "24007",
            "--xlator-option",
            "%s-server.listen-port=24007" % volname,
            "-f", volfile_path
        ])
def start():
    """Start the Gluster Brick Process.

    Reads the brick configuration from the environment, prepares the
    brick directory and volfile, and finally exec()s glusterfsd in the
    foreground (this function does not return on success).
    """
    brick_device = os.environ.get("BRICK_DEVICE", None)
    brick_path = os.environ["BRICK_PATH"]
    if brick_device is not None and brick_device != "":
        brickfs = os.environ.get("BRICK_FS", "xfs")
        create_and_mount_brick(brick_device, brick_path, brickfs)

    volume_id = os.environ["VOLUME_ID"]
    brick_path_name = brick_path.strip("/").replace("/", "-")
    volname = os.environ["VOLUME"]
    nodename = os.environ["HOSTNAME"]

    create_brickdir(brick_path)
    verify_brickdir_xattr_support(brick_path)
    set_volume_id_xattr(brick_path, volume_id)

    volfile_id = "%s.%s.%s" % (volname, nodename, brick_path_name)
    volfile_path = os.path.join(VOLFILES_DIR, "%s.vol" % volfile_id)
    generate_brick_volfile(volfile_path, volname)

    # Send Analytics Tracker
    # The information from this analytics is available for
    # developers to understand and build project in a better way
    send_analytics_tracker("server")

    # Resolves the former "Change Node ID" TODO: take the glusterd UUID
    # from the NODEID environment variable (as the newer start() does).
    # The previously hard-coded UUID is kept only as a fallback for
    # deployments that do not set NODEID, so behavior is unchanged there.
    glusterd_uuid = os.environ.get(
        "NODEID", "6958dddc-1842-4ee0-92df-b6a060dfba5e")

    os.execv(
        "/usr/sbin/glusterfsd",
        [
            "/usr/sbin/glusterfsd",
            "-N",
            "--volfile-id", volfile_id,
            "-p", "/var/run/gluster/glusterfsd-%s.pid" % brick_path_name,
            # Resolves the former "Change socket file name" TODO: use a
            # stable well-known socket name (one brick per container)
            # instead of the hard-coded random-looking name.
            "-S", "/var/run/gluster/brick.socket",
            "--brick-name", brick_path,
            "-l", "-",  # Log to stderr
            "--xlator-option",
            "*-posix.glusterd-uuid=%s" % glusterd_uuid,
            "--process-name", "brick",
            "--brick-port", "24007",
            "--xlator-option",
            "%s-server.listen-port=24007" % volname,
            "-f", volfile_path
        ])
def main():
    """Entry point for the operator: deploy Kadalu components and watch the CRD."""
    # Use in-cluster service-account credentials.
    config.load_incluster_config()

    v1_api = client.CoreV1Api()
    api_client = client.ApiClient()

    # Deploy the ConfigMap, CSI pods and Storage Class in order.
    deploy_config_map(v1_api)
    deploy_csi_pods(v1_api)
    deploy_storage_class()

    # Send Analytics Tracker.  The information from this analytics is
    # available for developers to understand and build project in a
    # better way.
    send_analytics_tracker("operator")

    # Watch the CRD (this call blocks).
    crd_watch(v1_api, api_client)
def CreateVolume(self, request, context):
    """gRPC CSI CreateVolume handler.

    Provisions a PV either inside a Kadalu hosting volume (as a subdir,
    a virtual-block file or a raw-block file) or on an externally
    managed Gluster volume, depending on the request parameters.
    Errors are reported through the gRPC ``context`` (set_details /
    set_code) together with an empty CreateVolumeResponse.
    """
    start_time = time.time()
    logging.debug(logf("Create Volume request", request=request))

    # CSI spec: name and volume_capabilities are mandatory.
    if not request.name:
        errmsg = "Volume name is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    if not request.volume_capabilities:
        errmsg = "Volume Capabilities is empty and must be provided"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    # Check for same name and different capacity
    volume = search_volume(request.name)
    if volume:
        if volume.size != request.capacity_range.required_bytes:
            errmsg = "Failed to create volume with same name with different capacity"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.ALREADY_EXISTS)
            return csi_pb2.CreateVolumeResponse()

    pvsize = request.capacity_range.required_bytes
    pvtype = PV_TYPE_SUBVOL
    is_block = False
    storage_options = request.parameters.get("storage_options", "")

    # Mounted BlockVolume is requested via Storage Class.
    # GlusterFS File Volume may not be useful for some workloads
    # they can request for the Virtual Block formated and mounted
    # as default MountVolume.
    if request.parameters.get("pv_type", "").lower() == "block":
        pvtype = PV_TYPE_VIRTBLOCK
        is_block = True

    # RawBlock volume is requested via PVC
    if is_block_request(request):
        pvtype = PV_TYPE_RAWBLOCK
        is_block = True

    if is_block:
        # getattr avoids a static-analysis error on the generated protobuf enum.
        single_node_writer = getattr(csi_pb2.VolumeCapability.AccessMode,
                                     "SINGLE_NODE_WRITER")

        # Multi node writer is not allowed for PV_TYPE_VIRTBLOCK/PV_TYPE_RAWBLOCK
        if pvc_access_mode(request) != single_node_writer:
            errmsg = "Only SINGLE_NODE_WRITER is allowed for block Volume"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.CreateVolumeResponse()

    logging.debug(logf("Found PV type",
                       pvtype=pvtype,
                       capabilities=request.volume_capabilities))

    # TODO: Check the available space under lock

    # Add everything from parameter as filter item
    filters = {}
    for pkey, pvalue in request.parameters.items():
        filters[pkey] = pvalue

    logging.debug(logf("Filters applied to choose storage", **filters))

    # UID is stored at the time of installation in configmap.
    uid = None
    with open(os.path.join(VOLINFO_DIR, "uid")) as uid_file:
        uid = uid_file.read()

    host_volumes = get_pv_hosting_volumes(filters)
    logging.debug(logf("Got list of hosting Volumes",
                       volumes=",".join(v['name'] for v in host_volumes)))
    hostvol = None
    ext_volume = None
    data = {}
    hostvoltype = filters.get("hostvol_type", None)
    if not hostvoltype:
        # This means, the request came on 'kadalu' storage class type.

        # Randomize the entries so we can issue PV from different storage
        random.shuffle(host_volumes)
        hostvol = mount_and_select_hosting_volume(host_volumes, pvsize)
        if hostvol is None:
            errmsg = "No Hosting Volumes available, add more storage"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
            return csi_pb2.CreateVolumeResponse()

        # Discover the storage type from the selected hosting volume's
        # info file (written at install time).
        info_file_path = os.path.join(VOLINFO_DIR, "%s.info" % hostvol)
        with open(info_file_path) as info_file:
            data = json.load(info_file)

        hostvoltype = data['type']

    kformat = filters.get('kadalu_format', "native")
    if hostvoltype == 'External':
        ext_volume = check_external_volume(request, host_volumes)
        if ext_volume:
            mntdir = os.path.join(HOSTVOL_MOUNTDIR, ext_volume['name'])

            # By default 'kadalu_format' is set to 'native' as part of CRD
            # definition
            if kformat == 'non-native':
                # If 'kadalu_format' is 'non-native', the request will be
                # considered as to map 1 PV to 1 Gluster volume
                # No need to keep the mount on controller
                unmount_glusterfs(mntdir)

                # NOTE(review): hostvol/volpath below carry g_volname/g_host
                # respectively, which looks swapped relative to the field
                # names — confirm this is intentional in the log schema.
                logging.info(logf("Volume (External) created",
                                  name=request.name,
                                  size=pvsize,
                                  mount=mntdir,
                                  hostvol=ext_volume['g_volname'],
                                  pvtype=pvtype,
                                  volpath=ext_volume['g_host'],
                                  duration_seconds=time.time() - start_time))

                send_analytics_tracker("pvc-external", uid)
                return csi_pb2.CreateVolumeResponse(
                    volume={
                        "volume_id": request.name,
                        "capacity_bytes": pvsize,
                        "volume_context": {
                            "type": hostvoltype,
                            "hostvol": ext_volume['name'],
                            "pvtype": pvtype,
                            "gvolname": ext_volume['g_volname'],
                            "gserver": ext_volume['g_host'],
                            "fstype": "xfs",
                            "options": ext_volume['g_options'],
                            "kformat": kformat,
                        }
                    })

            # The external volume should be used as kadalu host vol
            if not is_hosting_volume_free(ext_volume['name'], pvsize):
                logging.error(logf("Hosting volume is full. Add more storage",
                                   volume=ext_volume['name']))
                errmsg = "External resource is exhausted"
                context.set_details(errmsg)
                context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
                return csi_pb2.CreateVolumeResponse()

            if pvtype in [PV_TYPE_VIRTBLOCK, PV_TYPE_RAWBLOCK]:
                vol = create_block_volume(pvtype, mntdir, request.name, pvsize)
            else:
                # Gluster directory quota is used only when the SSH key
                # secret is mounted and the username env var is present.
                use_gluster_quota = False
                if (os.path.isfile("/etc/secret-volume/ssh-privatekey")
                        and "SECRET_GLUSTERQUOTA_SSH_USERNAME" in os.environ):
                    use_gluster_quota = True
                secret_private_key = "/etc/secret-volume/ssh-privatekey"
                secret_username = os.environ.get(
                    'SECRET_GLUSTERQUOTA_SSH_USERNAME', None)
                hostname = filters.get("gluster_hosts", None)
                gluster_vol_name = filters.get("gluster_volname", None)
                vol = create_subdir_volume(
                    mntdir, request.name, pvsize, use_gluster_quota)
                quota_size = pvsize
                quota_path = vol.volpath
                if use_gluster_quota is False:
                    logging.debug(logf("Set Quota in the native way"))
                else:
                    logging.debug(
                        logf("Set Quota using gluster directory Quota"))
                    # Set the quota over SSH on the external Gluster host.
                    errmsg = execute_gluster_quota_command(
                        secret_private_key, secret_username, hostname,
                        gluster_vol_name, quota_path, quota_size)
                    if errmsg:
                        context.set_details(errmsg)
                        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
                        return csi_pb2.CreateVolumeResponse()

            logging.info(logf("Volume created",
                              name=request.name,
                              size=pvsize,
                              hostvol=ext_volume['name'],
                              pvtype=pvtype,
                              volpath=vol.volpath,
                              duration_seconds=time.time() - start_time))

            send_analytics_tracker("pvc-external-kadalu", uid)

            # Pass required argument to get mount working on
            # nodeplugin through volume_context
            return csi_pb2.CreateVolumeResponse(
                volume={
                    "volume_id": request.name,
                    "capacity_bytes": pvsize,
                    "volume_context": {
                        "type": hostvoltype,
                        "hostvol": ext_volume['name'],
                        "pvtype": pvtype,
                        "path": vol.volpath,
                        "gvolname": ext_volume['g_volname'],
                        "gserver": ext_volume['g_host'],
                        "fstype": "xfs",
                        "options": ext_volume['g_options'],
                        "kformat": kformat,
                    }
                })

        # If external volume not found
        logging.debug(logf("Here as checking external volume failed",
                           external_volume=ext_volume))
        errmsg = "External Storage provided not valid"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    # Internal (non-External) storage types: pick a hosting volume if the
    # typed storage-class path did not already select one above.
    if not hostvol:
        # Randomize the entries so we can issue PV from different storage
        random.shuffle(host_volumes)
        hostvol = mount_and_select_hosting_volume(host_volumes, pvsize)
        if hostvol is None:
            errmsg = "No Hosting Volumes available, add more storage"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
            return csi_pb2.CreateVolumeResponse()

    if kformat == 'non-native':
        # Then mount the whole volume as PV
        msg = "non-native way of Kadalu mount expected"
        logging.info(msg)
        return csi_pb2.CreateVolumeResponse(
            volume={
                "volume_id": request.name,
                "capacity_bytes": pvsize,
                "volume_context": {
                    "type": hostvoltype,
                    "hostvol": hostvol,
                    "pvtype": pvtype,
                    "fstype": "xfs",
                    "kformat": kformat,
                }
            })

    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
    if pvtype in [PV_TYPE_VIRTBLOCK, PV_TYPE_RAWBLOCK]:
        vol = create_block_volume(pvtype, mntdir, request.name, pvsize)
    else:
        # Native path: no gluster directory quota on internal volumes here.
        use_gluster_quota = False
        vol = create_subdir_volume(
            mntdir, request.name, pvsize, use_gluster_quota)

    logging.info(logf("Volume created",
                      name=request.name,
                      size=pvsize,
                      hostvol=hostvol,
                      pvtype=pvtype,
                      volpath=vol.volpath,
                      duration_seconds=time.time() - start_time))

    # Note: size is passed negative to reduce the hosting volume's free size.
    update_free_size(hostvol, request.name, -pvsize)

    send_analytics_tracker("pvc-%s" % hostvoltype, uid)
    return csi_pb2.CreateVolumeResponse(
        volume={
            "volume_id": request.name,
            "capacity_bytes": pvsize,
            "volume_context": {
                "type": hostvoltype,
                "hostvol": hostvol,
                "pvtype": pvtype,
                "path": vol.volpath,
                "fstype": "xfs",
                "kformat": kformat,
                "storage_options": storage_options
            }
        })
def CreateVolume(self, request, context):
    """gRPC CSI CreateVolume handler (older variant).

    Provisions a PV as a subdir or a virtual-block file, either on an
    externally managed Gluster volume or on an internal Kadalu hosting
    volume.  Errors are reported through the gRPC ``context`` together
    with an empty CreateVolumeResponse.
    """
    start_time = time.time()
    logging.debug(logf("Create Volume request", request=request))
    pvsize = request.capacity_range.required_bytes
    pvtype = PV_TYPE_SUBVOL

    # 'latest' finds a place here, because only till 0.5.0 version
    # we had 'latest' as a separate version. After that, 'latest' is
    # just a link to latest version.
    if KADALU_VERSION in ["0.5.0", "0.4.0", "0.3.0"]:
        for vol_capability in request.volume_capabilities:
            # using getattr to avoid Pylint error
            single_node_writer = getattr(
                csi_pb2.VolumeCapability.AccessMode, "SINGLE_NODE_WRITER")

            if vol_capability.access_mode.mode == single_node_writer:
                pvtype = PV_TYPE_VIRTBLOCK

    logging.debug(logf("Found PV type",
                       pvtype=pvtype,
                       capabilities=request.volume_capabilities))

    # TODO: Check the available space under lock

    # Add everything from parameter as filter item
    filters = {}
    for pkey, pvalue in request.parameters.items():
        filters[pkey] = pvalue

    logging.debug(logf("Filters applied to choose storage", **filters))

    # UID is stored at the time of installation in configmap.
    uid = None
    with open(os.path.join(VOLINFO_DIR, "uid")) as uid_file:
        uid = uid_file.read()

    host_volumes = get_pv_hosting_volumes(filters)
    logging.debug(logf("Got list of hosting Volumes",
                       volumes=",".join(v['name'] for v in host_volumes)))
    ext_volume = None
    hostvoltype = filters.get("hostvol_type", None)
    if hostvoltype == 'External':
        ext_volume = check_external_volume(request, host_volumes)
        if ext_volume:
            mntdir = os.path.join(HOSTVOL_MOUNTDIR, ext_volume['name'])

            # NOTE(review): this variant reads 'kadalu-format' (hyphen);
            # the newer handler uses 'kadalu_format' (underscore) —
            # confirm which key the storage class actually sets.
            if not filters.get('kadalu-format', None):
                # No need to keep the mount on controller
                unmount_glusterfs(mntdir)

                # NOTE(review): hostvol/volpath below carry g_volname/g_host
                # respectively, which looks swapped relative to the field
                # names — confirm intent.
                logging.info(logf("Volume (External) created",
                                  name=request.name,
                                  size=pvsize,
                                  mount=mntdir,
                                  hostvol=ext_volume['g_volname'],
                                  pvtype=pvtype,
                                  volpath=ext_volume['g_host'],
                                  duration_seconds=time.time() - start_time))

                send_analytics_tracker("pvc-external", uid)
                # NOTE(review): this response reads ext_volume['options']
                # while the kadalu-hostvol response below reads
                # ext_volume['g_options'] — verify both keys exist.
                return csi_pb2.CreateVolumeResponse(
                    volume={
                        "volume_id": request.name,
                        "capacity_bytes": pvsize,
                        "volume_context": {
                            "type": hostvoltype,
                            "hostvol": ext_volume['name'],
                            "pvtype": pvtype,
                            "gvolname": ext_volume['g_volname'],
                            "gserver": ext_volume['g_host'],
                            "fstype": "xfs",
                            "options": ext_volume['options'],
                        }
                    })

            # The external volume should be used as kadalu host vol
            # TODO: handle the case where host-volume is full
            # can-be-fixed-by-an-intern
            if pvtype == PV_TYPE_VIRTBLOCK:
                vol = create_virtblock_volume(mntdir, request.name, pvsize)
            else:
                vol = create_subdir_volume(mntdir, request.name, pvsize)

            logging.info(logf("Volume created",
                              name=request.name,
                              size=pvsize,
                              hostvol=ext_volume['name'],
                              pvtype=pvtype,
                              volpath=vol.volpath,
                              duration_seconds=time.time() - start_time))

            send_analytics_tracker("pvc-external-kadalu", uid)

            # Pass required argument to get mount working on
            # nodeplugin through volume_context
            return csi_pb2.CreateVolumeResponse(
                volume={
                    "volume_id": request.name,
                    "capacity_bytes": pvsize,
                    "volume_context": {
                        "type": hostvoltype,
                        "hostvol": ext_volume['name'],
                        "pvtype": pvtype,
                        "path": vol.volpath,
                        "gvolname": ext_volume['g_volname'],
                        "gserver": ext_volume['g_host'],
                        "fstype": "xfs",
                        "options": ext_volume['g_options'],
                    }
                })

        # If external volume not found
        logging.debug(logf("Here as checking external volume failed",
                           external_volume=ext_volume))
        errmsg = "External Storage provided not valid"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
        return csi_pb2.CreateVolumeResponse()

    # Internal storage path.
    # Randomize the entries so we can issue PV from different storage
    random.shuffle(host_volumes)
    hostvol = mount_and_select_hosting_volume(host_volumes, pvsize)
    if hostvol is None:
        errmsg = "No Hosting Volumes available, add more storage"
        logging.error(errmsg)
        context.set_details(errmsg)
        context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
        return csi_pb2.CreateVolumeResponse()

    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
    if pvtype == PV_TYPE_VIRTBLOCK:
        vol = create_virtblock_volume(mntdir, request.name, pvsize)
    else:
        vol = create_subdir_volume(mntdir, request.name, pvsize)

    logging.info(logf("Volume created",
                      name=request.name,
                      size=pvsize,
                      hostvol=hostvol,
                      pvtype=pvtype,
                      volpath=vol.volpath,
                      duration_seconds=time.time() - start_time))

    # Note: size is passed negative to reduce the hosting volume's free size.
    update_free_size(hostvol, request.name, -pvsize)

    send_analytics_tracker("pvc-%s" % hostvoltype, uid)
    return csi_pb2.CreateVolumeResponse(
        volume={
            "volume_id": request.name,
            "capacity_bytes": pvsize,
            "volume_context": {
                "type": hostvoltype,
                "hostvol": hostvol,
                "pvtype": pvtype,
                "path": vol.volpath,
                "fstype": "xfs"
            }
        })