def deploy_storage_class(obj):
    """
    Deploy the default and custom StorageClasses for KaDalu.

    Renders every "storageclass-<name>.yaml.j2" template found in
    MANIFESTS_DIR and applies it with kubectl. Existing StorageClasses
    are re-applied (apply is idempotent), not skipped.
    """
    # Deploy default Storage Class
    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class()

    # Collect the <name> part of every storageclass template.
    # Fix: require the full ".yaml.j2" suffix. The old check accepted any
    # "*.j2" file, but then stripped only ".yaml.j2", so a file like
    # "storageclass-x.j2" produced the bogus name "x.j2".
    sc_names = []
    for tmpl in os.listdir(MANIFESTS_DIR):
        if tmpl.startswith("storageclass-") and tmpl.endswith(".yaml.j2"):
            sc_names.append(
                tmpl[len("storageclass-"):-len(".yaml.j2")])

    installed_scs = [item.metadata.name for item in scs.items]
    for sc_name in sc_names:
        filename = os.path.join(MANIFESTS_DIR,
                                "storageclass-%s.yaml" % sc_name)
        if sc_name in installed_scs:
            logging.info(
                logf("StorageClass already present, continuing with Apply",
                     manifest=filename))

        template(filename,
                 namespace=NAMESPACE,
                 kadalu_version=VERSION,
                 hostvol_name=obj["metadata"]["name"],
                 kadalu_format=obj["spec"].get("kadalu_format", "native"))
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
        logging.info(logf("Deployed StorageClass", manifest=filename))
def deploy_config_map(core_v1_client):
    """
    Deploy the KaDalu template ConfigMap if it does not already exist.

    When the ConfigMap is already present, its "uid" entry is preserved
    and the deploy is treated as an upgrade (no `kubectl create`).
    Returns a (uid, upgrade) tuple.
    """
    configmaps = core_v1_client.list_namespaced_config_map(NAMESPACE)
    uid = uuid.uuid4()
    upgrade = False

    for item in configmaps.items:
        if item.metadata.name != KADALU_CONFIG_MAP:
            continue

        logging.info(
            logf("Found existing configmap. Updating",
                 name=item.metadata.name))

        # Don't overwrite UID info.
        existing = core_v1_client.read_namespaced_config_map(
            KADALU_CONFIG_MAP, NAMESPACE)
        if existing.data.get("uid", None):
            uid = existing.data["uid"]
            upgrade = True
        # Keep the config details required to be preserved.

    # Deploy Config map
    filename = os.path.join(MANIFESTS_DIR, "configmap.yaml")
    template(filename,
             namespace=NAMESPACE,
             kadalu_version=VERSION,
             uid=uid)
    if not upgrade:
        lib_execute(KUBECTL_CMD, CREATE_CMD, "-f", filename)
    logging.info(
        logf("ConfigMap Deployed", manifest=filename, uid=uid,
             upgrade=upgrade))
    return uid, upgrade
def handle_added(core_v1_client, obj):
    """
    Handle a newly requested Volume: validate, update the ConfigMap
    and deploy the StorageClass, server pods and service.
    """
    if not validate_volume_request(obj):
        # TODO: Delete Custom resource
        logging.debug(logf("validation of volume request failed", yaml=obj))
        return

    volname = obj["metadata"]["name"]

    # Ignore if already deployed: any server statefulset pod for this
    # volume means the work was done earlier.
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    prefix = "server-" + volname + "-"
    if any(pod.metadata.name.startswith(prefix) for pod in pods.items):
        logging.debug(
            logf("Ignoring already deployed server statefulsets",
                 storagename=volname))
        return

    # Ignore if the config map already carries an entry for this volume.
    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)
    if configmap_data.data.get("%s.info" % volname, None):
        logging.debug(
            logf("Ignoring already updated volume config",
                 storagename=volname))
        return

    # Generate a new Volume ID, or re-use the one supplied in the spec
    # (recreating a storage pool from an existing device/path).
    if obj["spec"].get("volume_id", None) is None:
        obj["spec"]["volume_id"] = str(uuid.uuid1())
    else:
        logging.info(
            logf("Applying existing volume id",
                 volume_id=obj["spec"]["volume_id"]))

    if obj["spec"]["type"] == VOLUME_TYPE_EXTERNAL:
        handle_external_storage_addition(core_v1_client, obj)
        return

    # Assign a Node ID to each storage device.
    for idx, storage in enumerate(obj["spec"]["storage"]):
        storage["node_id"] = "node-%d" % idx

    # Storage Class
    deploy_storage_class(obj)
    update_config_map(core_v1_client, obj)
    deploy_server_pods(obj)

    filename = os.path.join(MANIFESTS_DIR, "services.yaml")
    template(filename, namespace=NAMESPACE, volname=volname)
    lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
    logging.info(logf("Deployed Service", volname=volname,
                      manifest=filename))
def delete_storage_class(hostvol_name, _):
    """Delete the deployed External/Custom StorageClass of a hosting volume."""
    lib_execute(KUBECTL_CMD, DELETE_CMD, "sc", "kadalu." + hostvol_name)
    logging.info(logf("Deleted Storage class", volname=hostvol_name))
def handle_modified(core_v1_client, obj):
    """
    Handle updates to a Volume's options (or a state change to
    maintenance). External and Replica1 volume types are not
    modifiable; everything else is re-rendered and re-applied.
    """
    # TODO: Handle Volume maintenance mode
    volname = obj["metadata"]["name"]
    voltype = obj["spec"]["type"]

    if voltype == VOLUME_TYPE_EXTERNAL:
        # Modification of 'External' volume type is not supported
        logging.info(
            logf("Modification of 'External' volume type is not supported",
                 storagename=volname))
        return

    # It doesn't make sense to support Replica1 also in this operation.
    if voltype == VOLUME_TYPE_REPLICA_1:
        logging.info(
            logf("Modification of '%s' volume type is not supported" %
                 VOLUME_TYPE_REPLICA_1,
                 storagename=volname))
        return

    if not validate_volume_request(obj):
        logging.debug(logf("validation of volume request failed", yaml=obj))
        return

    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)
    if not configmap_data.data.get("%s.info" % volname, None):
        # Volume doesn't exist yet, nothing to modify.
        logging.error(logf("Volume config not found", storagename=volname))
        return

    # Volume ID (uuid) was generated on creation — re-use it from the
    # config map, along with the Node ID of each storage device.
    cfgmap = json.loads(configmap_data.data[volname + ".info"])
    obj["spec"]["volume_id"] = cfgmap["volume_id"]
    for idx, storage in enumerate(obj["spec"]["storage"]):
        storage["node_id"] = cfgmap["bricks"][idx]["node_id"]

    # Refresh the config map entry and the server pods.
    update_config_map(core_v1_client, obj)
    deploy_server_pods(obj)

    filename = os.path.join(MANIFESTS_DIR, "services.yaml")
    template(filename, namespace=NAMESPACE, volname=volname)
    lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
    logging.info(logf("Deployed Service", volname=volname,
                      manifest=filename))
def deploy_server_pods(obj):
    """
    Deploy server pods (one StatefulSet per brick) depending on the
    type of Hosting Volume and other options specified in the spec.
    """
    volname = obj["metadata"]["name"]
    voltype = obj["spec"]["type"]
    pv_reclaim_policy = obj["spec"].get("pvReclaimPolicy", "delete")
    tolerations = obj["spec"].get("tolerations")
    docker_user = os.environ.get("DOCKER_USER", "kadalu")

    # Self-heal daemon is only required for redundant volume types.
    shd_required = voltype in (VOLUME_TYPE_REPLICA_3, VOLUME_TYPE_REPLICA_2,
                               VOLUME_TYPE_DISPERSE)

    template_args = {
        "namespace": NAMESPACE,
        "kadalu_version": VERSION,
        "images_hub": IMAGES_HUB,
        "docker_user": docker_user,
        "volname": volname,
        "voltype": voltype,
        "pvReclaimPolicy": pv_reclaim_policy,
        "volume_id": obj["spec"]["volume_id"],
        "shd_required": shd_required
    }

    # One StatefulSet per Brick
    for idx, storage in enumerate(obj["spec"]["storage"]):
        template_args["host_brick_path"] = storage.get("path", "")
        template_args["kube_hostname"] = storage.get("node", "")
        # TODO: Understand the need, and usage of suffix
        serverpod_name = get_brick_hostname(volname, idx, suffix=False)
        template_args["serverpod_name"] = serverpod_name
        template_args["brick_path"] = "/bricks/%s/data/brick" % volname
        template_args["brick_index"] = idx
        template_args["brick_device"] = storage.get("device", "")
        template_args["pvc_name"] = storage.get("pvc", "")
        template_args["brick_device_dir"] = get_brick_device_dir(storage)
        template_args["brick_node_id"] = storage["node_id"]
        template_args["k8s_dist"] = K8S_DIST
        template_args["verbose"] = VERBOSE

        filename = os.path.join(MANIFESTS_DIR, "server.yaml")
        template(filename, **template_args)
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
        logging.info(
            logf("Deployed Server pod",
                 volname=volname,
                 manifest=filename,
                 node=storage.get("node", "")))
        # Fix: patch every brick's StatefulSet. Patching after the loop
        # used the loop variable and therefore only patched the last one.
        add_tolerations("statefulsets", serverpod_name, tolerations)

    add_tolerations("daemonset", NODE_PLUGIN, tolerations)
def handle_deleted(core_v1_client, obj):
    """
    If the number of PVs provisioned from the volume is zero, delete the
    respective server pods. If the number of PVs is not zero, wait or
    periodically check for num_pvs; server pods are deleted only when
    the PV count drops to zero.
    """
    volname = obj["metadata"]["name"]
    storage_info_data = get_configmap_data(volname)

    logging.info(logf("Delete requested", volname=volname))

    pv_count = get_num_pvs(storage_info_data)
    if pv_count == -1:
        logging.error(
            logf("Storage delete failed. Failed to get PV count",
                 number_of_pvs=pv_count,
                 storage=volname))
        return

    if pv_count != 0:
        # Still-provisioned PVs block deletion.
        logging.warning(
            logf("Storage delete failed. Storage is not empty",
                 number_of_pvs=pv_count,
                 storage=volname))
        return

    if storage_info_data.get("type") == "External":
        # We can't delete external volume but cleanup StorageClass and
        # Configmap
        volname = "kadalu.external." + volname
        lib_execute(KUBECTL_CMD, DELETE_CMD, "sc", volname)
        logging.info(logf(
            "Deleted Storage class",
            volname=volname,
        ))
        delete_config_map(core_v1_client, obj)
        return

    delete_server_pods(storage_info_data, obj)
    delete_config_map(core_v1_client, obj)
    filename = os.path.join(MANIFESTS_DIR, "services.yaml")
    template(filename, namespace=NAMESPACE, volname=volname)
    lib_execute(KUBECTL_CMD, DELETE_CMD, "-f", filename)
    logging.info(
        logf("Deleted Service", volname=volname, manifest=filename))
    return
def delete_server_pods(storage_info_data, obj):
    """
    Delete server pods depending on the type of Hosting Volume and
    other options recorded in the ConfigMap at deploy time. Re-renders
    the same server manifest per brick and runs `kubectl delete` on it.
    """
    volname = obj["metadata"]["name"]
    voltype = storage_info_data['type']
    volumeid = storage_info_data['volume_id']

    docker_user = os.environ.get("DOCKER_USER", "kadalu")

    # Consistency fix: deploy_server_pods sets shd_required for Disperse
    # volumes too; include it here so the manifest rendered for deletion
    # matches the one that was applied.
    shd_required = voltype in (VOLUME_TYPE_REPLICA_3, VOLUME_TYPE_REPLICA_2,
                               VOLUME_TYPE_DISPERSE)

    template_args = {
        "namespace": NAMESPACE,
        "kadalu_version": VERSION,
        "docker_user": docker_user,
        "images_hub": IMAGES_HUB,
        "volname": volname,
        "voltype": voltype,
        "volume_id": volumeid,
        "shd_required": shd_required
    }

    bricks = storage_info_data['bricks']

    # Traverse all bricks from configmap
    for brick in bricks:
        idx = brick['brick_index']
        template_args["host_brick_path"] = brick['host_brick_path']
        template_args["kube_hostname"] = brick['kube_hostname']
        template_args["serverpod_name"] = get_brick_hostname(volname,
                                                             idx,
                                                             suffix=False)
        template_args["brick_path"] = "/bricks/%s/data/brick" % volname
        template_args["brick_index"] = idx
        template_args["brick_device"] = brick['brick_device']
        template_args["pvc_name"] = brick['pvc_name']
        template_args["brick_device_dir"] = brick['brick_device_dir']
        template_args["brick_node_id"] = brick['node_id']
        template_args["k8s_dist"] = K8S_DIST

        filename = os.path.join(MANIFESTS_DIR, "server.yaml")
        template(filename, **template_args)
        lib_execute(KUBECTL_CMD, DELETE_CMD, "-f", filename)
        logging.info(
            logf("Deleted Server pod",
                 volname=volname,
                 manifest=filename,
                 node=brick['node']))
def deploy_csi_pods(core_v1_client): """ Look for CSI pods, if any one CSI pod found then that means it is deployed """ pods = core_v1_client.list_namespaced_pod(NAMESPACE) for pod in pods.items: if pod.metadata.name.startswith(CSI_POD_PREFIX): logging.info("Updating already deployed CSI pods") # Deploy CSI Pods api_instance = client.VersionApi().get_code() if api_instance.major > "1" or api_instance.major == "1" and \ api_instance.minor >= "22": csi_driver_version = csi_driver_object_api_version() if csi_driver_version is not None and \ csi_driver_version != "v1": lib_execute(KUBECTL_CMD, DELETE_CMD, "csidriver", "kadalu") logging.info( logf("Deleted existing CSI Driver object", csi_driver_version=csi_driver_version)) filename = os.path.join(MANIFESTS_DIR, "csi-driver-object-v1.yaml") template(filename, namespace=NAMESPACE, kadalu_version=VERSION) lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename) elif api_instance.major > "1" or api_instance.major == "1" and \ api_instance.minor >= "14": filename = os.path.join(MANIFESTS_DIR, "csi-driver-object.yaml") template(filename, namespace=NAMESPACE, kadalu_version=VERSION) lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename) else: filename = os.path.join(MANIFESTS_DIR, "csi-driver-crd.yaml") template(filename, namespace=NAMESPACE, kadalu_version=VERSION) lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename) filename = os.path.join(MANIFESTS_DIR, "csi.yaml") docker_user = os.environ.get("DOCKER_USER", "kadalu") template( filename, namespace=NAMESPACE, kadalu_version=VERSION, docker_user=docker_user, k8s_dist=K8S_DIST, kubelet_dir=KUBELET_DIR, verbose=VERBOSE, ) lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename) logging.info(logf("Deployed CSI Pods", manifest=filename))
def handle_external_storage_addition(core_v1_client, obj):
    """
    Register an External storage volume: record it in the ConfigMap and
    deploy one StorageClass (and service config) per volume.
    """
    volname = obj["metadata"]["name"]
    details = obj["spec"]["details"]
    pv_reclaim_policy = obj["spec"].get("pvReclaimPolicy", "delete")
    tolerations = obj["spec"].get("tolerations")

    # Merge the single-host and multi-host spec fields into one list.
    hosts = []
    single_host = details.get("gluster_host", None)
    multi_hosts = details.get("gluster_hosts", None)
    if single_host:
        hosts.append(single_host)
    if multi_hosts:
        hosts.extend(multi_hosts)

    data = {
        "volname": volname,
        "volume_id": obj["spec"]["volume_id"],
        "type": VOLUME_TYPE_EXTERNAL,
        "pvReclaimPolicy": pv_reclaim_policy,
        # CRD would set 'native' but just being cautious
        "kadalu_format": obj["spec"].get("kadalu_format", "native"),
        "gluster_hosts": ",".join(hosts),
        "gluster_volname": details["gluster_volname"],
        "gluster_options": details.get("gluster_options", ""),
    }

    # Add a new entry for this volume in the existing config map.
    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)
    configmap_data.data["%s.info" % volname] = json.dumps(data)
    core_v1_client.patch_namespaced_config_map(KADALU_CONFIG_MAP, NAMESPACE,
                                               configmap_data)
    logging.info(
        logf("Updated configmap", name=KADALU_CONFIG_MAP, volname=volname))

    filename = os.path.join(MANIFESTS_DIR, "external-storageclass.yaml")
    template(filename, **data)
    lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
    logging.info(
        logf("Deployed External StorageClass",
             volname=volname,
             manifest=filename))

    add_tolerations("daemonset", NODE_PLUGIN, tolerations)
def add_tolerations(resource, name, tolerations):
    """
    Patch tolerations onto a kubernetes <resource>/<name> object.

    No-op when tolerations is None. Logs an error and returns if the
    kubectl patch fails; logs success otherwise.
    """
    if tolerations is None:
        return
    patch = {"spec": {"template": {"spec": {"tolerations": tolerations}}}}
    try:
        lib_execute(KUBECTL_CMD, PATCH_CMD, resource, name, "-p",
                    json.dumps(patch), "--type=merge")
    except CommandException as err:
        # Fix: the old backslash continuation embedded a long run of
        # spaces inside the log message.
        errmsg = f"Unable to patch {resource}/{name} with tolerations " \
                 f"{str(tolerations)}"
        logging.error(logf(errmsg, error=err))
        # Fix: bail out after a failed patch — previously control fell
        # through and logged "Added tolerations" even on failure.
        return
    logging.info(
        logf("Added tolerations",
             resource=resource,
             name=name,
             tolerations=str(tolerations)))
    return