Example #1
def yield_hostvol_mount():
    """Yields mount directory where hostvol is mounted"""
    host_volumes = get_pv_hosting_volumes()
    info_exist = False
    for volume in host_volumes:
        hvol = volume['name']
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)
        try:
            mount_glusterfs(volume, mntdir)
        except CommandException as excep:
            logging.error(
                logf("Unable to mount volume", hvol=hvol, excep=excep.args))
            # We aren't able to mount this specific hostvol; signal the
            # failure and move on to the next pool.
            yield None
            continue
        logging.info(logf("Volume is mounted successfully", hvol=hvol))
        info_path = os.path.join(mntdir, 'info')
        if os.path.isdir(info_path):
            # After mounting a hostvol, start looking for PVC from '/mnt/<pool>/info'
            info_exist = True
            yield info_path
    if not info_exist:
        # No pool had an info directory. Inside a generator, `return` is
        # equivalent to raising StopIteration, but a return with a reason
        # is clearer.
        return "No storage pool exists"
Example #2
def deploy_storage_class():
    """Deploys the default storage class for KaDalu if not exists"""

    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class()
    sc_names = []
    for tmpl in os.listdir(MANIFESTS_DIR):
        if tmpl.startswith("storageclass-"):
            sc_names.append(
                tmpl.replace("storageclass-", "").replace(".yaml.j2", ""))

    installed_scs = [item.metadata.name for item in scs.items]
    for sc_name in sc_names:
        filename = os.path.join(MANIFESTS_DIR,
                                "storageclass-%s.yaml" % sc_name)
        if sc_name in installed_scs:
            logging.info(
                logf("Ignoring already deployed StorageClass",
                     manifest=filename))
            continue

        # Deploy Storage Class
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, "create", "-f", filename)
        logging.info(logf("Deployed StorageClass", manifest=filename))
Example #3
def validate_ext_details(obj):
    """Validate external Volume details"""
    clusterdata = obj["spec"].get("details", None)
    if not clusterdata:
        logging.error(logf("External Cluster details not given."))
        return False

    valid = 0
    if len(clusterdata) > 1:
        logging.error(logf("Multiple External Cluster details given."))
        return False

    for cluster in clusterdata:
        if cluster.get('gluster_host', None):
            valid += 1
        if cluster.get('gluster_volname', None):
            valid += 1

    if valid != 2:
        logging.error(logf("No 'host' and 'volname' details provided."))
        return False

    logging.debug(logf("External Storage %s successfully validated" % \
                       obj["metadata"].get("name", "<unknown>")))
    return True
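
A hypothetical object that satisfies this check (field values are made up):

obj = {
    "metadata": {"name": "ext-pool"},
    "spec": {
        "details": [{
            "gluster_host": "gluster1.example.com",
            "gluster_volname": "gvol0",
        }],
    },
}
assert validate_ext_details(obj)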
Example #4
def yield_list_of_pvcs(max_entries=0):
    """Yields list of PVCs limited at 'max_entries'"""
    # List of tuples containing PVC Name and Size
    pvcs = []
    for idx, value in enumerate(wrap_pvc(yield_pvc_from_hostvol)):
        pvc, last = value
        token = "" if last else str(idx)
        pvcs.append(pvc)
        # Yield the collected values when one of the below is observed:
        # 1. max_entries is set and we have collected max_entries PVCs
        # 2. max_entries is set and we are at the last PVC (total PVCs not
        #    aligned to max_entries)
        # 3. max_entries is not set (~all) and we are at the last PVC
        # pylint: disable=too-many-boolean-expressions
        if (max_entries and len(pvcs) == max_entries) or (
                max_entries and last) or (not max_entries and last):
            # As per the spec, 'token' has to be a string; we simply use the
            # current PVC count as the 'token' and validate it on resume.
            next_token = yield
            logging.debug(logf("Received token", next_token=next_token))
            if next_token and not last and (int(next_token) !=
                                            int(token) - max_entries):
                return
            logging.debug(
                logf("Yielding PVC set and next token",
                     token=token,
                     pvcs=pvcs))
            yield pvcs, token
            pvcs.clear()  # reset the batch for the next page
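
A sketch of the send/yield protocol this generator expects (the driver loop is hypothetical; the helpers used by the generator are assumed to be importable):

gen = yield_list_of_pvcs(max_entries=5)
token = ""
while True:
    try:
        next(gen)                      # advance to the 'next_token = yield' point
        pvcs, token = gen.send(token)  # trade the token for the next page
    except StopIteration:
        break
    print(pvcs, token)
    if token == "":                    # an empty token marks the final page
        break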
Example #5
def deploy_config_map(core_v1_client):
    """Deploys the template configmap if not exists"""

    configmaps = core_v1_client.list_namespaced_config_map(NAMESPACE)
    create_cmd = "create"
    uid = uuid.uuid4()
    for item in configmaps.items:
        if item.metadata.name == KADALU_CONFIG_MAP:
            logging.info(
                logf("Found existing configmap. Updating",
                     name=item.metadata.name))

            create_cmd = "apply"
            # Don't overwrite UID info.
            configmap_data = core_v1_client.read_namespaced_config_map(
                KADALU_CONFIG_MAP, NAMESPACE)
            if configmap_data.data.get("uid", None):
                uid = configmap_data.data["uid"]
            # Preserve any other config details that must survive the update.

    # Deploy Config map
    filename = os.path.join(MANIFESTS_DIR, "configmap.yaml")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION, uid=uid)
    execute(KUBECTL_CMD, create_cmd, "-f", filename)
    logging.info(logf("Deployed ConfigMap", manifest=filename))
    return uid
Example #6
def check_external_volume(pv_request):
    """Mount hosting volume"""
    # The assumption is that this request has 'hostvol_type' set to External.
    params = {}
    for pkey, pvalue in pv_request.parameters.items():
        params[pkey] = pvalue

    hvol = {
        "host": params['gluster_host'],
        "name": params['gluster_volname'],
        "options": params['gluster_options'],
    }
    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol['name'])

    mount_glusterfs_with_host(hvol['name'], mntdir, hvol['host'], hvol['options'])

    time.sleep(0.37)

    if not is_gluster_mount_proc_running(hvol['name'], mntdir):
        logging.debug(logf(
            "Mount failed",
            hvol=hvol,
            mntdir=mntdir
        ))
        return None

    logging.debug(logf(
        "Mount successful",
        hvol=hvol
    ))

    return hvol
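
A hypothetical stand-in for the CSI request object, showing the parameter keys this function reads (values are made up):

class FakePVRequest:
    parameters = {
        "gluster_host": "gluster1.example.com",
        "gluster_volname": "gvol0",
        "gluster_options": "",
    }

hvol = check_external_volume(FakePVRequest())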
Example #7
def handle_external_storage_addition(core_v1_client, obj):
    """Deploy service(One service per Volume)"""
    volname = obj["metadata"]["name"]
    details = obj["spec"]["details"][0]

    data = {
        "volname": volname,
        "volume_id": obj["spec"]["volume_id"],
        "type": "External",
        "kadalu-format": True,
        "gluster_host": details["gluster_host"],
        "gluster_volname": details["gluster_volname"],
        "gluster_options": details.get("gluster_options", "ignore-me"),
    }

    # Add new entry in the existing config map
    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)
    volinfo_file = "%s.info" % volname
    configmap_data.data[volinfo_file] = json.dumps(data)

    core_v1_client.patch_namespaced_config_map(KADALU_CONFIG_MAP, NAMESPACE,
                                               configmap_data)
    logging.info(
        logf("Updated configmap", name=KADALU_CONFIG_MAP, volname=volname))

    filename = os.path.join(MANIFESTS_DIR, "external-storageclass.yaml")
    template(filename, **data)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(
        logf("Deployed External StorageClass",
             volname=volname,
             manifest=filename))
Example #8
def create_virtblock_volume(hostvol_mnt, volname, size):
    """Create virtual block volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Create a file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(
        logf("Created virtblock directory", path=os.path.dirname(volpath)))

    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)
    os.truncate(volpath_full, size)
    logging.debug(
        logf("Truncated file to required size", path=volpath, size=size))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(
        logf("Created Filesystem", path=volpath, command=MKFS_XFS_CMD))
    save_pv_metadata(hostvol_mnt, volpath, size)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
Example #9
def create_and_mount_brick(brick_device, brick_path, brickfs):
    """
    Create brick filesystem and mount the brick. Currently
    only xfs is supported
    """

    # If the brick device path does not start with /dev, use the
    # /brickdev prefix. The brick device directory passed by the user
    # is mounted as /brickdev to avoid mixing with any other
    # dirs inside the container.
    if not brick_device.startswith("/dev/"):
        brick_device = "/brickdev/" + os.path.basename(brick_device)

    mountdir = os.path.dirname(brick_path)
    os.makedirs(mountdir,
                mode=0o755,
                exist_ok=True)

    try:
        execute("mount", brick_device, mountdir)
    except CommandException as err:
        if 'wrong fs type' in err.err:
            # This error pops up when mounting an empty device or a device
            # with the wrong filesystem. Run mkfs and then retry the mount.
            try:
                execute("mkfs.xfs", brick_device)
            except CommandException as mkfs_err:
                if "appears to contain an existing filesystem" not in mkfs_err.err:
                    logging.error(logf(
                        "Failed to create file system",
                        fstype=brickfs,
                        device=brick_device,
                    ))
                    sys.exit(1)

            # Retry the mount after mkfs. The retry must run even when
            # mkfs succeeds, not only on mkfs failure.
            try:
                execute("mount", brick_device, mountdir)
            except CommandException as mount_err:
                logging.error(logf(
                    "Failed to mount export brick (after mkfs)",
                    fstype=brickfs,
                    device=brick_device,
                    mountdir=mountdir,
                    error=mount_err,
                ))
                sys.exit(1)

        elif 'already mounted' not in err.err:
            logging.error(logf(
                "Failed to mount export brick",
                fstype=brickfs,
                device=brick_device,
                mountdir=mountdir,
                error=err,
            ))
            sys.exit(1)
Example #10
def update_config_map(core_v1_client, obj):
    """
    Generate volinfo of the new hosting Volume and update it in the ConfigMap
    """
    volname = obj["metadata"]["name"]
    voltype = obj["spec"]["type"]
    data = {
        "namespace": NAMESPACE,
        "kadalu_version": VERSION,
        "volname": volname,
        "volume_id": obj["spec"]["volume_id"],
        "type": voltype,
        "bricks": [],
        "options": obj["spec"].get("options", {})
    }

    # Add new entry in the existing config map
    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)

    # For each brick, add brick path and node id
    bricks = obj["spec"]["storage"]
    for idx, storage in enumerate(bricks):
        data["bricks"].append({
            "brick_path": "/bricks/%s/data/brick" % volname,
            "kube_hostname": storage.get("node", ""),
            "node": get_brick_hostname(volname, idx),
            "node_id": storage["node_id"],
            "host_brick_path": storage.get("path", ""),
            "brick_device": storage.get("device", ""),
            "pvc_name": storage.get("pvc", ""),
            "brick_device_dir": get_brick_device_dir(storage),
            "brick_index": idx
        })

    if voltype == VOLUME_TYPE_REPLICA_2:
        tiebreaker = obj["spec"].get("tiebreaker", None)
        if not tiebreaker:
            logging.warning(
                logf(
                    "No 'tiebreaker' provided for replica2 "
                    "config. Using default tie-breaker.kadalu.io:/mnt",
                    volname=volname))
            # Add default tiebreaker if no tie-breaker option provided
            tiebreaker = {
                "node": "tie-breaker.kadalu.io",
                "path": "/mnt",
            }
        if not tiebreaker.get("port", None):
            tiebreaker["port"] = 24007

        data["tiebreaker"] = tiebreaker

    volinfo_file = "%s.info" % volname
    configmap_data.data[volinfo_file] = json.dumps(data)

    core_v1_client.patch_namespaced_config_map(KADALU_CONFIG_MAP, NAMESPACE,
                                               configmap_data)
    logging.info(
        logf("Updated configmap", name=KADALU_CONFIG_MAP, volname=volname))
Example #11
def update_free_size(hostvol, sizechange):
    """Update the free size in respective hosting Volume's stat file"""

    # Check for mount availability before updating the free size
    retry_errors(os.statvfs, [os.path.join(HOSTVOL_MOUNTDIR, hostvol)], [ENOTCONN])

    stat_file_path = os.path.join(HOSTVOL_MOUNTDIR, hostvol, ".stat")

    with statfile_lock:
        with open(stat_file_path+".tmp", "w") as stat_file_tmp:
            with open(stat_file_path) as stat_file:
                data = json.load(stat_file)
                data["free_size"] += sizechange
                stat_file_tmp.write(json.dumps(data))
                logging.debug(logf(
                    "Updated .stat.tmp file",
                    hostvol=hostvol,
                    before=data["free_size"] - sizechange,
                    after=data["free_size"]
                ))

        os.rename(stat_file_path+".tmp", stat_file_path)
        logging.debug(logf(
            "Renamed .stat.tmp to .stat file",
            hostvol=hostvol
        ))
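
The .stat file is assumed here to be a small JSON document with a "free_size" field; a hypothetical before/after for releasing 1 MiB back to the pool:

# before: {"free_size": 10736369664}
update_free_size("storage-pool-1", 1024 * 1024)
# after:  {"free_size": 10737418240}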
Example #12
def deploy_storage_class(obj):
    """Deploys the default and custom storage classes for KaDalu if not present"""

    # Deploy default Storage Class
    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class()
    sc_names = []
    for tmpl in os.listdir(MANIFESTS_DIR):
        if tmpl.startswith("storageclass-") and tmpl.endswith(".j2"):
            sc_names.append(
                tmpl.replace("storageclass-", "").replace(".yaml.j2", ""))

    installed_scs = [item.metadata.name for item in scs.items]
    for sc_name in sc_names:
        filename = os.path.join(MANIFESTS_DIR,
                                "storageclass-%s.yaml" % sc_name)
        if sc_name in installed_scs:
            logging.info(
                logf("StorageClass already present, continuing with Apply",
                     manifest=filename))

        template(filename,
                 namespace=NAMESPACE,
                 kadalu_version=VERSION,
                 hostvol_name=obj["metadata"]["name"],
                 kadalu_format=obj["spec"].get("kadalu_format", "native"))
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
        logging.info(logf("Deployed StorageClass", manifest=filename))
Example #13
def handle_added(core_v1_client, obj):
    """
    New Volume is requested. Update the configMap and deploy
    """

    if not validate_volume_request(obj):
        # TODO: Delete Custom resource
        return

    # Ignore if already deployed
    volname = obj["metadata"]["name"]
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith("server-" + volname + "-"):
            logging.debug(
                logf("Ignoring already deployed server statefulsets",
                     storagename=volname))
            return

    # Generate new Volume ID
    obj["spec"]["volume_id"] = str(uuid.uuid1())

    update_config_map(core_v1_client, obj)
    deploy_server_pods(obj)

    # Deploy service (one Service per Volume)
    filename = os.path.join(MANIFESTS_DIR, "services.yaml")
    template(filename, namespace=NAMESPACE, volname=volname)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(logf("Deployed Service", volname=volname, manifest=filename))
Example #14
    def NodePublishVolume(self, request, context):
        """Mounts the hosting volume, then mounts the PV at the target path"""
        start_time = time.time()
        hostvol = request.volume_context.get("hostvol", "")
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
        pvpath = request.volume_context.get("path", "")
        pvtype = request.volume_context.get("pvtype", "")
        pvpath_full = os.path.join(mntdir, pvpath)

        logging.debug(logf(
            "Received the mount request",
            volume=request.volume_id,
            hostvol=hostvol,
            pvpath=pvpath,
            pvtype=pvtype
        ))

        mount_glusterfs(hostvol, mntdir)
        logging.debug(logf(
            "Mounted Hosting Volume",
            pv=request.volume_id,
            hostvol=hostvol,
            mntdir=mntdir,
        ))
        # Mount the PV
        # TODO: Handle Volume capability mount flags
        mount_volume(pvpath_full, request.target_path, pvtype, fstype=None)
        logging.info(logf(
            "Mounted PV",
            volume=request.volume_id,
            pvpath=pvpath,
            pvtype=pvtype,
            hostvol=hostvol,
            duration_seconds=time.time() - start_time
        ))
        return csi_pb2.NodePublishVolumeResponse()
Example #15
def handle_added(core_v1_client, obj):
    """
    New Volume is requested. Update the configMap and deploy
    """

    if not validate_volume_request(obj):
        # TODO: Delete Custom resource
        logging.debug(logf("validation of volume request failed", yaml=obj))
        return

    # Ignore if already deployed
    volname = obj["metadata"]["name"]
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith("server-" + volname + "-"):
            logging.debug(
                logf("Ignoring already deployed server statefulsets",
                     storagename=volname))
            return

    # Add new entry in the existing config map
    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)

    if configmap_data.data.get("%s.info" % volname, None):
        # Volume already exists
        logging.debug(
            logf("Ignoring already updated volume config",
                 storagename=volname))
        return

    # Generate new Volume ID
    if obj["spec"].get("volume_id", None) is None:
        obj["spec"]["volume_id"] = str(uuid.uuid1())
    # Apply existing Volume ID to recreate storage pool from existing device/path
    else:
        logging.info(
            logf("Applying existing volume id",
                 volume_id=obj["spec"]["volume_id"]))

    voltype = obj["spec"]["type"]
    if voltype == VOLUME_TYPE_EXTERNAL:
        handle_external_storage_addition(core_v1_client, obj)
        return

    # Generate Node ID for each storage device.
    for idx, _ in enumerate(obj["spec"]["storage"]):
        obj["spec"]["storage"][idx]["node_id"] = "node-%d" % idx

    # Storage Class
    deploy_storage_class(obj)

    update_config_map(core_v1_client, obj)
    deploy_server_pods(obj)

    filename = os.path.join(MANIFESTS_DIR, "services.yaml")
    template(filename, namespace=NAMESPACE, volname=volname)
    lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
    logging.info(logf("Deployed Service", volname=volname, manifest=filename))
Example #16
def create_subdir_volume(hostvol_mnt, volname, size):
    """Create sub directory Volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_SUBVOL, volhash, volname)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before creating subdir volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a subdir
    makedirs(os.path.join(hostvol_mnt, volpath))
    logging.debug(logf("Created PV directory", pvdir=volpath))

    # Write info file so that Brick's quotad sidecar
    # container picks it up.
    save_pv_metadata(hostvol_mnt, volpath, size)

    # Wait for quota set
    # TODO: Handle Timeout
    pvsize_buffer = size * 0.05  # 5%
    pvsize_min = (size - pvsize_buffer)
    pvsize_max = (size + pvsize_buffer)
    logging.debug(
        logf(
            "Watching df of pv directory",
            pvdir=volpath,
            pvsize_buffer=pvsize_buffer,
        ))

    count = 0
    while True:
        count += 1
        pvstat = retry_errors(os.statvfs, [os.path.join(hostvol_mnt, volpath)],
                              [ENOTCONN])
        volsize = pvstat.f_blocks * pvstat.f_bsize
        if pvsize_min < volsize < pvsize_max:
            logging.debug(
                logf("Matching df output, Quota set successful",
                     volsize=volsize,
                     num_tries=count))
            break

        if count >= 6:
            logging.warning(
                logf("Waited for some time, Quota set failed, continuing.",
                     volsize=volsize,
                     num_tries=count))
            break

        time.sleep(1)

    return Volume(
        volname=volname,
        voltype=PV_TYPE_SUBVOL,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
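
For a 1 GiB request, the 5% buffer above accepts any df-reported size in this range (a worked check, not project code):

size = 1024 * 1024 * 1024            # 1073741824 bytes requested
buffer = size * 0.05                 # 53687091.2 bytes
print(size - buffer, size + buffer)  # 1020054732.8 .. 1127428915.2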
Example #17
def mount_glusterfs_with_host(volname, target_path, host, options=None):
    """Mount Glusterfs Volume"""

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, target_path):
        logging.debug(logf(
            "Already mounted",
            mount=target_path
        ))
        return

    if not os.path.exists(target_path):
        makedirs(target_path)

    # FIXME: make this better later (an issue for external contribution)
    # opt_array = None
    # if options:
    #     opt_array = []
    #     for opt in options.split(","):
    #         if not opt or opt == "":
    #             break
    #         for k,v in opt.split("="):
    #             if k == "log-level":
    #                 opt_array.append("--log-level")
    #                 opt_array.append(v)
    #                 # TODO: handle more options, and document them

    # Fix the log, so we can check it out later
    # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
    log_file = "/var/log/gluster/gluster.log"
    cmd = [
        GLUSTERFS_CMD,
        "--process-name", "fuse",
        "-l", "%s" % log_file,
        "--volfile-id", volname,
        "-s", host,
        target_path
    ]
    # if opt_array:
    #     cmd.extend(opt_array)
    #
    # # add mount point after all options
    # cmd.append(target_path)
    logging.debug(logf(
        "glusterfs command",
        cmd=cmd
    ))

    try:
        execute(*cmd)
    except CommandException as err:
        logging.info(logf(
            "mount command failed",
            cmd=cmd,
            error=err,
        ))

    return
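
With hypothetical arguments, the command assembled above expands to the line shown in the comment:

# glusterfs --process-name fuse -l /var/log/gluster/gluster.log \
#     --volfile-id gvol0 -s gluster1.example.com /mnt/gvol0
mount_glusterfs_with_host("gvol0", "/mnt/gvol0", "gluster1.example.com")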
Example #18
def mount_glusterfs(volume, target_path):
    """Mount Glusterfs Volume"""
    if not os.path.exists(target_path):
        makedirs(target_path)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted", mount=target_path))
        return

    # This is just to prevent multiple requests getting here in parallel
    target_path_lock = "%s.lock" % target_path

    while True:
        if not os.path.exists(target_path_lock):
            # Create an empty lock file; no IO is needed, so there is no
            # open/close business.
            os.mknod(target_path_lock)
            break
        time.sleep(0.2)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted (2nd try)", mount=target_path))
        os.unlink(target_path_lock)
        return

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        mount_glusterfs_with_host(volume['g_volname'], target_path,
                                  volume['g_host'], volume['g_options'])
        os.unlink(target_path_lock)
        return

    generate_client_volfile(volume['name'])
    # Fix the log, so we can check it out later
    # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
    log_file = "/var/log/gluster/gluster.log"
    cmd = [
        GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
        "--volfile-id", volume['name'], "-f",
        "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), target_path
    ]
    try:
        execute(*cmd)
    except Exception as err:
        logging.error(
            logf("Error executing mount command",
                 volume=volume,
                 cmd=cmd,
                 error=format(err)))
        os.unlink(target_path_lock)
        raise err

    os.unlink(target_path_lock)
    return
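
The exists-check followed by os.mknod above is not atomic between processes; a sketch of the same lock-file idiom done atomically with O_CREAT | O_EXCL (an alternative, not kadalu's code):

import errno
import os
import time

def acquire_path_lock(target_path):
    """Spin until <target_path>.lock is created atomically."""
    lock_path = "%s.lock" % target_path
    while True:
        try:
            os.close(os.open(lock_path, os.O_CREAT | os.O_EXCL))
            return lock_path
        except OSError as err:
            if err.errno != errno.EEXIST:
                raise
            time.sleep(0.2)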
Example #19
File: main.py Project: vatsa287/kadalu
def handle_modified(core_v1_client, obj):
    """
    Handle when Volume option is updated or Volume
    state is changed to maintenance
    """
    # TODO: Handle Volume maintenance mode

    volname = obj["metadata"]["name"]

    voltype = obj["spec"]["type"]
    if voltype == VOLUME_TYPE_EXTERNAL:
        # Modification of 'External' volume type is not supported
        logging.info(
            logf("Modification of 'External' volume type is not supported",
                 storagename=volname))
        return

    # Modifying Replica1 volumes is not supported in this operation either.
    if voltype == VOLUME_TYPE_REPLICA_1:
        # Modification of 'Replica1' volume type is not supported
        logging.info(
            logf("Modification of '%s' volume type is not supported" %
                 VOLUME_TYPE_REPLICA_1,
                 storagename=volname))
        return

    if not validate_volume_request(obj):
        logging.debug(logf("validation of volume request failed", yaml=obj))
        return

    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)

    if not configmap_data.data.get("%s.info" % volname, None):
        # Volume doesn't exist
        logging.error(logf("Volume config not found", storagename=volname))
        return

    # Volume ID (uuid) is already generated, re-use
    cfgmap = json.loads(configmap_data.data[volname + ".info"])
    # Get volume-id from config map
    obj["spec"]["volume_id"] = cfgmap["volume_id"]

    # Set Node ID for each storage device from configmap
    for idx, _ in enumerate(obj["spec"]["storage"]):
        obj["spec"]["storage"][idx]["node_id"] = cfgmap["bricks"][idx][
            "node_id"]

    # Add new entry in the existing config map
    update_config_map(core_v1_client, obj)
    deploy_server_pods(obj)

    filename = os.path.join(MANIFESTS_DIR, "services.yaml")
    template(filename, namespace=NAMESPACE, volname=volname)
    lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
    logging.info(logf("Deployed Service", volname=volname, manifest=filename))
Example #20
def mount_glusterfs(volume, mountpoint, is_client=False):
    """Mount Glusterfs Volume"""
    if volume["type"] == "External":
        volname = volume['g_volname']
    else:
        volname = volume["name"]

    # Ignore if already glusterfs process running for that volume
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted", mount=mountpoint))
        return

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted (2nd try)", mount=mountpoint))
        return

    if not os.path.exists(mountpoint):
        makedirs(mountpoint)

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        with mount_lock:
            mount_glusterfs_with_host(volume['g_volname'], mountpoint,
                                      volume['g_host'], volume['g_options'],
                                      is_client)
        return

    with mount_lock:
        generate_client_volfile(volume['name'])
        # Fix the log, so we can check it out later
        # log_file = "/var/log/gluster/%s.log" % mountpoint.replace("/", "-")
        log_file = "/var/log/gluster/gluster.log"
        cmd = [
            GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
            "--volfile-id", volume['name'], "--fs-display-name",
            "kadalu:%s" % volume['name'], "-f",
            "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), mountpoint
        ]

        ## required for 'simple-quota'
        if not is_client:
            cmd.extend(["--client-pid", "-14"])

        try:
            execute(*cmd)
        except CommandException as err:
            logging.error(
                logf("Error executing mount command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            raise err

    return
Example #21
def delete_volume(volname):
    """Delete virtual block, sub directory volume, or External"""
    vol = search_volume(volname)
    if vol is not None:
        logging.debug(
            logf("Volume found for delete",
                 volname=vol.volname,
                 voltype=vol.voltype,
                 volhash=vol.volhash,
                 hostvol=vol.hostvol))
        volpath = os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, vol.volpath)
        try:
            if vol.voltype == PV_TYPE_SUBVOL:
                os.removedirs(volpath)
            else:
                os.remove(volpath)
        except OSError as err:
            logging.info(
                logf(
                    "Error while deleting volume",
                    volpath=volpath,
                    voltype=vol.voltype,
                    error=err,
                ))

        logging.debug(
            logf("Volume deleted", volpath=volpath, voltype=vol.voltype))

        # Delete Metadata file
        info_file_path = os.path.join(HOSTVOL_MOUNTDIR, vol.hostvol, "info",
                                      vol.volpath + ".json")

        try:
            with open(info_file_path) as info_file:
                data = json.load(info_file)
                # We assume a create always precedes a delete, but during
                # development that's not guaranteed: a delete request may
                # arrive for a PVC created before this pod came up, and we
                # can't fail in that case.
                update_free_size(vol.hostvol, data["size"])

            os.remove(info_file_path)
            logging.debug(
                logf("Removed volume metadata file",
                     path="info/" + vol.volpath + ".json",
                     hostvol=vol.hostvol))
        except OSError as err:
            logging.info(
                logf(
                    "Error while removing the file",
                    path="info/" + vol.volpath + ".json",
                    hostvol=vol.hostvol,
                    error=err,
                ))
Example #22
File: main.py Project: vatsa287/kadalu
def handle_deleted(core_v1_client, obj):
    """
    If number of pvs provisioned from that volume
    is zero - Delete the respective server pods
    If number of pvs is not zero, wait or periodically
    check for num_pvs. Delete Server pods only when pvs becomes zero.
    """

    volname = obj["metadata"]["name"]

    storage_info_data = get_configmap_data(volname)

    logging.info(logf("Delete requested", volname=volname))

    pv_count = get_num_pvs(storage_info_data)

    if pv_count == -1:
        logging.error(
            logf("Storage delete failed. Failed to get PV count",
                 number_of_pvs=pv_count,
                 storage=volname))
        return

    if pv_count != 0:
        logging.warning(
            logf("Storage delete failed. Storage is not empty",
                 number_of_pvs=pv_count,
                 storage=volname))
    else:
        if storage_info_data.get("type") == "External":
            # We can't delete external volume but cleanup StorageClass and
            # Configmap
            volname = "kadalu.external." + volname
            lib_execute(KUBECTL_CMD, DELETE_CMD, "sc", volname)
            logging.info(logf(
                "Deleted Storage class",
                volname=volname,
            ))
            delete_config_map(core_v1_client, obj)

        else:
            delete_server_pods(storage_info_data, obj)
            delete_config_map(core_v1_client, obj)

            filename = os.path.join(MANIFESTS_DIR, "services.yaml")
            template(filename, namespace=NAMESPACE, volname=volname)
            lib_execute(KUBECTL_CMD, DELETE_CMD, "-f", filename)
            logging.info(
                logf("Deleted Service", volname=volname, manifest=filename))

    return
Example #23
def deploy_csi_pods(core_v1_client):
    """
    Look for CSI pods, if any one CSI pod found then
    that means it is deployed
    """
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith(CSI_POD_PREFIX):
            logging.info("Updating already deployed CSI pods")

    # Deploy CSI Pods
    api_instance = client.VersionApi().get_code()

    if api_instance.major > "1" or api_instance.major == "1" and \
       api_instance.minor >= "22":

        csi_driver_version = csi_driver_object_api_version()
        if csi_driver_version is not None and \
           csi_driver_version != "v1":
            lib_execute(KUBECTL_CMD, DELETE_CMD, "csidriver", "kadalu")
            logging.info(
                logf("Deleted existing CSI Driver object",
                     csi_driver_version=csi_driver_version))

        filename = os.path.join(MANIFESTS_DIR, "csi-driver-object-v1.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)

    elif api_instance.major > "1" or api_instance.major == "1" and \
       api_instance.minor >= "14":
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-object.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)

    else:
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-crd.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)

    filename = os.path.join(MANIFESTS_DIR, "csi.yaml")
    docker_user = os.environ.get("DOCKER_USER", "kadalu")
    template(
        filename,
        namespace=NAMESPACE,
        kadalu_version=VERSION,
        docker_user=docker_user,
        k8s_dist=K8S_DIST,
        kubelet_dir=KUBELET_DIR,
        verbose=VERBOSE,
    )

    lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
    logging.info(logf("Deployed CSI Pods", manifest=filename))
Example #24
def create_virtblock_volume(hostvol_mnt, volname, size):
    """Create virtual block volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf(
        "Volume hash",
        volhash=volhash
    ))

    # Check for mount availability before creating virtblock volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(logf(
        "Created virtblock directory",
        path=os.path.dirname(volpath)
    ))

    if os.path.exists(volpath_full):
        rand = time.time()
        logging.info(logf(
            "Got a create request for an existing file; renaming.",
            path=volpath_full, random=rand
        ))
        os.rename(volpath_full, "%s.%s" % (volpath_full, rand))

    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)
    os.truncate(volpath_full, size)
    logging.debug(logf(
        "Truncated file to required size",
        path=volpath,
        size=size
    ))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(logf(
        "Created Filesystem",
        path=volpath,
        command=MKFS_XFS_CMD
    ))
    save_pv_metadata(hostvol_mnt, volpath, size)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
Example #25
def yield_hostvol_mount():
    """Yields mount directory where hostvol is mounted"""
    host_volumes = get_pv_hosting_volumes()
    for volume in host_volumes:
        hvol = volume['name']
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)
        try:
            mount_glusterfs(volume, mntdir)
        except CommandException as excep:
            logging.error(
                logf("Unable to mount volume", hvol=hvol, excep=excep.args))
            return
        logging.info(logf("Volume is mounted successfully", hvol=hvol))
        # After mounting a hostvol, start looking for PVC from '/mntdir/info'
        yield os.path.join(mntdir, 'info')
Example #26
def deploy_config_map(core_v1_client):
    """Deploys the template configmap if not exists"""

    configmaps = core_v1_client.list_namespaced_config_map(NAMESPACE)
    for item in configmaps.items:
        if item.metadata.name == KADALU_CONFIG_MAP:
            logging.debug(
                logf("Found existing configmap", name=item.metadata.name))
            return

    # Deploy Config map
    filename = os.path.join(MANIFESTS_DIR, "configmap.yaml")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(logf("Deployed ConfigMap", manifest=filename))
Example #27
def validate_volume_request(obj):
    """Validate the Volume request for Replica options, number of bricks etc"""
    if not obj.get("spec", None):
        logging.error("Storage 'spec' not specified")
        return False

    voltype = obj["spec"].get("type", None)
    if voltype is None:
        logging.error("Storage type not specified")
        return False

    if voltype not in VALID_HOSTING_VOLUME_TYPES:
        logging.error(
            logf("Invalid Storage type",
                 valid_types=",".join(VALID_HOSTING_VOLUME_TYPES),
                 provided_type=voltype))
        return False

    if voltype == "External":
        return validate_ext_details(obj)

    bricks = obj["spec"].get("storage", [])
    if not bricks_validation(bricks):
        return False

    if (voltype == VOLUME_TYPE_REPLICA_1 and len(bricks) != 1) or \
       (voltype == VOLUME_TYPE_REPLICA_3 and len(bricks) != 3):
        logging.error("Invalid number of storage directories/devices"
                      " specified")
        return False

    if voltype == VOLUME_TYPE_REPLICA_2:
        if len(bricks) != 2:
            logging.error("Invalid number of storage directories/devices"
                          " specified")
            return False

        tiebreaker = obj["spec"].get("tiebreaker", None)
        if tiebreaker and (not tiebreaker.get("node", None)
                           or not tiebreaker.get("path", None)):
            logging.error(
                logf("'tiebreaker' provided for replica2 "
                     "config is not valid"))
            return False

    logging.debug(logf("Storage %s successfully validated" % \
                       obj["metadata"].get("name", "<unknown>")))
    return True
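
A hypothetical object that passes this validation for a three-brick volume (the "Replica3" type string and the storage fields are assumptions based on the checks above and on bricks_validation, which is not shown):

obj = {
    "metadata": {"name": "storage-pool-1"},
    "spec": {
        "type": "Replica3",
        "storage": [
            {"device": "/dev/vdb"},
            {"device": "/dev/vdc"},
            {"device": "/dev/vdd"},
        ],
    },
}
assert validate_volume_request(obj)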
Example #28
def crd_watch(core_v1_client, k8s_client):
    """
    Watches the CRD to provision new PV Hosting Volumes
    """
    crds = client.CustomObjectsApi(k8s_client)
    k8s_watch = watch.Watch()
    resource_version = ""
    for event in k8s_watch.stream(crds.list_cluster_custom_object,
                                  "kadalu-operator.storage",
                                  "v1alpha1",
                                  "kadalustorages",
                                  resource_version=resource_version):
        obj = event["object"]
        operation = event['type']
        spec = obj.get("spec")
        if not spec:
            continue
        metadata = obj.get("metadata")
        resource_version = metadata['resourceVersion']
        logging.debug(logf("Event", operation=operation, object=repr(obj)))
        if operation == "ADDED":
            handle_added(core_v1_client, obj)
        elif operation == "MODIFIED":
            handle_modified()
        elif operation == "DELETED":
            handle_deleted()
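
A hypothetical event from the watch stream, shaped after the access patterns above (values are made up):

event = {
    "type": "ADDED",
    "object": {
        "metadata": {"name": "storage-pool-1", "resourceVersion": "12345"},
        "spec": {"type": "Replica1", "storage": [{"device": "/dev/vdc"}]},
    },
}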
Example #29
def handle_quota(quota_report, brick_path, volname, pvtype):
    """Sets Quota if info file is available"""

    volhash = get_volname_hash(volname)
    volpath = get_volume_path(pvtype, volhash, volname)
    subdir_path = os.path.join(brick_path, volpath)
    projid = "#%d" % os.lstat(subdir_path).st_ino
    limit_hard = 0
    for line in quota_report:
        if line.startswith(projid):
            limit_hard = int(line.split()[3])
            break

    # Quota is already set, continue
    # TODO: Handle PV resize requests
    if limit_hard > 0:
        return

    pvinfo_file_path = os.path.join(brick_path, "info", volpath + ".json")
    if os.path.exists(pvinfo_file_path):
        data = {}
        with open(pvinfo_file_path) as pvinfo_file:
            data = json.loads(pvinfo_file.read().strip())

            try:
                set_quota(os.path.dirname(brick_path), subdir_path,
                          data["size"])
            except CommandException as err:
                logging.error(
                    logf("Failed to set Quota",
                         err=err.err,
                         path=subdir_path.replace(brick_path, ""),
                         size=data["size"]))
    return
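
A hypothetical quota-report line in the format this parser assumes (project id, used, soft, then the hard limit in the fourth column):

sample = "#1234 4096 0 1048576 00 [--------]"
print(int(sample.split()[3]))  # -> 1048576, the hard limit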
Example #30
def deploy_csi_pods(core_v1_client):
    """
    Look for CSI pods, if any one CSI pod found then
    that means it is deployed
    """
    create_cmd = "create"
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith(CSI_POD_PREFIX):
            logging.info("Updating already deployed CSI pods")
            create_cmd = "apply"

    # Deploy CSI Pods
    api_instance = client.VersionApi().get_code()
    if api_instance.major > "1" or api_instance.major == "1" and \
       api_instance.minor >= "14":
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-object.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, create_cmd, "-f", filename)
    else:
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-crd.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, create_cmd, "-f", filename)

    filename = os.path.join(MANIFESTS_DIR, "csi.yaml")
    docker_user = os.environ.get("DOCKER_USER", "kadalu")
    template(filename,
             namespace=NAMESPACE,
             kadalu_version=VERSION,
             docker_user=docker_user,
             k8s_dist=K8S_DIST,
             kubelet_dir=KUBELET_DIR)
    execute(KUBECTL_CMD, create_cmd, "-f", filename)
    logging.info(logf("Deployed CSI Pods", manifest=filename))