Example #1
def handle_external_storage_addition(core_v1_client, obj):
    """Deploy service(One service per Volume)"""
    volname = obj["metadata"]["name"]
    details = obj["spec"]["details"][0]

    data = {
        "volname": volname,
        "volume_id": obj["spec"]["volume_id"],
        "type": "External",
        "kadalu-format": True,
        "gluster_host": details["gluster_host"],
        "gluster_volname": details["gluster_volname"],
        "gluster_options": details.get("gluster_options", "ignore-me"),
    }

    # Add new entry in the existing config map
    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)
    volinfo_file = "%s.info" % volname
    configmap_data.data[volinfo_file] = json.dumps(data)

    core_v1_client.patch_namespaced_config_map(KADALU_CONFIG_MAP, NAMESPACE,
                                               configmap_data)
    logging.info(
        logf("Updated configmap", name=KADALU_CONFIG_MAP, volname=volname))

    filename = os.path.join(MANIFESTS_DIR, "external-storageclass.yaml")
    template(filename, **data)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(
        logf("Deployed External StorageClass",
             volname=volname,
             manifest=filename))
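
For illustration, the entry written above is stored as a JSON string under the "<volname>.info" key of the ConfigMap. A hypothetical external volume named ext-vol (all values below are made up) would be recorded roughly as:

{
    "volname": "ext-vol",
    "volume_id": "<uuid generated by the caller>",
    "type": "External",
    "kadalu-format": true,
    "gluster_host": "gluster1.example.com",
    "gluster_volname": "gvol1",
    "gluster_options": "ignore-me"
}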
Example #2
def handle_added(core_v1_client, obj):
    """
    New Volume is requested. Update the configMap and deploy
    """

    if not validate_volume_request(obj):
        # TODO: Delete Custom resource
        return

    # Ignore if already deployed
    volname = obj["metadata"]["name"]
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith("server-" + volname + "-"):
            logging.debug(
                logf("Ignoring already deployed server statefulsets",
                     storagename=volname))
            return

    # Generate new Volume ID
    obj["spec"]["volume_id"] = str(uuid.uuid1())

    update_config_map(core_v1_client, obj)
    deploy_server_pods(obj)

    # Deploy service(One service per Volume)
    volname = obj["metadata"]["name"]
    filename = os.path.join(MANIFESTS_DIR, "services.yaml")
    template(filename, namespace=NAMESPACE, volname=volname)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(logf("Deployed Service", volname=volname, manifest=filename))
Example #3
def create_virtblock_volume(hostvol_mnt, volname, size):
    """Create virtual block volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Create a file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(
        logf("Created virtblock directory", path=os.path.dirname(volpath)))

    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)
    os.truncate(volpath_full, size)
    logging.debug(
        logf("Truncated file to required size", path=volpath, size=size))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(
        logf("Created Filesystem", path=volpath, command=MKFS_XFS_CMD))
    save_pv_metadata(hostvol_mnt, volpath, size)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
Example #4
def mount_volume(pvpath, target_path, pvtype, fstype=None):
    """Mount a Volume"""
    if pvtype == PV_TYPE_VIRTBLOCK:
        fstype = "xfs" if fstype is None else fstype
        execute(MOUNT_CMD, "-t", fstype, pvpath, target_path)
    else:
        execute(MOUNT_CMD, "--bind", pvpath, target_path)
Example #5
def deploy_config_map(core_v1_client):
    """Deploys the template configmap if not exists"""

    configmaps = core_v1_client.list_namespaced_config_map(NAMESPACE)
    create_cmd = "create"
    uid = uuid.uuid4()
    for item in configmaps.items:
        if item.metadata.name == KADALU_CONFIG_MAP:
            logging.info(
                logf("Found existing configmap. Updating",
                     name=item.metadata.name))

            create_cmd = "apply"
            # Don't overwrite UID info.
            configmap_data = core_v1_client.read_namespaced_config_map(
                KADALU_CONFIG_MAP, NAMESPACE)
            if configmap_data.data.get("uid", None):
                uid = configmap_data.data["uid"]
            # Keep the config details required to be preserved.

    # Deploy Config map
    filename = os.path.join(MANIFESTS_DIR, "configmap.yaml")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION, uid=uid)
    execute(KUBECTL_CMD, create_cmd, "-f", filename)
    logging.info(logf("Deployed ConfigMap", manifest=filename))
    return uid
Example #6
def create_and_mount_brick(brick_device, brick_path, brickfs):
    """
    Create brick filesystem and mount the brick. Currently
    only xfs is supported
    """

    # If the brick device path does not start with /dev, use the
    # /brickdev prefix. The brick device directory passed by the user
    # is mounted as /brickdev to avoid mixing with any other
    # dirs inside the container.
    if not brick_device.startswith("/dev/"):
        brick_device = "/brickdev/" + os.path.basename(brick_device)

    if brickfs == "xfs":
        try:
            execute("mkfs.xfs", brick_device)
        except CommandException as err:
            if b"appears to contain an existing filesystem" not in err.err:
                logging.error(
                    logf(
                        "Failed to create file system",
                        fstype=brickfs,
                        device=brick_device,
                    ))
                sys.exit(1)

        mountdir = os.path.dirname(brick_path)
        os.makedirs(mountdir, mode=0o755, exist_ok=True)

        execute("mount", "-oprjquota", brick_device, mountdir)
Example #7
def deploy_server_pods(obj):
    """
    Deploy server pods depending on type of Hosting
    Volume and other options specified
    """
    # Deploy server pod
    volname = obj["metadata"]["name"]
    docker_user = os.environ.get("DOCKER_USER", "kadalu")
    template_args = {
        "namespace": NAMESPACE,
        "kadalu_version": VERSION,
        "docker_user": docker_user,
        "volname": volname,
        "volume_id": obj["spec"]["volume_id"]
    }

    # One StatefulSet per Brick
    for idx, brick in enumerate(obj["spec"]["storage"]):
        template_args["host_brick_path"] = brick.get("path", "")
        template_args["kube_hostname"] = brick["node"]
        template_args["brick_path"] = "/bricks/%s/data/brick" % volname
        template_args["brick_index"] = idx
        template_args["brick_device"] = brick.get("device", "")
        template_args["brick_device_dir"] = get_brick_device_dir(brick)

        filename = os.path.join(MANIFESTS_DIR, "server.yaml")
        template(filename, **template_args)
        execute(KUBECTL_CMD, "create", "-f", filename)
        logging.info(
            logf("Deployed Server pod",
                 volname=volname,
                 manifest=filename,
                 node=brick["node"]))
Example #8
def deploy_storage_class():
    """Deploys the default storage class for KaDalu if not exists"""

    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class()
    sc_names = []
    for tmpl in os.listdir(MANIFESTS_DIR):
        if tmpl.startswith("storageclass-"):
            sc_names.append(
                tmpl.replace("storageclass-", "").replace(".yaml.j2", ""))

    installed_scs = [item.metadata.name for item in scs.items]
    for sc_name in sc_names:
        filename = os.path.join(MANIFESTS_DIR,
                                "storageclass-%s.yaml" % sc_name)
        if sc_name in installed_scs:
            logging.info(
                logf("Ignoring already deployed StorageClass",
                     manifest=filename))
            continue

        # Deploy Storage Class
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, "create", "-f", filename)
        logging.info(logf("Deployed StorageClass", manifest=filename))
Example #9
def mount_glusterfs_with_host(volname, target_path, host, options=None):
    """Mount Glusterfs Volume"""

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, target_path):
        logging.debug(logf(
            "Already mounted",
            mount=target_path
        ))
        return

    if not os.path.exists(target_path):
        makedirs(target_path)

    # FIXME: make this better later (an issue for external contribution)
    # opt_array = None
    # if options:
    #     opt_array = []
    #     for opt in options.split(","):
    #         if not opt or opt == "":
    #             break
    #         for k,v in opt.split("="):
    #             if k == "log-level":
    #                 opt_array.append("--log-level")
    #                 opt_array.append(v)
    #                 # TODO: handle more options, and document them

    # Fix the log, so we can check it out later
    # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
    log_file = "/var/log/gluster/gluster.log"
    cmd = [
        GLUSTERFS_CMD,
        "--process-name", "fuse",
        "-l", "%s" % log_file,
        "--volfile-id", volname,
        "-s", host,
        target_path
    ]
    # if opt_array:
    #     cmd.extend(opt_array)
    #
    # # add mount point after all options
    # cmd.append(target_path)
    logging.debug(logf(
        "glusterfs command",
        cmd=cmd
    ))

    try:
        execute(*cmd)
    except CommandException as err:
        logging.info(logf(
            "mount command failed",
            cmd=cmd,
            error=err,
        ))

    return
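
With made-up arguments, and assuming GLUSTERFS_CMD points at the glusterfs client binary, the command assembled above looks like this:

mount_glusterfs_with_host("gvol1", "/mnt/gvol1", "gluster1.example.com")
# Executes:
#   glusterfs --process-name fuse -l /var/log/gluster/gluster.log \
#       --volfile-id gvol1 -s gluster1.example.com /mnt/gvol1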
Example #10
def mount_glusterfs(volume, target_path):
    """Mount Glusterfs Volume"""
    if not os.path.exists(target_path):
        makedirs(target_path)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted", mount=target_path))
        return

    # This is just to prevent multiple requests getting here in parallel
    target_path_lock = "%s.lock" % target_path

    while True:
        if not os.path.exists(target_path_lock):
            # Need to create a dummy file, no need to do IO
            # Hence no open and close business
            os.mknod(target_path_lock)
            break
        time.sleep(0.2)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted (2nd try)", mount=target_path))
        os.unlink(target_path_lock)
        return

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        mount_glusterfs_with_host(volume['g_volname'], target_path,
                                  volume['g_host'], volume['g_options'])
        os.unlink(target_path_lock)
        return

    generate_client_volfile(volume['name'])
    # Fix the log, so we can check it out later
    # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
    log_file = "/var/log/gluster/gluster.log"
    cmd = [
        GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
        "--volfile-id", volume['name'], "-f",
        "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), target_path
    ]
    try:
        execute(*cmd)
    except Exception as err:
        logging.error(
            logf("error to execute command",
                 volume=volume,
                 cmd=cmd,
                 error=format(err)))
        os.unlink(target_path_lock)
        raise err

    os.unlink(target_path_lock)
    return
Example #11
def mount_volume(pvpath, mountpoint, pvtype, fstype=None):
    """Mount a Volume"""
    if pvtype == PV_TYPE_VIRTBLOCK:
        fstype = "xfs" if fstype is None else fstype
        execute(MOUNT_CMD, "-t", fstype, pvpath, mountpoint)
    else:
        execute(MOUNT_CMD, "--bind", pvpath, mountpoint)

    os.chmod(mountpoint, 0o777)
Example #12
def mount_glusterfs(volume, mountpoint, is_client=False):
    """Mount Glusterfs Volume"""
    if volume["type"] == "External":
        volname = volume['g_volname']
    else:
        volname = volume["name"]

    # Ignore if a glusterfs process is already running for that volume
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted", mount=mountpoint))
        return

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted (2nd try)", mount=mountpoint))
        return

    if not os.path.exists(mountpoint):
        makedirs(mountpoint)

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        with mount_lock:
            mount_glusterfs_with_host(volume['g_volname'], mountpoint,
                                      volume['g_host'], volume['g_options'],
                                      is_client)
        return

    with mount_lock:
        generate_client_volfile(volume['name'])
        # Fix the log, so we can check it out later
        # log_file = "/var/log/gluster/%s.log" % mountpoint.replace("/", "-")
        log_file = "/var/log/gluster/gluster.log"
        cmd = [
            GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
            "--volfile-id", volume['name'], "--fs-display-name",
            "kadalu:%s" % volume['name'], "-f",
            "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), mountpoint
        ]

        ## required for 'simple-quota'
        if not is_client:
            cmd.extend(["--client-pid", "-14"])

        try:
            execute(*cmd)
        except CommandException as err:
            logging.error(
                logf("error to execute command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            raise err

    return
Example #13
def unmount_volume(mountpoint):
    """Unmount a Volume"""
    if "volumeDevices" in mountpoint:
        # Remove the loop device as well, or else duplicate loop devices
        # will be set up every time
        cmd = ["findmnt", "-T", mountpoint, "-oSOURCE", "-n"]
        device, _, _ = execute(*cmd)
        if match := re.search(r'loop\d+', device):
            loop = match.group(0)
            cmd = ["losetup", "-d", f"/dev/{loop}"]
            execute(*cmd)
Example #14
def set_quota(rootdir, subdir_path, quota_value):
    """
    Set Quota for given subdir path. Get project
    ID from directory inode
    """
    ino = os.lstat(subdir_path).st_ino
    execute("xfs_quota", "-x", "-c",
            'project -s -p %s %d' % (subdir_path, ino), rootdir)

    execute("xfs_quota", "-x", "-c",
            'limit -p bhard=%s %d' % (quota_value, ino), rootdir)
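
A sketch with made-up paths and an assumed inode number of 5678; the call expands into two xfs_quota invocations, using the inode number of the subdirectory as the project ID:

set_quota("/bricks/storage-pool-1/data/brick",
          "/bricks/storage-pool-1/data/brick/subvol/ab/cd/pvc-1", "1073741824")
# Runs:
#   xfs_quota -x -c 'project -s -p /bricks/storage-pool-1/data/brick/subvol/ab/cd/pvc-1 5678' /bricks/storage-pool-1/data/brick
#   xfs_quota -x -c 'limit -p bhard=1073741824 5678' /bricks/storage-pool-1/data/brick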
Example #15
def create_virtblock_volume(hostvol_mnt, volname, size):
    """Create virtual block volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf(
        "Volume hash",
        volhash=volhash
    ))

    # Check for mount availability before creating virtblock volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(logf(
        "Created virtblock directory",
        path=os.path.dirname(volpath)
    ))

    if os.path.exists(volpath_full):
        rand = time.time()
        logging.info(logf(
            "Getting 'Create request' on existing file, renaming.",
            path=volpath_full, random=rand
        ))
        os.rename(volpath_full, "%s.%s" % (volpath_full, rand))

    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)
    os.truncate(volpath_full, size)
    logging.debug(logf(
        "Truncated file to required size",
        path=volpath,
        size=size
    ))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(logf(
        "Created Filesystem",
        path=volpath,
        command=MKFS_XFS_CMD
    ))
    save_pv_metadata(hostvol_mnt, volpath, size)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
Example #16
def set_quota(rootdir, subdir_path, quota_value):
    """
    Set Quota for given subdir path. Get project
    ID from directory inode. XFS can have 64 bit inodes
    so strip it to 32 bit and hope not to clash.
    """
    ino = os.lstat(subdir_path).st_ino % PROJECT_MOD
    execute("xfs_quota", "-x", "-c",
            'project -s -p %s %d' % (subdir_path, ino), rootdir)

    execute("xfs_quota", "-x", "-c",
            'limit -p bhard=%s %d' % (quota_value, ino), rootdir)
Example #17
def handle_added(core_v1_client, obj):
    """
    New Volume is requested. Update the configMap and deploy
    """

    if not validate_volume_request(obj):
        # TODO: Delete Custom resource
        logging.debug(logf("validation of volume request failed", yaml=obj))

        return

    # Ignore if already deployed
    volname = obj["metadata"]["name"]
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith("server-" + volname + "-"):
            logging.debug(
                logf("Ignoring already deployed server statefulsets",
                     storagename=volname))
            return

    # Add new entry in the existing config map
    configmap_data = core_v1_client.read_namespaced_config_map(
        KADALU_CONFIG_MAP, NAMESPACE)

    if configmap_data.data.get("%s.info" % volname, None):
        # Volume already exists
        logging.debug(
            logf("Ignoring already updated volume config",
                 storagename=volname))
        return

    # Generate new Volume ID
    obj["spec"]["volume_id"] = str(uuid.uuid1())

    voltype = obj["spec"]["type"]
    if voltype == "External":
        handle_external_storage_addition(core_v1_client, obj)
        return

    # Generate Node ID for each storage device.
    for idx, _ in enumerate(obj["spec"]["storage"]):
        obj["spec"]["storage"][idx]["node_id"] = str(uuid.uuid1())

    update_config_map(core_v1_client, obj)
    deploy_server_pods(obj)

    filename = os.path.join(MANIFESTS_DIR, "services.yaml")
    template(filename, namespace=NAMESPACE, volname=volname)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(logf("Deployed Service", volname=volname, manifest=filename))
Example #18
def deploy_server_pods(obj):
    """
    Deploy server pods depending on type of Hosting
    Volume and other options specified
    """
    # Deploy server pod
    volname = obj["metadata"]["name"]
    voltype = obj["spec"]["type"]
    docker_user = os.environ.get("DOCKER_USER", "kadalu")

    shd_required = False
    if voltype in (VOLUME_TYPE_REPLICA_3, VOLUME_TYPE_REPLICA_2):
        shd_required = True

    template_args = {
        "namespace": NAMESPACE,
        "kadalu_version": VERSION,
        "docker_user": docker_user,
        "volname": volname,
        "voltype": voltype,
        "volume_id": obj["spec"]["volume_id"],
        "shd_required": shd_required
    }

    # One StatefulSet per Brick
    for idx, storage in enumerate(obj["spec"]["storage"]):
        template_args["host_brick_path"] = storage.get("path", "")
        template_args["kube_hostname"] = storage.get("node", "")
        # TODO: Understand the need, and usage of suffix
        template_args["serverpod_name"] = get_brick_hostname(volname,
                                                             storage.get(
                                                                 "node",
                                                                 "pvc"),
                                                             idx,
                                                             suffix=False)
        template_args["brick_path"] = "/bricks/%s/data/brick" % volname
        template_args["brick_index"] = idx
        template_args["brick_device"] = storage.get("device", "")
        template_args["pvc_name"] = storage.get("pvc", "")
        template_args["brick_device_dir"] = get_brick_device_dir(storage)
        template_args["brick_node_id"] = storage["node_id"]

        filename = os.path.join(MANIFESTS_DIR, "server.yaml")
        template(filename, **template_args)
        execute(KUBECTL_CMD, "create", "-f", filename)
        logging.info(
            logf("Deployed Server pod",
                 volname=volname,
                 manifest=filename,
                 node=storage.get("node", "")))
Example #19
def deploy_storage_class():
    """Deploys the default storage class for KaDalu if not exists"""

    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class()
    for item in scs.items:
        if item.metadata.name.startswith(STORAGE_CLASS_NAME_PREFIX):
            return

    # Deploy Storage Class
    filename = os.path.join(MANIFESTS_DIR, "storageclass.yaml")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(logf("Deployed StorageClass", manifest=filename))
Example #20
def deploy_config_map(core_v1_client):
    """Deploys the template configmap if not exists"""

    configmaps = core_v1_client.list_namespaced_config_map(NAMESPACE)
    for item in configmaps.items:
        if item.metadata.name == KADALU_CONFIG_MAP:
            logging.debug(
                logf("Found existing configmap", name=item.metadata.name))
            return

    # Deploy Config map
    filename = os.path.join(MANIFESTS_DIR, "configmap.yaml")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(logf("Deployed ConfigMap", manifest=filename))
Example #21
def mount_glusterfs(volume, target_path):
    """Mount Glusterfs Volume"""
    if not os.path.exists(target_path):
        os.makedirs(target_path, exist_ok=True)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        return

    generate_client_volfile(volume)
    cmd = [
        GLUSTERFS_CMD, "--process-name", "fuse", "-l", "/dev/stdout",
        "--volfile-id=%s" % volume, target_path, "-f",
        "%s/%s.client.vol" % (VOLFILES_DIR, volume)
    ]
    execute(*cmd)
Example #22
def reload_glusterfs(volume):
    """Mount Glusterfs Volume"""
    if volume["type"] == "External":
        return False

    volname = volume["name"]

    if not VOL_DATA.get(volname, None):
        return False

    # Ignore if already glusterfs process running for that volume
    with mount_lock:
        if not generate_client_volfile(volname):
            return False
        # TODO: ideally, keep the pid in structure for easier access
        # pid = VOL_DATA[volname]["pid"]
        # cmd = ["kill", "-HUP", str(pid)]
        cmd = [
            "ps", "--no-header", "-ww", "-o", "pid,command", "-C", "glusterfs"
        ]

        try:
            out, err, _ = execute(*cmd)
            send_signal_to_process(volname, out, "-HUP")
        except CommandException as err:
            logging.error(
                logf("error to execute command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            return False

    return True
Example #23
def mount_glusterfs(volume, target_path):
    """Mount Glusterfs Volume"""
    if not os.path.exists(target_path):
        makedirs(target_path)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted", mount=target_path))
        return

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted (2nd try)", mount=target_path))
        return

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        with mount_lock:
            mount_glusterfs_with_host(volume['g_volname'], target_path,
                                      volume['g_host'], volume['g_options'])
        return

    with mount_lock:
        generate_client_volfile(volume['name'])
        # Fix the log, so we can check it out later
        # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
        log_file = "/var/log/gluster/gluster.log"
        cmd = [
            GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
            "--volfile-id", volume['name'], "-f",
            "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), target_path
        ]
        try:
            execute(*cmd)
        except CommandException as err:
            logging.error(
                logf("error to execute command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            raise err

    return
Example #24
def deploy_csi_pods(core_v1_client):
    """
    Look for CSI pods; if any CSI pod is found, then
    they are already deployed
    """
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith(CSI_POD_PREFIX):
            logging.debug("Ignoring already deployed CSI pods")
            return

    # Deploy CSI Pods
    filename = os.path.join(MANIFESTS_DIR, "csi.yaml")
    docker_user = os.environ.get("DOCKER_USER", "kadalu")
    template(filename,
             namespace=NAMESPACE,
             kadalu_version=VERSION,
             docker_user=docker_user)
    execute(KUBECTL_CMD, "create", "-f", filename)
    logging.info(logf("Deployed CSI Pods", manifest=filename))
Example #25
def get_quota_report(rootdir):
    """Get Project Quota Report"""
    try:
        out, _ = execute("xfs_quota", "-x", "-c", 'report -p -b', rootdir)
        return out.split(b"\n")
    except CommandException as err:
        logging.error(
            logf("Failed to get Quota Report",
                 rootdir=rootdir,
                 err=err.err,
                 ret=err.ret))
        return []
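
Each returned line is a raw row of the project-quota report. Roughly, a row looks like the made-up sample below, with the project ID in the first column and the hard block limit in the fourth:

# b"#5678        1048576          0    2097152     00 [--------]"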
Example #26
def deploy_csi_pods(core_v1_client):
    """
    Look for CSI pods; if any CSI pod is found, then
    they are already deployed
    """
    create_cmd = "create"
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith(CSI_POD_PREFIX):
            logging.info("Updating already deployed CSI pods")
            create_cmd = "apply"

    # Deploy CSI Pods
    api_instance = client.VersionApi().get_code()
    if api_instance.major > "1" or api_instance.major == "1" and \
       api_instance.minor >= "14":
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-object.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, create_cmd, "-f", filename)
    else:
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-crd.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, create_cmd, "-f", filename)

    filename = os.path.join(MANIFESTS_DIR, "csi.yaml")
    docker_user = os.environ.get("DOCKER_USER", "kadalu")
    template(filename,
             namespace=NAMESPACE,
             kadalu_version=VERSION,
             docker_user=docker_user,
             k8s_dist=K8S_DIST,
             kubelet_dir=KUBELET_DIR)
    execute(KUBECTL_CMD, create_cmd, "-f", filename)
    logging.info(logf("Deployed CSI Pods", manifest=filename))
Example #27
def create_and_mount_brick(brick_device, brick_path, brickfs):
    """
    Create brick filesystem and mount the brick. Currently
    only xfs is supported
    """

    # If the brick device path does not start with /dev, use the
    # /brickdev prefix. The brick device directory passed by the user
    # is mounted as /brickdev to avoid mixing with any other
    # dirs inside the container.
    if not brick_device.startswith("/dev/"):
        brick_device = "/brickdev/" + os.path.basename(brick_device)

    mountdir = os.path.dirname(brick_path)
    os.makedirs(mountdir,
                mode=0o755,
                exist_ok=True)

    try:
        execute("mount", brick_device, mountdir)
    except CommandException as err:
        if 'wrong fs type' in err.err:
            # This error pops up when we do mount on an empty device or wrong fs
            # Try doing a mkfs and try mount
            try:
                execute("mkfs.xfs", brick_device)
            except CommandException as err:
                if "appears to contain an existing filesystem" not in err.err:
                    logging.error(logf(
                        "Failed to create file system",
                        fstype=brickfs,
                        device=brick_device,
                    ))
                    sys.exit(1)
                else:
                    pass

            # Retry the mount after mkfs, whether mkfs created a new
            # filesystem or found an existing one
            try:
                execute("mount", brick_device, mountdir)
            except CommandException as err:
                logging.error(logf(
                    "Failed to mount export brick (after mkfs)",
                    fstype=brickfs,
                    device=brick_device,
                    mountdir=mountdir,
                    error=err,
                ))
                sys.exit(1)

        elif 'already mounted' not in err.err:
            logging.error(logf(
                "Failed to mount export brick",
                fstype=brickfs,
                device=brick_device,
                mountdir=mountdir,
                error=err,
            ))
            sys.exit(1)

        else:
            pass
Example #28
def send_signal_to_process(volname, out, sig):
    """Sends the signal to one of the process"""

    for line in out.split("\n"):
        parts = line.split()
        pid = parts[0]
        for part in parts:
            if part.startswith("--volume-id="):
                if part.split("=")[-1] == volname:
                    cmd = ["kill", sig, pid]
                    try:
                        execute(*cmd)
                    except CommandException as err:
                        logging.error(
                            logf("error to execute command",
                                 volume=volname,
                                 cmd=cmd,
                                 error=format(err)))
                    return

    logging.debug(logf("Sent SIGHUP to glusterfs process", volname=volname))
    return
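
A sketch of the matching logic with a made-up ps output line (pid,command format, as produced by the ps invocation in the reload example above; note the function as written keys on a --volume-id= argument):

out = "1234 /usr/sbin/glusterfs --process-name fuse --volume-id=storage-pool-1 /mnt/storage-pool-1"
send_signal_to_process("storage-pool-1", out, "-HUP")
# Matches the line whose --volume-id= equals the volume name and runs: kill -HUP 1234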
Example #29
def update_virtblock_volume(hostvol_mnt, volname, expansion_requested_pvsize):
    """Update virtual block volume"""

    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before updating virtblock volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Update the file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(
        logf("Updated virtblock directory", path=os.path.dirname(volpath)))

    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)

    execute("truncate", "-s", expansion_requested_pvsize, volpath_full)
    logging.debug(
        logf("Truncated file to required size",
             path=volpath,
             size=expansion_requested_pvsize))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(
        logf("Created Filesystem", path=volpath, command=MKFS_XFS_CMD))

    update_pv_metadata(hostvol_mnt, volpath, expansion_requested_pvsize)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=expansion_requested_pvsize,
        volpath=volpath,
    )
Example #30
def execute_gluster_quota_command(privkey, user, host, gvolname, path, size):
    """
    Execute the GlusterFS quota command on the external cluster
    """
    # 'size' can always be parsed as an integer without errors
    size = int(size) * 0.95

    host = reachable_host(host)
    if host is None:
        errmsg = "All hosts are not reachable"
        logging.error(logf(errmsg))
        return errmsg

    quota_cmd = [
        "ssh",
        "-oStrictHostKeyChecking=no",
        "-i",
        "%s" % privkey,
        "%s@%s" % (user, host),
        "sudo",
        "gluster",
        "volume",
        "quota",
        "%s" % gvolname,
        "limit-usage",
        "/%s" % path,
        "%s" % size,
    ]
    try:
        execute(*quota_cmd)
    except CommandException as err:
        errmsg = "Unable to set Gluster Quota via ssh"
        logging.error(logf(errmsg, error=err))
        return errmsg

    return None
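
A usage sketch with made-up credentials, paths and size; the function reduces the requested size by 5% and runs the quota command over ssh:

err = execute_gluster_quota_command(
    "/secrets/ssh-privatekey",          # hypothetical private key path
    "ubuntu", "gluster1.example.com",   # hypothetical user and host
    "gvol1", "subvol/ab/cd/pvc-1",
    "1073741824")
# Roughly: ssh -oStrictHostKeyChecking=no -i /secrets/ssh-privatekey ubuntu@gluster1.example.com \
#              sudo gluster volume quota gvol1 limit-usage /subvol/ab/cd/pvc-1 1020054732.8
if err is not None:
    logging.error(logf("Failed to set external quota", error=err))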