Example #1
def mount_glusterfs(volume, mountpoint, is_client=False):
    """Mount Glusterfs Volume"""
    if volume["type"] == "External":
        volname = volume['g_volname']
    else:
        volname = volume["name"]

    # Ignore if a glusterfs process is already running for that volume
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted", mount=mountpoint))
        return

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted (2nd try)", mount=mountpoint))
        return

    if not os.path.exists(mountpoint):
        makedirs(mountpoint)

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        with mount_lock:
            mount_glusterfs_with_host(volume['g_volname'], mountpoint,
                                      volume['g_host'], volume['g_options'],
                                      is_client)
        return

    with mount_lock:
        generate_client_volfile(volume['name'])
        # Fix the log, so we can check it out later
        # log_file = "/var/log/gluster/%s.log" % mountpoint.replace("/", "-")
        log_file = "/var/log/gluster/gluster.log"
        cmd = [
            GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
            "--volfile-id", volume['name'], "--fs-display-name",
            "kadalu:%s" % volume['name'], "-f",
            "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), mountpoint
        ]

        ## required for 'simple-quota'
        if not is_client:
            cmd.extend(["--client-pid", "-14"])

        try:
            execute(*cmd)
        except CommandException as err:
            logging.error(
                logf("error to execute command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            raise err

    return
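
These examples call an execute() helper and catch a CommandException that are defined elsewhere in the project. The sketch below is a reconstruction based only on how the snippets use them (a later example unpacks an (out, err, pid) tuple and another reads excep.err), so treat the signatures as assumptions rather than the project's actual implementation.

import subprocess

class CommandException(Exception):
    """Raised when an external command exits with a non-zero status (sketch)."""
    def __init__(self, ret, out, err):
        super().__init__("[%d] %s" % (ret, err))
        self.ret = ret
        self.out = out
        self.err = err

def execute(*cmd):
    """Run cmd, return (out, err, pid); raise CommandException on failure (sketch)."""
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, universal_newlines=True)
    out, err = proc.communicate()
    if proc.returncode != 0:
        raise CommandException(proc.returncode, out.strip(), err.strip())
    return (out.strip(), err.strip(), proc.pid)
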
Example #2
def check_external_volume(pv_request):
    """Mount hosting volume"""
    # Assumption: this request has 'hostvol_type' set to External.
    params = {}
    for pkey, pvalue in pv_request.parameters.items():
        params[pkey] = pvalue

    hvol = {
        "host": params['gluster_host'],
        "name": params['gluster_volname'],
        "options": params['gluster_options'],
    }
    mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol['name'])

    mount_glusterfs_with_host(hvol['name'], mntdir, hvol['host'], hvol['options'])

    time.sleep(0.37)

    if not is_gluster_mount_proc_running(hvol['name'], mntdir):
        logging.debug(logf(
            "Mount failed",
            hvol=hvol,
            mntdir=mntdir
        ))
        return None

    logging.debug(logf(
        "Mount successful",
        hvol=hvol
    ))

    return hvol
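
For illustration, this variant can be exercised with a stand-in request object; in the real CSI driver pv_request comes from the generated gRPC request classes, so the FakeRequest type and all parameter values below are hypothetical.

from collections import namedtuple

# Hypothetical stand-in for the CSI request object (illustration only).
FakeRequest = namedtuple("FakeRequest", ["parameters"])

request = FakeRequest(parameters={
    "gluster_host": "gluster1.example.com",   # example value
    "gluster_volname": "gvol1",               # example value
    "gluster_options": "log-level=INFO",      # example value
})

hvol = check_external_volume(request)
if hvol is None:
    print("External volume could not be mounted")
else:
    print("Mounted external volume:", hvol["name"])
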
Example #3
def mount_glusterfs_with_host(volname, target_path, host, options=None):
    """Mount Glusterfs Volume"""

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, target_path):
        logging.debug(logf(
            "Already mounted",
            mount=target_path
        ))
        return

    if not os.path.exists(target_path):
        makedirs(target_path)

    # FIXME: make this better later (an issue for external contribution)
    # opt_array = None
    # if options:
    #     opt_array = []
    #     for opt in options.split(","):
    #         if not opt or opt == "":
    #             break
    #         for k,v in opt.split("="):
    #             if k == "log-level":
    #                 opt_array.append("--log-level")
    #                 opt_array.append(v)
    #                 # TODO: handle more options, and document them

    # Fix the log, so we can check it out later
    # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
    log_file = "/var/log/gluster/gluster.log"
    cmd = [
        GLUSTERFS_CMD,
        "--process-name", "fuse",
        "-l", "%s" % log_file,
        "--volfile-id", volname,
        "-s", host,
        target_path
    ]
    # if opt_array:
    #     cmd.extend(opt_array)
    #
    # # add mount point after all options
    # cmd.append(target_path)
    logging.debug(logf(
        "glusterfs command",
        cmd=cmd
    ))

    try:
        execute(*cmd)
    except CommandException as err:
        logging.info(logf(
            "mount command failed",
            cmd=cmd,
            error=err,
        ))

    return
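
Every mount path above first checks is_gluster_mount_proc_running() and creates the target directory with a makedirs() wrapper, neither of which is shown in these snippets. Below is a minimal sketch of both, assuming the check only needs to find an existing fuse.glusterfs entry for the target path in /proc/mounts; the project's real helpers may also inspect the glusterfs process table.

import os

def makedirs(dirpath):
    """Create the directory path if it does not exist yet (sketch)."""
    os.makedirs(dirpath, exist_ok=True)

def is_gluster_mount_proc_running(volname, mountpoint):
    """Return True if a glusterfs FUSE mount already exists at mountpoint (sketch)."""
    # volname is accepted for signature compatibility; this sketch matches
    # only on the mountpoint and filesystem type.
    try:
        with open("/proc/mounts", "r") as mounts:
            for line in mounts:
                fields = line.split()
                # fields: device, mountpoint, fstype, options, ...
                if len(fields) >= 3 and fields[1] == mountpoint \
                        and fields[2] == "fuse.glusterfs":
                    return True
    except OSError:
        pass
    return False
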
Example #4
def check_external_volume(pv_request, host_volumes):
    """Mount hosting volume"""
    # Assumption: this request has 'hostvol_type' set to External.
    params = {}
    for pkey, pvalue in pv_request.parameters.items():
        params[pkey] = pvalue

    mntdir = None
    hvol = None
    for vol in host_volumes:
        if vol["type"] != "External":
            continue
        if (vol["g_volname"] != params["gluster_volname"]) or \
           (vol["g_host"] != params["gluster_host"]):
            continue

        mntdir = os.path.join(HOSTVOL_MOUNTDIR, vol["name"])
        hvol = vol
        break

    if not mntdir:
        logging.warning("No host volume found to provide PV")
        return None

    mount_glusterfs_with_host(hvol['g_volname'], mntdir, hvol['g_host'], hvol['g_options'])

    time.sleep(0.37)

    if not is_gluster_mount_proc_running(hvol['g_volname'], mntdir):
        logging.debug(logf(
            "Mount failed",
            hvol=hvol,
            mntdir=mntdir
        ))
        return None

    logging.debug(logf(
        "Mount successful",
        hvol=hvol
    ))

    return hvol
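
The logf() calls in these examples point to a small structured-logging formatter that is not part of the snippets. A plausible sketch, assuming it simply appends key=value pairs to the message; the project's actual formatter may differ.

def logf(msg, **kwargs):
    """Format a log message with sorted key=value fields appended (sketch)."""
    if not kwargs:
        return msg
    fields = " ".join("%s=%s" % (key, value)
                      for key, value in sorted(kwargs.items()))
    return "%s\t%s" % (msg, fields)
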
Example #5
def check_external_volume(pv_request, host_volumes):
    """Mount hosting volume"""
    # Assumption: this request has 'hostvol_type' set to External.
    params = {}
    for pkey, pvalue in pv_request.parameters.items():
        params[pkey] = pvalue

    mntdir = None
    hvol = None
    for vol in host_volumes:
        if vol["type"] != "External":
            continue

        # For an external volume, k_format, g_volname and the hosts should all match.
        # gluster_hosts is flattened to a string and can be compared as such.
        # Assumption:
        # 1. The user will not reuse a gluster non-native volume
        if (vol["k_format"] == params["kadalu_format"]
                and vol["g_volname"] == params["gluster_volname"]
                and vol["g_host"] == params["gluster_hosts"]):
            mntdir = os.path.join(HOSTVOL_MOUNTDIR, vol["name"])
            hvol = vol
            break

    if not mntdir:
        logging.warning("No host volume found to provide PV")
        return None

    mount_glusterfs_with_host(hvol['g_volname'], mntdir, hvol['g_host'],
                              hvol['g_options'])

    time.sleep(0.37)

    if not is_gluster_mount_proc_running(hvol['g_volname'], mntdir):
        logging.debug(logf("Mount failed", hvol=hvol, mntdir=mntdir))
        return None

    logging.debug(logf("Mount successful", hvol=hvol))

    return hvol
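
A hypothetical invocation of this variant, showing the shape of the host_volumes entries that the matching loop expects. All values are examples, and the "non-native" kadalu_format value is an assumption, not taken from the snippets above.

from collections import namedtuple

# Hypothetical stand-in objects; real requests come from the CSI gRPC layer.
FakeRequest = namedtuple("FakeRequest", ["parameters"])

host_volumes = [{
    "type": "External",
    "name": "ext-gvol1",               # Kadalu-side name (example)
    "k_format": "non-native",          # assumed value
    "g_volname": "gvol1",
    "g_host": "gluster1.example.com",
    "g_options": "log-level=INFO",
}]

request = FakeRequest(parameters={
    "kadalu_format": "non-native",
    "gluster_volname": "gvol1",
    "gluster_hosts": "gluster1.example.com",
})

hvol = check_external_volume(request, host_volumes)
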
Example #6
def unmount_glusterfs(mountpoint):
    """Unmount GlusterFS mount"""
    volname = os.path.basename(mountpoint)
    if is_gluster_mount_proc_running(volname, mountpoint):
        execute(UNMOUNT_CMD, "-l", mountpoint)
Example #7
def unmount_glusterfs(target_path):
    """Unmount GlusterFS mount"""
    volname = os.path.basename(target_path)
    if is_gluster_mount_proc_running(volname, target_path):
        execute(UNMOUNT_CMD, target_path)
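
Examples #6 and #7, like the mount helpers above, rely on module-level constants (GLUSTERFS_CMD, UNMOUNT_CMD, HOSTVOL_MOUNTDIR, VOLFILES_DIR) defined elsewhere in the project. The values below are illustrative placeholders only; the real paths depend on how the container image is built.

# Illustrative placeholders, not the project's actual paths.
GLUSTERFS_CMD = "/usr/sbin/glusterfs"       # glusterfs FUSE client binary
UNMOUNT_CMD = "/usr/bin/umount"             # umount binary
HOSTVOL_MOUNTDIR = "/mnt"                   # where hosting volumes are mounted
VOLFILES_DIR = "/var/lib/kadalu/volfiles"   # where client volfiles are generated
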
Example #8
def mount_glusterfs_with_host(volname,
                              mountpoint,
                              hosts,
                              options=None,
                              is_client=False):
    """Mount Glusterfs Volume"""

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted", mount=mountpoint))
        return

    if not os.path.exists(mountpoint):
        makedirs(mountpoint)

    log_file = "/var/log/gluster/gluster.log"

    cmd = [
        GLUSTERFS_CMD,
        "--process-name",
        "fuse",
        "-l",
        "%s" % log_file,
        "--volfile-id",
        volname,
    ]
    ## on server component we can mount glusterfs with client-pid
    #if not is_client:
    #    cmd.extend(["--client-pid", "-14"])

    for host in hosts.split(','):
        cmd.extend(["--volfile-server", host])

    g_ops = []
    if options:
        for option in options.split(","):
            g_ops.append(f"--{option}")

    logging.debug(
        logf(
            "glusterfs command",
            cmd=cmd,
            opts=g_ops,
            mountpoint=mountpoint,
        ))

    command = cmd + g_ops + [mountpoint]
    try:
        execute(*command)
    except CommandException as excep:
        if excep.err.find("invalid option") != -1:
            logging.info(
                logf(
                    "proceeding without supplied incorrect mount options",
                    options=g_ops,
                ))
            command = cmd + [mountpoint]
            try:
                execute(*command)
            except CommandException as excep:
                logging.info(
                    logf(
                        "mount command failed",
                        cmd=command,
                        error=excep,
                    ))
            return
        logging.info(logf(
            "mount command failed",
            cmd=command,
            error=excep,
        ))
    return
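
A hypothetical call to this variant, showing how a comma-separated host list becomes repeated --volfile-server arguments and how each comma-separated entry in options is turned into a --<option> flag before the mountpoint is appended. Host names and option values are examples only.

# "server1,server2" -> two --volfile-server arguments;
# "ro,log-level=DEBUG" -> ["--ro", "--log-level=DEBUG"] appended before mountpoint.
mount_glusterfs_with_host(
    "gvol1",                                     # volname (example)
    "/mnt/gvol1",                                # mountpoint (example)
    "server1.example.com,server2.example.com",   # hosts (example)
    options="ro,log-level=DEBUG",
    is_client=True,
)
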
Example #9
def mount_glusterfs(volume, mountpoint, storage_options="", is_client=False):
    """Mount Glusterfs Volume"""
    if volume["type"] == "External":
        volname = volume['g_volname']
    else:
        volname = volume["name"]

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        with mount_lock:
            mount_glusterfs_with_host(volname, mountpoint, volume['g_host'],
                                      volume['g_options'], is_client)
        use_gluster_quota = False
        if (os.path.isfile("/etc/secret-volume/ssh-privatekey")
                and "SECRET_GLUSTERQUOTA_SSH_USERNAME" in os.environ):
            use_gluster_quota = True
        secret_private_key = "/etc/secret-volume/ssh-privatekey"
        secret_username = os.environ.get('SECRET_GLUSTERQUOTA_SSH_USERNAME',
                                         None)

        # SSH into only the first reachable host in the volume['g_host'] entry
        g_host = reachable_host(volume['g_host'])

        if g_host is None:
            logging.error(logf("All hosts are not reachable"))
            return

        if use_gluster_quota is False:
            logging.debug(logf("Do not set quota-deem-statfs"))
        else:
            logging.debug(
                logf("Set quota-deem-statfs for gluster directory Quota"))
            quota_deem_cmd = [
                "ssh", "-oStrictHostKeyChecking=no", "-i",
                "%s" % secret_private_key,
                "%s@%s" % (secret_username, g_host), "sudo", "gluster",
                "volume", "set",
                "%s" % volume['g_volname'], "quota-deem-statfs", "on"
            ]
            try:
                execute(*quota_deem_cmd)
            except CommandException as err:
                errmsg = "Unable to set quota-deem-statfs via ssh"
                logging.error(logf(errmsg, error=err))
                raise err
        return mountpoint

    generate_client_volfile(volname)
    client_volfile_path = os.path.join(VOLFILES_DIR, "%s.client.vol" % volname)

    if storage_options != "":

        # Construct a dict from the storage-options string that was passed in
        storage_options = storage_options_parse(storage_options)

        # Keep the default volfile untouched
        tmp_volfile_path = tempfile.mkstemp()[1]
        shutil.copy(client_volfile_path, tmp_volfile_path)

        # Parse the client-volfile, update passed storage-options & save
        parsed_client_volfile_path = Volfile.parse(tmp_volfile_path)
        parsed_client_volfile_path.update_options_by_type(storage_options)
        parsed_client_volfile_path.save()

        # Sort storage-options and generate hash
        storage_options_hash = get_storage_options_hash(
            json.dumps(storage_options, sort_keys=True))

        # Rename mountpoint & client volfile path with hash
        mountpoint = mountpoint + "_" + storage_options_hash
        new_client_volfile_path = os.path.join(
            VOLFILES_DIR, "%s_%s.client.vol" % (volname, storage_options_hash))
        os.rename(tmp_volfile_path, new_client_volfile_path)
        client_volfile_path = new_client_volfile_path

    # Ignore if a glusterfs process is already running for that volume
    if is_gluster_mount_proc_running(volname, mountpoint):
        reload_glusterfs(volume)
        logging.debug(logf("Already mounted", mount=mountpoint))
        return mountpoint

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, mountpoint):
        reload_glusterfs(volume)
        logging.debug(logf("Already mounted (2nd try)", mount=mountpoint))
        return mountpoint

    if not os.path.exists(mountpoint):
        makedirs(mountpoint)

    with mount_lock:
        # Fix the log, so we can check it out later
        # log_file = "/var/log/gluster/%s.log" % mountpoint.replace("/", "-")
        log_file = "/var/log/gluster/gluster.log"
        cmd = [
            GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
            "--volfile-id", volname, "--fs-display-name",
            "kadalu:%s" % volname, "-f", client_volfile_path, mountpoint
        ]

        ## required for 'simple-quota'
        if not is_client:
            cmd.extend(["--client-pid", "-14"])

        try:
            (_, err, pid) = execute(*cmd)
            VOL_DATA[volname]["pid"] = pid
        except CommandException as err:
            logging.error(
                logf("error to execute command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            raise err

    return mountpoint
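
Example #9 also depends on a reachable_host() helper that picks the first responsive host from a comma-separated list. The sketch below assumes reachability means a TCP connection to the GlusterD management port (24007); the real helper may test connectivity differently.

import socket

def reachable_host(hosts):
    """Return the first host from a comma-separated list that accepts a TCP
    connection on the GlusterD port, or None if none respond (sketch)."""
    for host in hosts.split(","):
        host = host.strip()
        try:
            with socket.create_connection((host, 24007), timeout=3):
                return host
        except OSError:
            continue
    return None
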