Example #1
def create_virtblock_volume(hostvol_mnt, volname, size):
    """Create virtual block volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Create a file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(
        logf("Created virtblock directory", path=os.path.dirname(volpath)))

    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)
    os.truncate(volpath_full, size)
    logging.debug(
        logf("Truncated file to required size", path=volpath, size=size))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(
        logf("Created Filesystem", path=volpath, command=MKFS_XFS_CMD))
    save_pv_metadata(hostvol_mnt, volpath, size)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
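
A minimal usage sketch (the mount path, PV name, and size below are hypothetical; it assumes create_virtblock_volume and its helpers are importable from the surrounding module):

# Hypothetical call: back a 1 GiB virtual-block PV with a file inside the
# host volume mounted at /mnt/storage-pool-1.
size = 1 * 1024 * 1024 * 1024
vol = create_virtblock_volume("/mnt/storage-pool-1", "pvc-0001", size)
# Assuming Volume exposes its fields as attributes, vol.volpath is the
# path of the backing file relative to the host volume mount.
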
Example #2
def create_subdir_volume(hostvol_mnt, volname, size):
    """Create sub directory Volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_SUBVOL, volhash, volname)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before creating subdir volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a subdir
    makedirs(os.path.join(hostvol_mnt, volpath))
    logging.debug(logf("Created PV directory", pvdir=volpath))

    # Write info file so that Brick's quotad sidecar
    # container picks it up.
    save_pv_metadata(hostvol_mnt, volpath, size)

    # Wait for quota set
    # TODO: Handle Timeout
    pvsize_buffer = size * 0.05  # 5%
    pvsize_min = (size - pvsize_buffer)
    pvsize_max = (size + pvsize_buffer)
    logging.debug(
        logf(
            "Watching df of pv directory",
            pvdir=volpath,
            pvsize_buffer=pvsize_buffer,
        ))

    count = 0
    while True:
        count += 1
        pvstat = retry_errors(os.statvfs, [os.path.join(hostvol_mnt, volpath)],
                              [ENOTCONN])
        volsize = pvstat.f_blocks * pvstat.f_bsize
        if pvsize_min < volsize < pvsize_max:
            logging.debug(
                logf("Matching df output, Quota set successful",
                     volsize=volsize,
                     num_tries=count))
            break

        if count >= 6:
            logging.warning(
                logf("Waited for some time, Quota set failed, continuing.",
                     volsize=volsize,
                     num_tries=count))
            break

        time.sleep(1)

    return Volume(
        volname=volname,
        voltype=PV_TYPE_SUBVOL,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
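
A usage sketch with the quota tolerance worked out (values are hypothetical; assumes the helpers above):

# Hypothetical call: request a 10 GiB subdirectory PV.
size = 10 * 1024 * 1024 * 1024
vol = create_subdir_volume("/mnt/storage-pool-1", "pvc-0002", size)
# The wait loop above accepts any df-reported capacity within +/- 5% of the
# request (between 9.5 GiB and 10.5 GiB here), polling once per second for
# at most 6 attempts before logging a warning and continuing anyway.
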
Example #3
def mount_glusterfs_with_host(volname, target_path, host, options=None):
    """Mount Glusterfs Volume"""

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, target_path):
        logging.debug(logf(
            "Already mounted",
            mount=target_path
        ))
        return

    if not os.path.exists(target_path):
        makedirs(target_path)

    # FIXME: make this better later (an issue for external contribution)
    # opt_array = None
    # if options:
    #     opt_array = []
    #     for opt in options.split(","):
    #         if not opt or opt == "":
    #             break
    #         for k,v in opt.split("="):
    #             if k == "log-level":
    #                 opt_array.append("--log-level")
    #                 opt_array.append(v)
    #                 # TODO: handle more options, and document them

    # Fix the log, so we can check it out later
    # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
    log_file = "/var/log/gluster/gluster.log"
    cmd = [
        GLUSTERFS_CMD,
        "--process-name", "fuse",
        "-l", "%s" % log_file,
        "--volfile-id", volname,
        "-s", host,
        target_path
    ]
    # if opt_array:
    #     cmd.extend(opt_array)
    #
    # # add mount point after all options
    # cmd.append(target_path)
    logging.debug(logf(
        "glusterfs command",
        cmd=cmd
    ))

    try:
        execute(*cmd)
    except CommandException as err:
        logging.info(logf(
            "mount command failed",
            cmd=cmd,
            error=err,
        ))

    return
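
A hypothetical invocation (host name and paths are illustrative; GLUSTERFS_CMD is assumed to point at the glusterfs client binary):

# Mount the external Gluster volume "mypool" served by server1.example.com.
# Note that because the option parsing above is commented out, the options
# argument is effectively ignored by this variant.
mount_glusterfs_with_host("mypool", "/mnt/kadalu/mypool",
                          "server1.example.com", options="log-level=DEBUG")
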
Example #4
def mount_glusterfs(volume, target_path):
    """Mount Glusterfs Volume"""
    if not os.path.exists(target_path):
        makedirs(target_path)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted", mount=target_path))
        return

    # This is just to prevent multiple requests getting here in parallel
    target_path_lock = "%s.lock" % target_path

    while True:
        try:
            # Create a dummy lock file; os.mknod fails if the file already
            # exists, so creation is atomic even if two requests race here.
            os.mknod(target_path_lock)
            break
        except FileExistsError:
            time.sleep(0.2)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted (2nd try)", mount=target_path))
        os.unlink(target_path_lock)
        return

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        mount_glusterfs_with_host(volume['g_volname'], target_path,
                                  volume['g_host'], volume['g_options'])
        os.unlink(target_path_lock)
        return

    generate_client_volfile(volume['name'])
    # Fix the log, so we can check it out later
    # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
    log_file = "/var/log/gluster/gluster.log"
    cmd = [
        GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
        "--volfile-id", volume['name'], "-f",
        "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), target_path
    ]
    try:
        execute(*cmd)
    except Exception as err:
        logging.error(
            logf("error to execute command",
                 volume=volume,
                 cmd=cmd,
                 error=format(err)))
        os.unlink(target_path_lock)
        raise err

    os.unlink(target_path_lock)
    return
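
The volume argument is a dict; the keys referenced above imply a shape roughly like the following (values are illustrative):

# Externally managed Gluster volume: delegated to mount_glusterfs_with_host().
external_volume = {
    "type": "External",
    "g_volname": "mypool",            # volume name on the external cluster
    "g_host": "server1.example.com",  # volfile server
    "g_options": None,                # extra mount options, may be None
}

# Kadalu-managed volume: a client volfile is generated and mounted directly.
native_volume = {
    "type": "Replica3",               # any non-"External" type takes this path
    "name": "storage-pool-1",
}

mount_glusterfs(external_volume, "/mnt/mypool")
mount_glusterfs(native_volume, "/mnt/storage-pool-1")
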
Example #5
def mount_glusterfs(volume, mountpoint, is_client=False):
    """Mount Glusterfs Volume"""
    if volume["type"] == "External":
        volname = volume['g_volname']
    else:
        volname = volume["name"]

    # Ignore if already glusterfs process running for that volume
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted", mount=mountpoint))
        return

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted (2nd try)", mount=mountpoint))
        return

    if not os.path.exists(mountpoint):
        makedirs(mountpoint)

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        with mount_lock:
            mount_glusterfs_with_host(volume['g_volname'], mountpoint,
                                      volume['g_host'], volume['g_options'],
                                      is_client)
        return

    with mount_lock:
        generate_client_volfile(volume['name'])
        # Fix the log, so we can check it out later
        # log_file = "/var/log/gluster/%s.log" % mountpoint.replace("/", "-")
        log_file = "/var/log/gluster/gluster.log"
        cmd = [
            GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
            "--volfile-id", volume['name'], "--fs-display-name",
            "kadalu:%s" % volume['name'], "-f",
            "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), mountpoint
        ]

        ## required for 'simple-quota'
        if not is_client:
            cmd.extend(["--client-pid", "-14"])

        try:
            execute(*cmd)
        except CommandException as err:
            logging.error(
                logf("error to execute command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            raise err

    return
Example #6
def create_virtblock_volume(hostvol_mnt, volname, size):
    """Create virtual block volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf(
        "Volume hash",
        volhash=volhash
    ))

    # Check for mount availability before creating virtblock volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(logf(
        "Created virtblock directory",
        path=os.path.dirname(volpath)
    ))

    if os.path.exists(volpath_full):
        rand = time.time()
        logging.info(logf(
            "Getting 'Create request' on existing file, renaming.",
            path=volpath_full, random=rand
        ))
        os.rename(volpath_full, "%s.%s" % (volpath_full, rand))

    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)
    os.truncate(volpath_full, size)
    logging.debug(logf(
        "Truncated file to required size",
        path=volpath,
        size=size
    ))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(logf(
        "Created Filesystem",
        path=volpath,
        command=MKFS_XFS_CMD
    ))
    save_pv_metadata(hostvol_mnt, volpath, size)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
Example #7
def mount_volume(pvpath, mountpoint, pvtype, fstype=None):
    """Mount a Volume"""
    # Need this after kube 1.20.0
    makedirs(mountpoint)

    if pvtype == PV_TYPE_VIRTBLOCK:
        fstype = "xfs" if fstype is None else fstype
        execute(MOUNT_CMD, "-t", fstype, pvpath, mountpoint)
    else:
        execute(MOUNT_CMD, "--bind", pvpath, mountpoint)

    os.chmod(mountpoint, 0o777)
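
Two hypothetical calls, one per branch (paths are illustrative; PV_TYPE_VIRTBLOCK and PV_TYPE_SUBVOL come from the surrounding module):

# Virtual-block PV: the backing file is mounted with the requested
# filesystem type (default xfs).
mount_volume("/mnt/pool/virtblock/ab/cd/pvc-0001",
             "/var/lib/kubelet/pods/pod-1/volumes/pvc-0001",
             PV_TYPE_VIRTBLOCK, fstype=None)

# Subdirectory PV: every other pvtype falls through to a plain bind mount.
mount_volume("/mnt/pool/subvol/ab/cd/pvc-0002",
             "/var/lib/kubelet/pods/pod-1/volumes/pvc-0002",
             PV_TYPE_SUBVOL)
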
Example #8
def mount_glusterfs(volume, target_path):
    """Mount Glusterfs Volume"""
    if not os.path.exists(target_path):
        makedirs(target_path)

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted", mount=target_path))
        return

    # Ignore if already mounted
    if os.path.ismount(target_path):
        logging.debug(logf("Already mounted (2nd try)", mount=target_path))
        return

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        with mount_lock:
            mount_glusterfs_with_host(volume['g_volname'], target_path,
                                      volume['g_host'], volume['g_options'])
        return

    with mount_lock:
        generate_client_volfile(volume['name'])
        # Fix the log, so we can check it out later
        # log_file = "/var/log/gluster/%s.log" % target_path.replace("/", "-")
        log_file = "/var/log/gluster/gluster.log"
        cmd = [
            GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
            "--volfile-id", volume['name'], "-f",
            "%s/%s.client.vol" % (VOLFILES_DIR, volume['name']), target_path
        ]
        try:
            execute(*cmd)
        except CommandException as err:
            logging.error(
                logf("error to execute command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            raise err

    return
Example #9
def save_pv_metadata(hostvol_mnt, pvpath, pvsize):
    """Save PV metadata in info file"""
    # Create info dir if not exists
    info_file_path = os.path.join(hostvol_mnt, "info", pvpath)
    info_file_dir = os.path.dirname(info_file_path)

    makedirs(info_file_dir)
    logging.debug(
        logf("Created metadata directory", metadata_dir=info_file_dir))

    with open(info_file_path + ".json", "w") as info_file:
        info_file.write(
            json.dumps({
                "size": pvsize,
                "path_prefix": os.path.dirname(pvpath)
            }))
        logging.debug(logf(
            "Metadata saved",
            metadata_file=info_file_path,
        ))
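
A hypothetical call and the file it produces, per the code above (paths and size are illustrative):

save_pv_metadata("/mnt/storage-pool-1", "subvol/ab/cd/pvc-0002", 10737418240)

# This writes /mnt/storage-pool-1/info/subvol/ab/cd/pvc-0002.json containing:
#   {"size": 10737418240, "path_prefix": "subvol/ab/cd"}
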
Example #10
def update_virtblock_volume(hostvol_mnt, volname, expansion_requested_pvsize):
    """Update virtual block volume"""

    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_VIRTBLOCK, volhash, volname)
    volpath_full = os.path.join(hostvol_mnt, volpath)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before updating virtblock volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Update the file with required size
    makedirs(os.path.dirname(volpath_full))
    logging.debug(
        logf("Updated virtblock directory", path=os.path.dirname(volpath)))

    volpath_fd = os.open(volpath_full, os.O_CREAT | os.O_RDWR)
    os.close(volpath_fd)

    execute("truncate", "-s", expansion_requested_pvsize, volpath_full)
    logging.debug(
        logf("Truncated file to required size",
             path=volpath,
             size=expansion_requested_pvsize))

    # TODO: Multiple FS support based on volume_capability mount option
    execute(MKFS_XFS_CMD, volpath_full)
    logging.debug(
        logf("Created Filesystem", path=volpath, command=MKFS_XFS_CMD))

    update_pv_metadata(hostvol_mnt, volpath, expansion_requested_pvsize)
    return Volume(
        volname=volname,
        voltype=PV_TYPE_VIRTBLOCK,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=expansion_requested_pvsize,
        volpath=volpath,
    )
Example #11
def mount_volume(pvpath, mountpoint, pvtype, fstype=None):
    """Mount a Volume"""

    # Create subvol dir if PV is manually created
    if not os.path.exists(pvpath):
        makedirs(pvpath)

    # TODO: Will losetup survive container reboot?
    if pvtype == PV_TYPE_RAWBLOCK:
        # losetup of truncated file
        cmd = ["losetup", "-f", "--show", pvpath]
        try:
            loop, _, _ = execute(*cmd)
        except CommandException as err:
            # Better not to create loop devices manually
            errmsg = "Please check availability of 'losetup' and 'loop' device"
            logging.error(logf(errmsg, cmd=cmd, error=format(err)))
            return False

        # Bind mount loop device to target_path, stage_path may not be needed
        makedirs(os.path.dirname(mountpoint))
        Path(mountpoint).touch(mode=0o777)
        execute(MOUNT_CMD, "--bind", loop, mountpoint)
        return True

    # Need this after kube 1.20.0
    makedirs(mountpoint)

    if pvtype == PV_TYPE_VIRTBLOCK:
        fstype = "xfs" if fstype is None else fstype
        execute(MOUNT_CMD, "-t", fstype, pvpath, mountpoint)
    else:
        execute(MOUNT_CMD, "--bind", pvpath, mountpoint)

    os.chmod(mountpoint, 0o777)
    return True
Example #12
def create_subdir_volume(hostvol_mnt, volname, size):
    """Create sub directory Volume"""
    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_SUBVOL, volhash, volname)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before creating subdir volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a subdir
    makedirs(os.path.join(hostvol_mnt, volpath))
    logging.debug(logf("Created PV directory", pvdir=volpath))

    # Write info file so that Brick's quotad sidecar
    # container picks it up.
    save_pv_metadata(hostvol_mnt, volpath, size)

    # Wait for quota set
    # TODO: Handle Timeout
    pvsize_buffer = size * 0.05  # 5%
    pvsize_min = (size - pvsize_buffer)
    pvsize_max = (size + pvsize_buffer)
    logging.debug(
        logf(
            "Watching df of pv directory",
            pvdir=volpath,
            pvsize_buffer=pvsize_buffer,
        ))

    #setfattr -n trusted.glusterfs.namespace -v true
    #setfattr -n trusted.gfs.squota.limit -v size
    try:
        retry_errors(os.setxattr, [
            os.path.join(hostvol_mnt, volpath), "trusted.glusterfs.namespace",
            "true".encode()
        ], [ENOTCONN])
        retry_errors(os.setxattr, [
            os.path.join(hostvol_mnt, volpath), "trusted.gfs.squota.limit",
            str(size).encode()
        ], [ENOTCONN])
    # noqa # pylint: disable=broad-except
    except Exception as err:
        logging.info(
            logf("Failed to set quota using simple-quota. Continuing",
                 error=err))

    count = 0
    while True:
        count += 1
        pvstat = retry_errors(os.statvfs, [os.path.join(hostvol_mnt, volpath)],
                              [ENOTCONN])
        volsize = pvstat.f_blocks * pvstat.f_bsize
        if pvsize_min < volsize < pvsize_max:
            logging.debug(
                logf("Matching df output, Quota set successful",
                     volsize=volsize,
                     num_tries=count))
            break

        if count >= 6:
            logging.warning(
                logf("Waited for some time, Quota set failed, continuing.",
                     volsize=volsize,
                     num_tries=count))
            break

        time.sleep(1)

    return Volume(
        volname=volname,
        voltype=PV_TYPE_SUBVOL,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=size,
        volpath=volpath,
    )
Example #13
def update_subdir_volume(hostvol_mnt, hostvoltype, volname,
                         expansion_requested_pvsize):
    """Update sub directory Volume"""

    volhash = get_volname_hash(volname)
    volpath = get_volume_path(PV_TYPE_SUBVOL, volhash, volname)
    logging.debug(logf("Volume hash", volhash=volhash))

    # Check for mount availability before updating subdir volume
    retry_errors(os.statvfs, [hostvol_mnt], [ENOTCONN])

    # Create a subdir
    makedirs(os.path.join(hostvol_mnt, volpath))
    logging.debug(logf("Updated PV directory", pvdir=volpath))

    # Write info file so that Brick's quotad sidecar
    # container picks it up.
    update_pv_metadata(hostvol_mnt, volpath, expansion_requested_pvsize)

    # Wait for quota set
    # TODO: Handle Timeout
    pvsize_buffer = expansion_requested_pvsize * 0.05  # 5%
    pvsize_min = (expansion_requested_pvsize - pvsize_buffer)
    pvsize_max = (expansion_requested_pvsize + pvsize_buffer)
    logging.debug(
        logf(
            "Watching df of pv directory",
            pvdir=volpath,
            pvsize_buffer=pvsize_buffer,
        ))

    # Handle this case in calling function
    if hostvoltype == 'External':
        return None

    retry_errors(os.setxattr, [
        os.path.join(hostvol_mnt, volpath), "trusted.gfs.squota.limit",
        str(expansion_requested_pvsize).encode()
    ], [ENOTCONN])

    count = 0
    while True:
        count += 1
        pvstat = retry_errors(os.statvfs, [os.path.join(hostvol_mnt, volpath)],
                              [ENOTCONN])
        volsize = pvstat.f_blocks * pvstat.f_bsize
        if pvsize_min < volsize < pvsize_max:
            logging.debug(
                logf("Matching df output, Quota update set successful",
                     volsize=volsize,
                     pvsize=expansion_requested_pvsize,
                     num_tries=count))
            break

        if count >= 6:
            logging.warning(
                logf(
                    "Waited for some time, Quota update set failed, continuing.",
                    volsize=volsize,
                    pvsize=expansion_requested_pvsize,
                    num_tries=count))
            break

        time.sleep(1)

    return Volume(
        volname=volname,
        voltype=PV_TYPE_SUBVOL,
        volhash=volhash,
        hostvol=os.path.basename(hostvol_mnt),
        size=expansion_requested_pvsize,
        volpath=volpath,
    )
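
A hypothetical expansion call (values are illustrative); note that for hostvoltype 'External' the function returns None after updating the metadata, leaving quota handling to the caller:

# Expand an existing subdirectory PV to 20 GiB.
new_size = 20 * 1024 * 1024 * 1024
vol = update_subdir_volume("/mnt/storage-pool-1", "Replica3", "pvc-0002", new_size)
if vol is None:
    # hostvoltype was 'External'; the caller is responsible for the quota.
    pass
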
Example #14
def mount_glusterfs_with_host(volname,
                              mountpoint,
                              hosts,
                              options=None,
                              is_client=False):
    """Mount Glusterfs Volume"""

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, mountpoint):
        logging.debug(logf("Already mounted", mount=mountpoint))
        return

    if not os.path.exists(mountpoint):
        makedirs(mountpoint)

    log_file = "/var/log/gluster/gluster.log"

    cmd = [
        GLUSTERFS_CMD,
        "--process-name",
        "fuse",
        "-l",
        "%s" % log_file,
        "--volfile-id",
        volname,
    ]
    ## on server component we can mount glusterfs with client-pid
    #if not is_client:
    #    cmd.extend(["--client-pid", "-14"])

    for host in hosts.split(','):
        cmd.extend(["--volfile-server", host])

    g_ops = []
    if options:
        for option in options.split(","):
            g_ops.append(f"--{option}")

    logging.debug(
        logf(
            "glusterfs command",
            cmd=cmd,
            opts=g_ops,
            mountpoint=mountpoint,
        ))

    command = cmd + g_ops + [mountpoint]
    try:
        execute(*command)
    except CommandException as excep:
        if excep.err.find("invalid option") != -1:
            logging.info(
                logf(
                    "proceeding without supplied incorrect mount options",
                    options=g_ops,
                ))
            command = cmd + [mountpoint]
            try:
                execute(*command)
            except CommandException as excep:
                logging.info(
                    logf(
                        "mount command failed",
                        cmd=command,
                        error=excep,
                    ))
            return
        logging.info(logf(
            "mount command failed",
            cmd=command,
            error=excep,
        ))
    return
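
This variant takes comma-separated volfile servers and passes extra options through as long-form flags; a sketch of the resulting command line (hosts, options, and the glusterfs binary name are illustrative):

# Hypothetical call:
#   mount_glusterfs_with_host("mypool", "/mnt/mypool",
#                             "server1.example.com,server2.example.com",
#                             options="log-level=DEBUG")
# builds, per the code above, roughly:
#   glusterfs --process-name fuse -l /var/log/gluster/gluster.log \
#       --volfile-id mypool \
#       --volfile-server server1.example.com --volfile-server server2.example.com \
#       --log-level=DEBUG /mnt/mypool
# If glusterfs rejects an option as invalid, the mount is retried once
# without the extra options.
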
Example #15
def mount_glusterfs(volume, mountpoint, storage_options="", is_client=False):
    """Mount Glusterfs Volume"""
    if volume["type"] == "External":
        volname = volume['g_volname']
    else:
        volname = volume["name"]

    if volume['type'] == 'External':
        # Try to mount the Host Volume, handle failure if
        # already mounted
        with mount_lock:
            mount_glusterfs_with_host(volname, mountpoint, volume['g_host'],
                                      volume['g_options'], is_client)
        use_gluster_quota = False
        if (os.path.isfile("/etc/secret-volume/ssh-privatekey")
                and "SECRET_GLUSTERQUOTA_SSH_USERNAME" in os.environ):
            use_gluster_quota = True
        secret_private_key = "/etc/secret-volume/ssh-privatekey"
        secret_username = os.environ.get('SECRET_GLUSTERQUOTA_SSH_USERNAME',
                                         None)

        # SSH into only first reachable host in volume['g_host'] entry
        g_host = reachable_host(volume['g_host'])

        if g_host is None:
            logging.error(logf("All hosts are not reachable"))
            return

        if use_gluster_quota is False:
            logging.debug(logf("Do not set quota-deem-statfs"))
        else:
            logging.debug(
                logf("Set quota-deem-statfs for gluster directory Quota"))
            quota_deem_cmd = [
                "ssh", "-oStrictHostKeyChecking=no", "-i",
                "%s" % secret_private_key,
                "%s@%s" % (secret_username, g_host), "sudo", "gluster",
                "volume", "set",
                "%s" % volume['g_volname'], "quota-deem-statfs", "on"
            ]
            try:
                execute(*quota_deem_cmd)
            except CommandException as err:
                errmsg = "Unable to set quota-deem-statfs via ssh"
                logging.error(logf(errmsg, error=err))
                raise err
        return mountpoint

    generate_client_volfile(volname)
    client_volfile_path = os.path.join(VOLFILES_DIR, "%s.client.vol" % volname)

    if storage_options != "":

        # Construct 'dict' from passed storage-options in 'str'
        storage_options = storage_options_parse(storage_options)

        # Keep the default volfile untouched
        tmp_volfile_path = tempfile.mkstemp()[1]
        shutil.copy(client_volfile_path, tmp_volfile_path)

        # Parse the client-volfile, update passed storage-options & save
        parsed_client_volfile_path = Volfile.parse(tmp_volfile_path)
        parsed_client_volfile_path.update_options_by_type(storage_options)
        parsed_client_volfile_path.save()

        # Sort storage-options and generate hash
        storage_options_hash = get_storage_options_hash(
            json.dumps(storage_options, sort_keys=True))

        # Rename mountpoint & client volfile path with hash
        mountpoint = mountpoint + "_" + storage_options_hash
        new_client_volfile_path = os.path.join(
            VOLFILES_DIR, "%s_%s.client.vol" % (volname, storage_options_hash))
        os.rename(tmp_volfile_path, new_client_volfile_path)
        client_volfile_path = new_client_volfile_path

    # Ignore if already glusterfs process running for that volume
    if is_gluster_mount_proc_running(volname, mountpoint):
        reload_glusterfs(volume)
        logging.debug(logf("Already mounted", mount=mountpoint))
        return mountpoint

    # Ignore if already mounted
    if is_gluster_mount_proc_running(volname, mountpoint):
        reload_glusterfs(volume)
        logging.debug(logf("Already mounted (2nd try)", mount=mountpoint))
        return mountpoint

    if not os.path.exists(mountpoint):
        makedirs(mountpoint)

    with mount_lock:
        # Fix the log, so we can check it out later
        # log_file = "/var/log/gluster/%s.log" % mountpoint.replace("/", "-")
        log_file = "/var/log/gluster/gluster.log"
        cmd = [
            GLUSTERFS_CMD, "--process-name", "fuse", "-l", log_file,
            "--volfile-id", volname, "--fs-display-name",
            "kadalu:%s" % volname, "-f", client_volfile_path, mountpoint
        ]

        ## required for 'simple-quota'
        if not is_client:
            cmd.extend(["--client-pid", "-14"])

        try:
            (_, err, pid) = execute(*cmd)
            VOL_DATA[volname]["pid"] = pid
        except CommandException as err:
            logging.error(
                logf("error to execute command",
                     volume=volume,
                     cmd=cmd,
                     error=format(err)))
            raise err

    return mountpoint
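
When storage_options is non-empty, both the mountpoint and the client volfile get a hash suffix so that differently tuned mounts of the same volume do not collide; a sketch (option string and hash are illustrative):

# Hypothetical call with per-PV storage options:
#   path = mount_glusterfs(volume, "/mnt/storage-pool-1",
#                          storage_options="performance.read-ahead=off",
#                          is_client=True)
# Per the code above, the effective mountpoint becomes something like
#   /mnt/storage-pool-1_<hash-of-sorted-options>
# and a matching <volname>_<hash>.client.vol is written under VOLFILES_DIR.
# Callers must therefore use the returned path, not the one they passed in.
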