def create_gluster_partition():
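    """Create an LVM-backed XFS brick partition on every server node.

    Assumes $GLUSTERPV, $GLUSTERVG, $GLUSTERLV and $GLUSTEREXPORT are set in
    the environment of the remote shell used by run_helper.run_command().
    """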
    mgmt_node = run_helper.get_mgmt_node()
    nodes = run_helper.get_nodes_ip()


    for node in nodes:
        # Create the physical volume and volume group for the brick.
        cmd = 'pvcreate $GLUSTERPV --force; vgcreate $GLUSTERVG $GLUSTERPV'
        run_helper.run_command(node, cmd, False)

        # Carve out a 2GB logical volume for the brick.
        cmd = 'lvcreate -n $GLUSTERLV -L 2GB $GLUSTERVG'
        run_helper.run_command(node, cmd, False)

        # Format the logical volume as XFS with 512-byte inodes.
        cmd = 'mkfs -t xfs -i size=512 /dev/$GLUSTERVG/$GLUSTERLV'
        run_helper.run_command(node, cmd, False)

        # Append an fstab entry so the brick mounts by UUID at $GLUSTEREXPORT.
        # xfs_admin -u prints 'UUID = <uuid>', so rewrite it as 'UUID=<uuid>'.
        cmd = ('echo "$(xfs_admin -u /dev/$GLUSTERVG/$GLUSTERLV | '
               'sed "s/UUID = /UUID=/") $GLUSTEREXPORT xfs '
               'allocsize=4096,inode64 0 0" >> /etc/fstab')
        run_helper.run_command(node, cmd, False)

    return 0
def install_gluster(tarball):
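    """Build and install glusterfs from a source tarball on all nodes.

    The tarball is downloaded from bits.gluster.com when it is not already
    present locally. The actual build is done by real_install_gluster()
    (defined elsewhere in this module), one thread per server/client node.
    """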
    nodes = run_helper.get_nodes_ip()

    clients = run_helper.get_client_ip()
    for client in clients:
        if client not in nodes:
            nodes.append(client)

    if not os.path.exists(tarball):
        print 'INFO: Source tarball ' + tarball + ' doesn\'t exist. Proceeding to download from bits.gluster.com'
        download_url = 'http://bits.gluster.com/pub/gluster/glusterfs/src/' + tarball
        cmd = 'wget ' +  download_url
        wget_status = os.system(cmd)
        if wget_status:
            print 'Unable to download ' + tarball + ' from bits.gluster.com. Exiting...'
            sys.exit(1)

    prefix_path = run_helper.get_prefix_path()

    build_dir = run_helper.get_build_dir()
    if build_dir[-1] != '/':
        build_dir = build_dir + '/'

    ret_queue = Queue.Queue()
    threads = []
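    # Build and install gluster on every node in parallel; each thread pushes
    # its exit status onto ret_queue.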
    for node in nodes:
        t = threading.Thread(target=real_install_gluster, args=(node, tarball, build_dir, prefix_path, ret_queue))
        t.start()
        threads.append(t)

    ret_codes = []
    for t in threads:
        t.join()
        ret_codes.append(ret_queue.get())

    return_value = 0
    for ret in ret_codes:
        if ret != 0:
            return_value = 1
            break

    return return_value
def install_gluster_rpms(rpms):
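    """Install the given glusterfs RPMs on all server and client nodes.

    The version string is taken from the configured gluster version
    (typically the source tarball name) and passed to install_rpm(), which
    runs once per node in its own thread.
    """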

    nodes = run_helper.get_nodes_ip()

    clients = run_helper.get_client_ip()
    for client in clients:
        if client not in nodes:
            nodes.append(client)

    g_version = run_helper.get_gluster_version()
    if g_version.endswith('.tar.gz'):
        gluster_version = g_version[:-7]
    else:
        gluster_version = g_version

    match = re.search(r'glusterfs-([\w.]+)', gluster_version)
    if not match:
        print 'Unable to determine the gluster version for the rpm URL. Please check the config file.'
        sys.exit(1)
    version = match.group(1)

    ret_queue = Queue.Queue()
    threads = []
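    # Same pattern as install_gluster(): install the RPMs on every node in
    # parallel and collect each thread's exit status through ret_queue.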
    for node in nodes:
        t = threading.Thread(target=install_rpm, args=(node, version, rpms, ret_queue))
        t.start()
        threads.append(t)

    ret_codes = []
    for t in threads:
        t.join()
        ret_codes.append(ret_queue.get())

    ret_value = 0
    for ret in ret_codes:
        if ret != 0:
            ret_value = 1
            break

    return ret_value
def create_gluster_volume():
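    """Create the gluster volume described in the config file.

    Starts glusterd on every server node, builds the trusted pool by probing
    the peers from the management node, and then issues the
    'gluster volume create' command with the configured volume type,
    transport and brick layout.
    """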
    mgmt_node = run_helper.get_mgmt_node()
    nodes = run_helper.get_nodes_ip()
    if mgmt_node not in nodes:
        print 'WARNING: the management node is not part of the server nodes. This is unusual, but proceeding anyway.'

    export_dir = run_helper.get_server_export_dir()
    vol_type = run_helper.get_volume_type()
    trans_type = run_helper.get_trans_type()
    volname = run_helper.get_vol_name()
    bricks_number = run_helper.get_number_of_bricks()

    pre_create_cleanup(nodes, export_dir)

    brick_list = []
    last_server_index = len(nodes) - 1
    server_index = 0
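    # Distribute the requested number of bricks round-robin across the server
    # nodes, one export directory per brick.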
    for i in range(1, (int(bricks_number) + 1)):
        brick = nodes[server_index] + ':' + export_dir + '/' + volname + '_brick' + str(i)
        brick_list.append(brick)
        if server_index == last_server_index:
            server_index = 0
        else:
            server_index = server_index + 1

    replica_count = ''
    if vol_type in ('dist-rep', 'stripe-rep', 'rep', 'dist-stripe-rep'):
        replica_count = 'replica ' + run_helper.get_replica_count()

    stripe_count = ''
    if vol_type in ('stripe', 'stripe-rep', 'dist-stripe-rep', 'dist-stripe'):
        stripe_count = 'stripe ' + run_helper.get_stripe_count()

    # Assemble the 'gluster volume create' command; replica_count and
    # stripe_count are empty strings for plain distribute volumes.
    vol_create_cmd = ('gluster volume create ' + volname + ' ' + replica_count
                      + ' ' + stripe_count + ' transport ' + trans_type + ' '
                      + ' '.join(brick_list) + ' --mode=script')

    flag = 0
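    # Make sure glusterd is running on every server node before touching the
    # cluster.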
    for node in nodes:
        status = run_helper.run_command(node, 'glusterd', False)
        if status:
            print 'glusterd could not be started on node: ' + node
            flag = 1

    if flag:
        print 'glusterd could not be started on all nodes. Exiting...'
        sys.exit(1)

    flag = 0
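    # Build the trusted storage pool by probing every other node from the
    # management node.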
    for node in nodes:
        if node != mgmt_node:
            status = run_helper.run_command(mgmt_node, 'gluster peer probe ' + node, False)
            if status:
                print 'peer probe failed on ' + node
                flag = 1

    if flag:
        print 'Peer probe failed on some of the machines. Exiting...'
        sys.exit(1)

    status = run_helper.run_command(mgmt_node, vol_create_cmd, True)
    if status:
        print 'Volume creation failed.'

    return status