Example 1
def add_peer_nodes_to_cluster(peers, mnode=None):
    """Adds the given peer nodes to cluster

    Args:
        peers (list): list of peer nodes to be attached to the cluster

    Kwargs:
        mnode (str): Node on which the command has to be executed.
            If None, defaults to servers[0].

    Returns:
        bool: True if peer nodes are attached to the cluster,
              False otherwise

    Example:
        add_peer_nodes_to_cluster(['peer_node1','peer_node2'])
    """

    if mnode is None:
        mnode = tc.servers[0]

    if not isinstance(peers, list):
        peers = [peers]

    ret = start_glusterd(servers=peers)
    if not ret:
        tc.logger.error("glusterd did not start in peer nodes")
        return False

    ret = peer_probe_servers(servers=peers, mnode=mnode)
    if not ret:
        tc.logger.error("Unable to do peer probe on peer machines")
        return False

    return True
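
A minimal usage sketch, assuming the distaf test context tc and the helper
itself are already imported as in the example above; the peer host names
are placeholders:

# Hypothetical host names; in a real run these come from the config file.
new_peers = ['peer_node1', 'peer_node2']
if not add_peer_nodes_to_cluster(new_peers, mnode=tc.servers[0]):
    tc.logger.error("could not attach the new peers to the cluster")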
Example 2
def setup_vol(volname='', dist='', rep='', dispd='', red='', stripe='',
              trans='', servers=''):
    """
        Set up a gluster volume for testing.
        It first formats the back-end bricks, then creates a trusted
        storage pool by peer probing, and finally creates a volume of
        the specified configuration.

        When the volume is created, it sets a global flag to indicate
        that the volume is created. If another testcase calls this
        function a second time with the same volume name, the function
        checks for the flag and, if found, returns True.

        Returns True on success and False on failure.
    """
    if servers == '':
        servers = tc.servers
    volinfo = get_volume_info(server=servers[0])
    if volinfo is not None and volname in volinfo.keys():
        tc.logger.debug("volume %s already exists in %s. Returning..." \
                % (volname, servers[0]))
        return True
    ret = env_setup_servers(servers=servers)
    if not ret:
        tc.logger.error("Formatting backend bricks failed. Aborting...")
        return False
    ret = start_glusterd(servers)
    if not ret:
        tc.logger.error("glusterd did not start in at least one server")
        return False
    time.sleep(5)
    ret = peer_probe_servers(servers[1:], mnode=servers[0])
    if not ret:
        tc.logger.error("Unable to peer probe one or more machines")
        return False
    if rep != 1 and dispd != 1:
        tc.logger.warning("Both replica count and disperse count is specified")
        tc.logger.warning("Ignoring the disperse and using the replica count")
        dispd = 1
        red = 1
    ret = create_volume(volname, dist, rep, stripe, trans, servers,
                        dispd=dispd, red=red)
    if ret[0] != 0:
        tc.logger.error("Unable to create volume %s" % volname)
        return False
    time.sleep(2)
    ret = start_volume(volname, servers[0])
    if not ret:
        tc.logger.error("volume start %s failed" % volname)
        return False
    if tc.global_config["gluster"]["cluster_config"]["nfs_ganesha"]["enable"]:
        from distaflibs.gluster.ganesha import vol_set_ganesha
        ret = vol_set_ganesha(volname)
        if not ret:
            tc.logger.error("failed to set the ganesha option for %s" % volname)
            return False
    tc.global_flag[volname] = True
    return True
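
A hedged usage sketch for this variant; the volume name and counts are
illustrative, and the call assumes tc.servers lists enough nodes for the
requested 2x2 layout:

# Create and start a distributed-replicate volume; 'testvol' is a
# hypothetical name.
if not setup_vol(volname='testvol', dist=2, rep=2, trans='tcp',
                 servers=tc.servers):
    tc.logger.error("setup_vol failed for testvol")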
Example 3
def setup_vol(volname='', dist='', rep='', dispd='', red='', stripe='',
              trans='', servers=''):
    """
        Set up a gluster volume for testing.
        It first formats the back-end bricks, then creates a trusted
        storage pool by peer probing, and finally creates a volume of
        the specified configuration.

        When the volume is created, it sets a global flag to indicate
        that the volume is created. If another testcase calls this
        function a second time with the same volume name, the function
        checks for the flag and, if found, returns True.

        Returns True on success and False on failure.
    """
    if servers == '':
        servers = tc.servers
    volinfo = get_volume_info(server=servers[0])
    if volinfo is not None and volname in volinfo.keys():
        tc.logger.debug("volume %s already exists in %s. Returning..." \
                % (volname, servers[0]))
        return True
    ret = env_setup_servers(servers=servers)
    if not ret:
        tc.logger.error("Formatting backend bricks failed. Aborting...")
        return False
    ret = start_glusterd(servers)
    if not ret:
        tc.logger.error("glusterd did not start in at least one server")
        return False
    time.sleep(5)
    ret = peer_probe_servers(servers[1:], mnode=servers[0])
    if not ret:
        tc.logger.error("Unable to peer probe one or more machines")
        return False
    if rep != 1 and dispd != 1:
        tc.logger.warning("Both replica count and disperse count is specified")
        tc.logger.warning("Ignoring the disperse and using the replica count")
        dispd = 1
        red = 1
    ret = create_volume(volname, dist, rep, stripe, trans, servers,
                        dispd=dispd, red=red)
    if ret[0] != 0:
        tc.logger.error("Unable to create volume %s" % volname)
        return False
    time.sleep(2)
    ret = start_volume(volname, servers[0])
    if not ret:
        tc.logger.error("volume start %s failed" % volname)
        return False
    tc.global_flag[volname] = True
    return True
Example 4
def ctdb_gluster_setup(mnode=None, servers=None, meta_volname=None):
    '''Set up CTDB on a gluster setup.
    Kwargs:
        mnode (str): Node on which the command has
            to be executed. Default value is ctdb_servers[0]
        servers (list): The list of servers on which we need
            the CTDB setup.
            Defaults to ctdb_servers as specified in the config file.
        meta_volname (str): Name for the ctdb meta volume.
            Default ctdb meta volume name is "ctdb".
    Returns:
        bool: True if successful, False otherwise
    Example:
        ctdb_gluster_setup()
    '''
    if mnode is None:
        mnode = (tc.global_config['gluster']['cluster_config']['smb']
                 ['ctdb_servers'][0]['host'])
    if servers is None:
        servers = (tc.global_config['gluster']['cluster_config']['smb']
                   ['ctdb_servers'])
        server_host_list = []
        for server in servers:
            server_host_list.append(server['host'])
        servers = server_host_list
    if not isinstance(servers, list):
        servers = [servers]
    no_of_ctdb_servers = len(servers)
    if meta_volname is None:
        meta_volname = "ctdb"

    # 1. firewall setting for ctdb setup
    ret = ctdb_firewall_settings(servers[:])
    if ret:
        tc.logger.info("firewall settings successfull for ctdb setup")
    else:
        tc.logger.error("firewall settings failed for ctdb setup")
        return False

    # 2. peer probe
    ret = peer_probe_servers(servers[:], mnode=mnode)
    if not ret:
        return False

    # 3. create ctdb meta volume
    ret = create_ctdb_meta_volume(mnode, servers[:], meta_volname)
    if ret:
        tc.logger.info("successfully created ctdb meta volume")
    else:
        tc.logger.error("failed to create ctdb meta volume")
        return False
    tc.run(mnode, "gluster v info %s" % meta_volname)

    # 4. update the ctdb hook scripts
    ret = update_hook_scripts(servers[:])
    if ret:
        tc.logger.info("successfully updated the hook scripts on all servers")
    else:
        tc.logger.error("failed to update the hook scripts on "
                        "one or more servers")
        return False

    # 5. update the smb.conf file
    ret = update_smb_conf(servers[:])
    if ret:
        tc.logger.info("successfully updated the smb.conf file on all servers")
    else:
        tc.logger.error("failed to update the smb.conf file on "
                        "one or more servers")
        return False

    # 6a. start the meta volume
    ret = start_volume(meta_volname, mnode)
    if ret:
        tc.logger.info("successfully started the meta volume")
        tc.run(mnode, "gluster v status ctdb")
    else:
        tc.logger.error("failed to start the meta volume")
        return False
    time.sleep(20)
    # 6b. check if /gluster/lock mount exists on all servers
    ret = check_if_gluster_lock_mount_exists(servers[:])
    if ret:
        tc.logger.info("/gluster/lock mount exists on all servers")
    else:
        return False

    # 7. check if /etc/sysconfig/ctdb file exists
    ret = check_if_ctdb_file_exists(servers[:])
    if ret:
        tc.logger.info("/etc/sysconfig/ctdb file exists on all servers")
    else:
        return False

    # 8. create /etc/ctdb/nodes file
    ret = create_ctdb_nodes_file(servers[:])
    if ret:
        tc.logger.info("successfully created /etc/ctdb/nodes file on "
                       "all servers")
    else:
        tc.logger.error("failed to create /etc/ctdb/nodes file on "
                        "one or more servers")
        return False

    # 9. create /etc/ctdb/public_addresses file
    ret = create_ctdb_public_addresses(servers[:])
    if ret:
        tc.logger.info("successfully created /etc/ctdb/public_addresses file "
                       "on all servers")
    else:
        tc.logger.error("failed to create /etc/ctdb/public_addresses file "
                        "on one or more servers")
        return False

    # 10. start the ctdb service
    ret = start_ctdb_service(servers[:])
    if ret:
        tc.logger.info("successfully started ctdb service on all servers")
    else:
        return False
    time.sleep(360)

    # 11. verify the ctdb status
    ret = verify_ctdb_status(mnode)
    if ret:
        tc.logger.info("ctdb status is correct")
    else:
        tc.logger.error("ctdb status is incorrect")
        return False

    return True
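
A minimal invocation sketch, assuming the smb/ctdb_servers section of the
config file is populated so the defaults resolve; the meta volume name
override is hypothetical:

# mnode and servers default to the ctdb_servers entries from the config.
if not ctdb_gluster_setup(meta_volname='ctdb_meta'):
    tc.logger.error("CTDB setup on the gluster cluster failed")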
Example 5
def setup_nfs_ganesha(no_of_servers=None):
    '''Setup NFS-Ganesha HA cluster.
    Kwargs:
        no_of_servers (Optional[int]): The number of nodes on which we have
            to setup the HA cluster. Default it takes the number
            of servers from the pool list.
    Returns:
        bool: True if successful, False otherwise.
    '''
    if tc.global_flag.get('setup_nfs_ganesha'):
        tc.logger.debug("nfs-ganesha is already set up, returning...")
        return True
    if no_of_servers is None:
        servers = tc.servers
        no_of_servers = len(servers)
    servers = tc.servers[0:no_of_servers]
    no_of_servers = int(no_of_servers)
    # Step 1: Peer probe
    ret = peer_probe_servers(tc.servers[1:no_of_servers], mnode=tc.servers[0])
    if not ret:
        return False
    # Step 2: Passwordless ssh for nfs
    ret = create_nfs_passwordless_ssh(snodes=tc.servers[0:no_of_servers],
                                      mnode=tc.servers[0])
    if ret:
        tc.logger.info("passwordless ssh between nodes successfull")
    else:
        tc.logger.error("passwordless ssh between nodes unsuccessfull")
        return False
    # Step 3: Update ganesha-ha.conf file
    ret = update_ganesha_ha_conf(no_of_servers)
    if ret:
        tc.logger.info("ganesha-ha.conf files succeessfully updated on all "
                       "the nodes")
    else:
        tc.logger.error("ganesha-ha.conf files not succeessfully updated on "
                        "all the nodes")
        return False
    # Step 4: Cluster setup
    ret = cluster_auth_setup(no_of_servers)
    if ret:
        tc.logger.info("successfull cluster setup")
    else:
        tc.logger.error("unsuccessfull cluster setup")
        return False
    # Step 5: Using CLI to create shared volume
    ret, _, _ = tc.run(tc.servers[0], "gluster v list | grep "
                       "'gluster_shared_storage'")
    if ret != 0:
        ret, _, _ = tc.run(tc.servers[0], "gluster volume set all "
                           "cluster.enable-shared-storage enable")
        if ret != 0:
            tc.logger.error("shared volume creation unsuccessfull")
            return False
        else:
            tc.logger.info("shared volume creation successfull")
            time.sleep(10)
    else:
        tc.logger.info("shared volume already exists")
    time.sleep(60)
    # Step 6: Enable NFS-Ganesha
    ret = set_nfs_ganesha(True)
    if ret:
        tc.logger.info("gluster nfs-ganesha enable success")
    else:
        tc.logger.error("gluster nfs-ganesha enable failed")
        return False
    # Setting globalflag to True
    tc.global_flag["setup_nfs_ganesha"] = True

    return True
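
A short usage sketch; the four-node count is illustrative and assumes
tc.servers lists at least four hosts:

# Build an NFS-Ganesha HA cluster on the first four servers in the pool.
if not setup_nfs_ganesha(no_of_servers=4):
    tc.logger.error("NFS-Ganesha HA setup failed")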
Example 6
def ctdb_gluster_setup(mnode=None, servers=None, meta_volname=None):
    '''Set up CTDB on a gluster setup.
    Kwargs:
        mnode (str): Node on which the command has
            to be executed. Default value is ctdb_servers[0]
        servers (list): The list of servers on which we need
            the CTDB setup.
            Defaults to ctdb_servers as specified in the config file.
        meta_volname (str): Name for the ctdb meta volume.
            Default ctdb meta volume name is "ctdb".
    Returns:
        bool: True if successful, False otherwise
    Example:
        ctdb_gluster_setup()
    '''
    if mnode is None:
        mnode = (tc.global_config['gluster']['cluster_config']
                 ['smb']['ctdb_servers'][0])
    if servers is None:
        servers = (tc.global_config['gluster']['cluster_config']
                   ['smb']['ctdb_servers'])
    if not isinstance(servers, list):
        servers = [servers]
    no_of_ctdb_servers = len(servers)
    if meta_volname is None:
        meta_volname = "ctdb"

    # 1. firewall setting for ctdb setup
    ret = ctdb_firewall_settings(servers[:])
    if ret:
        tc.logger.info("firewall settings successfull for ctdb setup")
    else:
        tc.logger.error("firewall settings failed for ctdb setup")
        return False

    # 2. peer probe
    ret = peer_probe_servers(servers[:], mnode=mnode)
    if not ret:
        return False

    # 3. create ctdb meta volume
    ret = create_ctdb_meta_volume(mnode, servers[:], meta_volname)
    if ret:
        tc.logger.info("successfully created ctdb meta volume")
    else:
        tc.logger.error("failed to create ctdb meta volume")
        return False
    tc.run(mnode, "gluster v info %s" % meta_volname)

    # 4. update the ctdb hook scripts
    ret = update_hook_scripts(servers[:])
    if ret:
        tc.logger.info("successfully updated the hook scripts on all servers")
    else:
        tc.logger.error("failed to update the hook scripts on "
                        "one or more servers")
        return False

    # 5. update the smb.conf file
    ret = update_smb_conf(servers[:])
    if ret:
        tc.logger.info("successfully updated the smb.conf file on all servers")
    else:
        tc.logger.error("failed to update the smb.conf file on "
                        "one or more servers")
        return False

    # 6a. start the meta volume
    ret = start_volume(meta_volname, mnode)
    if ret:
        tc.logger.info("successfully started the meta volume")
        tc.run(mnode, "gluster v status ctdb")
    else:
        tc.logger.error("failed to start the meta volume")
        return False
    time.sleep(20)
    # 6b. check if /gluster/lock mount exists on all servers
    ret = check_if_gluster_lock_mount_exists(servers[:])
    if ret:
        tc.logger.info("/gluster/lock mount exists on all servers")
    else:
        return False

    # 7. check if /etc/sysconfig/ctdb file exists
    ret = check_if_ctdb_file_exists(servers[:])
    if ret:
        tc.logger.info("/etc/sysconfig/ctdb file exists on all servers")
    else:
        return False

    # 8. create /etc/ctdb/nodes file
    ret = create_ctdb_nodes_file(servers[:])
    if ret:
        tc.logger.info("successfully created /etc/ctdb/nodes file on "
                       "all servers")
    else:
        tc.logger.error("failed to create /etc/ctdb/nodes file on "
                        "one or more servers")
        return False

    # 9. create /etc/ctdb/public_addresses file
    ret = create_ctdb_public_addresses(servers[:])
    if ret:
        tc.logger.info("successfully created /etc/ctdb/public_addresses file "
                       "on all servers")
    else:
        tc.logger.error("failed to create /etc/ctdb/public_addresses file "
                        "on one or more servers")
        return False

    # 10. start the ctdb service
    ret = start_ctdb_service(servers[:])
    if ret:
        tc.logger.info("successfully started ctdb service on all servers")
    else:
        return False
    time.sleep(360)

    # 11. verify the ctdb status
    ret = verify_ctdb_status(mnode)
    if ret:
        tc.logger.info("ctdb status is correct")
    else:
        tc.logger.error("ctdb status is incorrect")
        return False

    return True
Example 7
def setup_vol(volname, mnode=None, dist=1, rep=1, dispd=1, red=1,
              stripe=1, trans="tcp", servers=None):
    """
        Set up a gluster volume for testing.
        It first formats the back-end bricks, then creates a trusted
        storage pool by peer probing, and finally creates a volume of
        the specified configuration.

        When the volume is created, it sets a global flag to indicate
        that the volume is created. If another testcase calls this
        function a second time with the same volume name, the function
        checks for the flag and, if found, returns True.
    Args:
        volname(str): volume name that has to be created

    Kwargs:
        mnode(str): server on which the command has to be executed,
            defaults to tc.servers[0]
        dist(int): distribute count, defaults to 1
        rep(int): replica count, defaults to 1
        stripe(int): stripe count, defaults to 1
        trans(str): transport type, defaults to tcp
        servers(list): servers on which the volume has to be created,
            defaults to tc.servers
        dispd(int): disperse-data count, defaults to 1
        red(int): redundancy count, defaults to 1

    Returns:
        bool: True on success and False for failure.
    """
    if servers is None:
        servers = tc.servers[:]
    if mnode is None:
        mnode = tc.servers[0]
    volinfo = get_volume_info(mnode=mnode)
    if volinfo is not None and volname in volinfo.keys():
        tc.logger.debug("volume %s already exists in %s. Returning..." \
                % (volname, servers[0]))
        return True
    ret = env_setup_servers(servers=servers)
    if not ret:
        tc.logger.error("Formatting backend bricks failed. Aborting...")
        return False
    ret = start_glusterd(servers)
    if not ret:
        tc.logger.error("glusterd did not start in at least one server")
        return False
    time.sleep(5)
    ret = peer_probe_servers(servers[1:], mnode=mnode)
    if not ret:
        tc.logger.error("Unable to peer probe one or more machines")
        return False
    if rep != 1 and dispd != 1:
        tc.logger.warning("Both replica count and disperse count is specified")
        tc.logger.warning("Ignoring the disperse and using the replica count")
        dispd = 1
        red = 1
    ret = create_volume(volname, mnode, dist, rep, stripe, trans, servers,
                        dispd=dispd, red=red)
    if ret[0] != 0:
        tc.logger.error("Unable to create volume %s" % volname)
        return False
    time.sleep(2)
    ret = start_volume(volname, mnode)
    if not ret:
        tc.logger.error("volume start %s failed" % volname)
        return False
    if tc.global_config["gluster"]["cluster_config"]["nfs_ganesha"]["enable"]:
        from distaflibs.gluster.ganesha import vol_set_ganesha
        ret = vol_set_ganesha(volname)
        if not ret:
            tc.logger.error("failed to set the ganesha option for %s" % volname)
            return False
    tc.global_flag[volname] = True
    return True
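
A usage sketch for this newer signature, which takes integer counts and an
explicit mnode; the volume name and layout are placeholders:

# Two-way replicated volume over TCP on the first two pool servers;
# 'repvol' is a hypothetical volume name.
if not setup_vol('repvol', mnode=tc.servers[0], rep=2, trans='tcp',
                 servers=tc.servers[0:2]):
    tc.logger.error("setup_vol failed for repvol")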