def snap_restore(volname, snapname, server=''):
    """Stop the volume, restore the snapshot, then start the volume again.

    Args:
        volname (str): name of the volume being restored.
        snapname (str): name of the snapshot to restore.
        server (str): node on which the commands run; defaults to
            tc.nodes[0] when empty.

    Returns:
        bool: True when every step succeeds, False as soon as one fails.
    """
    if server == '':
        server = tc.nodes[0]
    # The volume must be offline before a snapshot restore is allowed.
    if not stop_volume(volname, server):
        return False
    result = tc.run(server, "gluster snapshot restore %s" % snapname)
    if result[0] != 0:
        tc.logger.error("snapshot restore failed")
        return False
    # Bring the restored volume back online.
    if not start_volume(volname, server):
        return False
    return True
def snap_restore_complete(volname, snapname, mnode=None):
    """Stop the volume, run the snapshot restore CLI, then restart the volume.

    Example:
        snap_restore_complete(testvol, testsnap)

    Args:
        volname (str): volume name
        snapname (str): snapshot name

    Kwargs:
        mnode (str): Node on which cmd has to be executed.
            If None, defaults to nodes[0].

    Returns:
        bool: True on success, False on failure
    """
    mnode = tc.servers[0] if mnode is None else mnode

    # A snapshot can only be restored onto a stopped volume.
    if not stop_volume(volname, mnode):
        tc.logger.error(
            "Failed to stop volume %s before restoring snapshot "
            "%s in node %s" % (volname, snapname, mnode)
        )
        return False

    rcode, _, _ = snap_restore(snapname, mnode=mnode)
    if rcode != 0:
        tc.logger.error("snapshot restore cli execution failed")
        return False

    # Bring the restored volume back online for the caller.
    if not start_volume(volname, mnode):
        tc.logger.error(
            "Failed to start volume %s after restoring snapshot "
            "%s in node %s" % (volname, snapname, mnode)
        )
        return False
    return True
def snap_restore_complete(volname, snapname, mnode=None):
    """Restore snapshot *snapname* on *volname*, handling volume stop/start.

    Example:
        snap_restore_complete(testvol, testsnap)

    Args:
        volname (str): volume name
        snapname (str): snapshot name

    Kwargs:
        mnode (str): Node on which cmd has to be executed.
            If None, defaults to nodes[0].

    Returns:
        bool: True on success, False on failure
    """
    if mnode is None:
        mnode = tc.servers[0]

    # Step 1: take the volume offline — restore requires a stopped volume.
    stopped = stop_volume(volname, mnode)
    if not stopped:
        tc.logger.error("Failed to stop volume %s before restoring snapshot "
                        "%s in node %s" % (volname, snapname, mnode))
        return False

    # Step 2: run the snapshot restore command itself.
    retcode, _, _ = snap_restore(snapname, mnode=mnode)
    if retcode != 0:
        tc.logger.error("snapshot restore cli execution failed")
        return False

    # Step 3: restart the volume now that the restore completed.
    started = start_volume(volname, mnode)
    if not started:
        tc.logger.error("Failed to start volume %s after restoring snapshot "
                        "%s in node %s" % (volname, snapname, mnode))
        return False

    return True
def ctdb_gluster_setup(mnode=None, servers=None, meta_volname=None):
    '''Setup CTDB on gluster setup

    Kwargs:
        mnode (str): Node on which the command has to be executed.
            Default value is ctdb_servers[0]
        servers (list): The list of servers on which we need the
            CTDB setup. Defaults to ctdb_servers as specified in
            the config file.
        meta_volname (str) : Name for the ctdb meta volume.
            Default ctdb meta volume name is "ctdb".

    Returns:
        bool: True if successful, False otherwise

    Example:
        ctdb_gluster_setup()
    '''
    if mnode is None:
        mnode = (tc.global_config['gluster']['cluster_config']['smb']
                 ['ctdb_servers'][0]['host'])
    if servers is None:
        servers = (tc.global_config['gluster']['cluster_config']['smb']
                   ['ctdb_servers'])
        # Config entries are dicts; reduce them to their host names.
        servers = [server['host'] for server in servers]
    if not isinstance(servers, list):
        servers = [servers]
    if meta_volname is None:
        meta_volname = "ctdb"

    # 1. firewall setting for ctdb setup
    ret = ctdb_firewall_settings(servers[:])
    if ret:
        tc.logger.info("firewall settings successfull for ctdb setup")
    else:
        tc.logger.error("firewall settings failed for ctdb setup")
        return False

    # 2. peer probe
    ret = peer_probe_servers(servers[:], mnode=mnode)
    if not ret:
        return False

    # 3. create ctdb meta volume
    ret = create_ctdb_meta_volume(mnode, servers[:], meta_volname)
    if ret:
        tc.logger.info("successfully created ctdb meta volume")
    else:
        tc.logger.error("failed to create ctdb meta volume")
        return False
    tc.run(mnode, "gluster v info %s" % meta_volname)

    # 4. update the ctdb hook scripts
    ret = update_hook_scripts(servers[:])
    if ret:
        tc.logger.info("successfully updated the hook scripts on all servers")
    else:
        tc.logger.error("failed to update the hook scripts on "
                        "one or more servers")
        return False

    # 5. update the smb.conf file
    ret = update_smb_conf(servers[:])
    if ret:
        tc.logger.info("successfully updated the smb.conf file on all servers")
    else:
        tc.logger.error("failed to update the smb.conf file on "
                        "one or more servers")
        return False

    # 6a. start the meta volume
    ret = start_volume(meta_volname, mnode)
    if ret:
        tc.logger.info("successfully started the meta volume")
        tc.run(mnode, "gluster v status ctdb")
    else:
        tc.logger.error("failed to start the meta volume")
        return False
    # Give the hook scripts time to mount the lock volume on all nodes.
    time.sleep(20)

    # 6.b check if /gluster/lock mount exists on all servers
    ret = check_if_gluster_lock_mount_exists(servers[:])
    if ret:
        tc.logger.info("/gluster/lock mount exists on all servers")
    else:
        return False

    # 7. check if /etc/sysconfig/ctdb file exists
    ret = check_if_ctdb_file_exists(servers[:])
    if ret:
        tc.logger.info("/etc/sysconfig/ctdb file exists on all servers")
    else:
        return False

    # 8. create /etc/ctdb/nodes file
    ret = create_ctdb_nodes_file(servers[:])
    if ret:
        tc.logger.info("successfully created /etc/ctdb/nodes file on "
                       "all servers")
    else:
        tc.logger.error("failed to create /etc/ctdb/nodes file on "
                        "one or more servers")
        return False

    # 9. create /etc/ctdb/public_addresses file
    ret = create_ctdb_public_addresses(servers[:])
    if ret:
        tc.logger.info("successfully created /etc/ctdb/public_addresses file "
                       "on all servers")
    else:
        tc.logger.error("failed to create /etc/ctdb/public_addresses file "
                        "on one or more servers")
        return False

    # 10. start the ctdb service
    ret = start_ctdb_service(servers[:])
    if ret:
        tc.logger.info("successfully started ctdb service on all servers")
    else:
        return False
    # CTDB can take several minutes to converge before status is meaningful.
    time.sleep(360)

    # 11. verify the ctdb status
    ret = verify_ctdb_status(mnode)
    if ret:
        tc.logger.info("ctdb status is correct")
    else:
        tc.logger.error("ctdb status is incorrect")
        return False

    return True
def ctdb_gluster_setup(mnode=None, servers=None, meta_volname=None):
    '''Setup CTDB on gluster setup

    Kwargs:
        mnode (str): Node on which the command has to be executed.
            Default value is ctdb_servers[0]
        servers (list): The list of servers on which we need the
            CTDB setup. Defaults to ctdb_servers as specified in
            the config file.
        meta_volname (str) : Name for the ctdb meta volume.
            Default ctdb meta volume name is "ctdb".

    Returns:
        bool: True if successful, False otherwise

    Example:
        ctdb_gluster_setup()
    '''
    if mnode is None:
        mnode = (tc.global_config['gluster']['cluster_config']
                 ['smb']['ctdb_servers'][0])
    if servers is None:
        servers = (tc.global_config['gluster']['cluster_config']
                   ['smb']['ctdb_servers'])
    if not isinstance(servers, list):
        servers = [servers]
    if meta_volname is None:
        meta_volname = "ctdb"

    # 1. firewall setting for ctdb setup
    ret = ctdb_firewall_settings(servers[:])
    if ret:
        tc.logger.info("firewall settings successfull for ctdb setup")
    else:
        tc.logger.error("firewall settings failed for ctdb setup")
        return False

    # 2. peer probe
    ret = peer_probe_servers(servers[:], mnode=mnode)
    if not ret:
        return False

    # 3. create ctdb meta volume
    ret = create_ctdb_meta_volume(mnode, servers[:], meta_volname)
    if ret:
        tc.logger.info("successfully created ctdb meta volume")
    else:
        tc.logger.error("failed to create ctdb meta volume")
        return False
    tc.run(mnode, "gluster v info %s" % meta_volname)

    # 4. update the ctdb hook scripts
    ret = update_hook_scripts(servers[:])
    if ret:
        tc.logger.info("successfully updated the hook scripts on all servers")
    else:
        tc.logger.error("failed to update the hook scripts on "
                        "one or more servers")
        return False

    # 5. update the smb.conf file
    ret = update_smb_conf(servers[:])
    if ret:
        tc.logger.info("successfully updated the smb.conf file on all servers")
    else:
        tc.logger.error("failed to update the smb.conf file on "
                        "one or more servers")
        return False

    # 6a. start the meta volume
    ret = start_volume(meta_volname, mnode)
    if ret:
        tc.logger.info("successfully started the meta volume")
        tc.run(mnode, "gluster v status ctdb")
    else:
        tc.logger.error("failed to start the meta volume")
        return False
    # Give the hook scripts time to mount the lock volume on all nodes.
    time.sleep(20)

    # 6.b check if /gluster/lock mount exists on all servers
    ret = check_if_gluster_lock_mount_exists(servers[:])
    if ret:
        tc.logger.info("/gluster/lock mount exists on all servers")
    else:
        return False

    # 7. check if /etc/sysconfig/ctdb file exists
    ret = check_if_ctdb_file_exists(servers[:])
    if ret:
        tc.logger.info("/etc/sysconfig/ctdb file exists on all servers")
    else:
        return False

    # 8. create /etc/ctdb/nodes file
    ret = create_ctdb_nodes_file(servers[:])
    if ret:
        tc.logger.info("successfully created /etc/ctdb/nodes file on "
                       "all servers")
    else:
        tc.logger.error("failed to create /etc/ctdb/nodes file on "
                        "one or more servers")
        return False

    # 9. create /etc/ctdb/public_addresses file
    ret = create_ctdb_public_addresses(servers[:])
    if ret:
        tc.logger.info("successfully created /etc/ctdb/public_addresses file "
                       "on all servers")
    else:
        tc.logger.error("failed to create /etc/ctdb/public_addresses file "
                        "on one or more servers")
        return False

    # 10. start the ctdb service
    ret = start_ctdb_service(servers[:])
    if ret:
        tc.logger.info("successfully started ctdb service on all servers")
    else:
        return False
    # CTDB can take several minutes to converge before status is meaningful.
    time.sleep(360)

    # 11. verify the ctdb status
    ret = verify_ctdb_status(mnode)
    if ret:
        tc.logger.info("ctdb status is correct")
    else:
        tc.logger.error("ctdb status is incorrect")
        return False

    return True