def run(self):
    """Scan a non-Windows node for erl_crash.dump / core files, download any
    found into self.path, then move the originals into a stamped backup
    folder under /tmp/backup_crash/.

    Fixes vs. original:
    - the backup `mv` command formatted `/opt/{0}/var/lib/{1}/` with
      (stamp, server_type), producing a nonexistent `/opt/<stamp>/...`
      path; it now uses the server_type for both path components.
    - `remote.disconnect()` was called twice and skipped on exception;
      it is now called exactly once in a `finally` block.
    """
    remote = RemoteMachineShellConnection(self.server)
    server_type = 'membase'
    if remote.is_couchbase_installed():
        server_type = 'couchbase'
    # timestamp used to name the per-run backup folder
    stamp = time.strftime("%d_%m_%Y_%H_%M")
    try:
        info = remote.extract_remote_info()
        if info.type.lower() != 'windows':
            core_files = []
            print("looking for crashes on {0} ... ".format(info.ip))
            print("erl_crash files under /opt/{0}/var/lib/{0}/".format(server_type))
            core_files.extend(remote.file_starts_with(
                "/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
            print("core* files under /opt/{0}/var/lib/{0}/".format(server_type))
            core_files.extend(remote.file_starts_with(
                "/opt/{0}/var/lib/{0}/".format(server_type), "core"))
            print("core* files under /tmp/")
            core_files.extend(remote.file_starts_with("/tmp/", "core"))
            if core_files:
                print("found crashes on {0}: {1}".format(info.ip, core_files))
            else:
                print("crashes not found on {0}".format(info.ip))
            # i counts successfully downloaded files; also suffixes filenames
            i = 0
            for core_file in core_files:
                if core_file.find('erl_crash.dump') != -1:
                    # Erlang crash dump: just copy the file back as-is
                    erl_crash_file_name = "erlang-{0}-{1}.log".format(self.server.ip, i)
                    remote_path, file_name = \
                        os.path.dirname(core_file), os.path.basename(core_file)
                    if remote.get_file(remote_path, file_name,
                                       os.path.join(self.path, erl_crash_file_name)):
                        print('downloaded core file : {0}'.format(core_file))
                        i += 1
                else:
                    # Native core dump: run cbanalyze-core remotely, then
                    # download its text output instead of the raw core.
                    command = "/opt/{0}/bin/tools/cbanalyze-core".format(server_type)
                    core_file_name = "core-{0}-{1}.log".format(self.server.ip, i)
                    core_log_output = "/tmp/{0}".format(core_file_name)
                    output, _ = remote.execute_command(
                        '{0} {1} -f {2}'.format(command, core_file, core_log_output))
                    print(output)
                    remote_path, file_name = \
                        os.path.dirname(core_log_output), os.path.basename(core_log_output)
                    if remote.get_file(remote_path, file_name,
                                       os.path.join(self.path, core_file_name)):
                        print('downloaded core file : {0}'.format(core_log_output))
                        i += 1
            if i > 0:
                # Archive the originals so a later run doesn't re-report them.
                # NOTE: {1} (server_type) is used for both components of the
                # /opt path; the original formatted the stamp there by mistake.
                command = ("mkdir -p /tmp/backup_crash/{0};"
                           "mv -f /tmp/core* /tmp/backup_crash/{0}; "
                           "mv -f /opt/{1}/var/lib/{1}/erl_crash.dump* "
                           "/tmp/backup_crash/{0}").format(stamp, server_type)
                print("put all crashes on {0} in backup folder: "
                      "/tmp/backup_crash/{1}".format(self.server.ip, stamp))
                remote.execute_command(command)
                output, error = remote.execute_command(
                    "ls -la /tmp/backup_crash/{0}".format(stamp))
                for o in output:
                    print(o)
    except Exception as ex:
        print(ex)
    finally:
        # always release the SSH connection (original disconnected twice
        # on success and not at all on failure)
        remote.disconnect()
def run(self):
    """Run cbcollect_info on the remote node and download the resulting
    diag zip into self.path, removing the remote copy afterwards.

    Does nothing beyond computing the file name when self.local is set.
    """
    file_name = "%s-%s-diag.zip" % (self.server.ip, time_stamp())
    if self.local:
        return
    shell = RemoteMachineShellConnection(self.server)
    print("Collecting logs from %s\n" % self.server.ip)
    _, err = shell.execute_cbcollect_info(file_name)
    print("\n".join(err))
    # cbcollect drops the zip in the ssh user's home directory
    if shell.info.distribution_type.lower() == 'mac':
        user_path = "/Users/"
    elif self.server.ssh_username == "root":
        user_path = "/"
    else:
        user_path = "/home/"
    remote_path = "%s%s" % (user_path, self.server.ssh_username)
    if not shell.file_exists(remote_path, file_name):
        raise Exception(
            "%s doesn't exists on server" % file_name)
    fetched = shell.get_file(remote_path, file_name,
                             "%s/%s" % (self.path, file_name))
    if fetched:
        print("Downloading zipped logs from %s" % self.server.ip)
    else:
        raise Exception("Fail to download zipped logs from %s"
                        % self.server.ip)
    # clean up the remote copy once it is safely downloaded
    shell.execute_command(
        "rm -f %s" % os.path.join(remote_path, file_name))
    shell.disconnect()
def start_fetch_pcaps(self):
    """Stop tcpdump on every server; if the test failed, zip the pcaps
    folder on each node, download the zip, and clean up remotely."""
    log_path = TestInputSingleton.input.param("logs_folder", "/tmp")
    for server in self.servers:
        shell = RemoteMachineShellConnection(server)
        # terminate tcpdump if it is running
        out, err = shell.execute_command(
            "if [[ \"$(pgrep tcpdump)\" ]]; "
            "then kill -s TERM $(pgrep tcpdump); fi")
        shell.log_command_output(out, err)
        if self.is_test_failed():
            # make sure zip/unzip are available on the node
            out, err = shell.execute_command("yum install -y zip unzip")
            shell.log_command_output(out, err)
            # archive the pcaps folder
            zip_name = server.ip + "_pcaps.zip"
            out, err = shell.execute_command("zip -r " + zip_name + " pcaps")
            shell.log_command_output(out, err)
            # pull the archive down to the client
            copied = shell.get_file(
                "/root", os.path.basename(zip_name), log_path)
            self.log.info("%s node pcap zip copied on client : %s"
                          % (server.ip, copied))
            if copied:
                # remote cleanup: the zip and the raw pcaps
                shell.execute_command("rm -rf " + zip_name)
                shell.execute_command("rm -rf pcaps")
        shell.disconnect()
def run(self):
    """Run cbcollect_info on self.server and download the diag zip
    (named <ip>-<month><day><year>-diag.zip) into self.path."""
    shell = RemoteMachineShellConnection(self.server)
    now = datetime.now()
    file_name = "%s-%s%s%s-diag.zip" % (self.server.ip,
                                        now.month, now.day, now.year)
    print("Collecting logs from %s\n" % (self.server.ip))
    output, error = shell.execute_cbcollect_info(file_name)
    print("\n".join(output))
    print("\n".join(error))
    # the zip lands in the ssh user's home directory
    user_path = "/" if self.server.ssh_username == "root" else "/home/"
    remote_dir = "%s%s" % (user_path, self.server.ssh_username)
    if not shell.file_exists(remote_dir, file_name):
        raise Exception("%s doesn't exists on server" % (file_name))
    if shell.get_file(remote_dir, file_name,
                      "%s/%s" % (self.path, file_name)):
        print("Downloading zipped logs from %s" % (self.server.ip))
    else:
        raise Exception("Fail to download zipped logs from %s"
                        % (self.server.ip))
    shell.disconnect()
def download_backups(self, backup_location):
    """Download every file under *backup_location* on self.server into a
    fresh local directory, then remove the remote directory.

    Returns a list of {'path': local_dir, 'file': name} dicts describing
    the downloaded files.

    Fixes vs. original: the SSH shell is now always disconnected (it was
    leaked), a no-op bare `os.getcwd()` statement is removed, and the loop
    variable no longer shadows the `file` builtin.
    """
    local_files = []
    cwd = os.getcwd()
    # create a unique temp folder under ./out/tmp for this download batch
    local_dir = "{0}/out/tmp/{1}".format(cwd, uuid.uuid4())
    if not os.path.exists(local_dir):
        os.makedirs(local_dir)
    self.log.info("created {0} folder in the local machine...".format(local_dir))
    shell = RemoteMachineShellConnection(self.server)
    try:
        for backup_file in shell.list_files(backup_location):
            self.log.info("downloading remote file {0}".format(backup_file))
            shell.get_file(backup_file['path'], backup_file['file'],
                           "{0}/{1}".format(local_dir, backup_file['file']))
            local_files.append({'path': local_dir, 'file': backup_file['file']})
        # remote copies are no longer needed once downloaded
        shell.remove_directory(backup_location)
    finally:
        shell.disconnect()
    return local_files
def fetch_cb_collect_logs(self):
    """Trigger cbcollect on every node via the REST API, poll the
    clusterLogsCollection task until it completes (up to ~5 minutes),
    then copy each node's ZIP to the client and delete the remote copy.

    Downloaded zips go to the 'logs_folder' test input param (default /tmp).
    NOTE(review): if the task never reaches 'completed' within 30 polls,
    the code still proceeds to read cb_collect_response['perNode'] —
    confirm that is the intended best-effort behavior.
    """
    log_path = TestInputSingleton.input.param("logs_folder", "/tmp")
    for node in self.servers:
        params = dict()
        if len(self.servers) != 1:
            # multi-node cluster: address the node by its real IP
            params['nodes'] = 'ns_1@' + node.ip
        else:
            # In case of single node we have to pass ip as below
            params['nodes'] = 'ns_1@' + '127.0.0.1'
        self.log.info('Running cbcollect on node ' + node.ip)
        rest = RestConnection(node)
        status, _, _ = rest.perform_cb_collect(params)
        # Needed as it takes a few seconds before the collection start
        time.sleep(10)
        self.log.info("%s - cbcollect status: %s" % (node.ip, status))
        if status is True:
            self.log.info("Polling active_tasks to check cbcollect status")
            cb_collect_response = dict()
            # poll at 10s intervals, at most 30 times
            retry = 0
            while retry < 30:
                cb_collect_response = rest.ns_server_tasks(
                    "clusterLogsCollection")
                self.log.debug(
                    "{}: CBCollectInfo Iteration {} - {}".format(
                        node.ip, retry, cb_collect_response["status"]))
                if cb_collect_response['status'] == 'completed':
                    self.log.debug(cb_collect_response)
                    break
                else:
                    # CB collect running, wait and check progress again
                    retry += 1
                    time.sleep(10)
            self.log.info("Copying cbcollect ZIP file to Client")
            remote_client = RemoteMachineShellConnection(node)
            # path of the generated zip, as reported by ns_server for this node
            cb_collect_path = cb_collect_response['perNode'][
                params['nodes']]['path']
            zip_file_copied = remote_client.get_file(
                os.path.dirname(cb_collect_path),
                os.path.basename(cb_collect_path),
                log_path)
            self.log.info("%s node cb collect zip coped on client : %s"
                          % (node.ip, zip_file_copied))
            if zip_file_copied:
                # only delete the remote zip once the copy succeeded
                remote_client.execute_command("rm -f %s" % cb_collect_path)
            remote_client.disconnect()
        else:
            self.log.error("API perform_cb_collect returned False")
def get_tar(remotepath, filepath, filename, servers, todir="."):
    """On each server, create <filepath>.tar.gz from *filepath*, copy
    <remotepath>/<filename> to *todir*, then delete the remote tarball.

    `servers` may be a single server object or a list.

    Fixes vs. original: this is a module-level function with no `self`
    parameter, yet it called `self.log.error(...)` — a guaranteed
    NameError on the failure paths; errors are now reported with print().
    The SSH shell is also disconnected (it was leaked).
    """
    if type(servers) is not list:
        servers = [servers]
    for server in servers:
        shell = RemoteMachineShellConnection(server)
        try:
            _ = shell.execute_command(
                "tar -zcvf %s.tar.gz %s" % (filepath, filepath))
            file_check = shell.file_exists(remotepath, filename)
            if not file_check:
                # was: self.log.error(...) -> NameError (no self here)
                print("Tar File {} doesn't exist".format(filename))
            tar_file_copied = shell.get_file(remotepath, filename, todir)
            if not tar_file_copied:
                print("Failed to copy Tar file")
            # remove the remote tarball regardless of copy outcome,
            # matching the original best-effort cleanup
            _ = shell.execute_command("rm -rf %s.tar.gz" % filepath)
        finally:
            shell.disconnect()
def run(self):
    """Collect a cbcollect_info diag zip from the node and fetch it into
    self.path; raises if the zip is missing or the download fails."""
    remote_client = RemoteMachineShellConnection(self.server)
    timestamp = datetime.now()
    day, month, year = timestamp.day, timestamp.month, timestamp.year
    file_name = "%s-%s%s%s-diag.zip" % (self.server.ip, month, day, year)
    print("Collecting logs from %s\n" % (self.server.ip))
    out, err = remote_client.execute_cbcollect_info(file_name)
    for stream in (out, err):
        print("\n".join(stream))
    # cbcollect writes the zip into the ssh user's home directory
    if self.server.ssh_username == "root":
        user_path = "/"
    else:
        user_path = "/home/"
    src_dir = "%s%s" % (user_path, self.server.ssh_username)
    if not remote_client.file_exists(src_dir, file_name):
        raise Exception("%s doesn't exists on server" % (file_name))
    fetched = remote_client.get_file(src_dir, file_name,
                                     "%s/%s" % (self.path, file_name))
    if not fetched:
        raise Exception("Fail to download zipped logs from %s"
                        % (self.server.ip))
    print("Downloading zipped logs from %s" % (self.server.ip))
    remote_client.disconnect()
def get_all_stats(self):
    """Fetch per-client json stats files (<index>.json) from /tmp on each
    client machine into the current directory, then aggregate them.

    One node, the 'master', should aggregate stats from client and server
    nodes and push results to couchdb. Client ssh credentials are assumed
    to match the servers', so the servers list is reused with the client
    IPs substituted in.
    """
    clients = self.input.servers
    # rewrite the server entries in place to point at the client IPs
    for idx, client in enumerate(clients):
        client.ip = self.input.clients[idx]
    remotepath = '/tmp'
    for idx, client in enumerate(clients):
        shell = RemoteMachineShellConnection(client)
        filename = '{0}.json'.format(idx)
        destination = "{0}/{1}".format(os.getcwd(), filename)
        print("Getting client stats file {0} from {1}".format(filename, client))
        if not shell.get_file(remotepath, filename, destination):
            print("Unable to fetch the json file {0} on Client {1} @ {2}".format(
                remotepath + '/' + filename, idx, client.ip))
            exit(1)
    self.aggregate_all_stats(len(clients))
# NOTE(review): this is the interior of a crash-scan routine whose `def`
# line is not visible here; `remote`, `server_type`, `serverInfo`, and
# `core_files` are assumed to be bound by the enclosing scope — confirm.
# It gathers erl_crash/core files, downloads them to the local cwd, and
# finally sweeps /tmp/core* into /tmp/backup.
print "looking for erl_crash files under /opt/{0}/var/lib/{0}/".format(server_type)
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "erl_crash"))
print "looking for core* files under /opt/{0}/var/lib/{0}/".format(server_type)
core_files.extend(remote.file_starts_with("/opt/{0}/var/lib/{0}/".format(server_type), "core"))
print "looking for core* files under /tmp/"
core_files.extend(remote.file_starts_with("/tmp/", "core"))
# i counts downloaded files and suffixes the local file names
i = 0
for core_file in core_files:
    if core_file.find('erl_crash.dump') != -1:
        #let's just copy that file back
        erl_crash_file_name = "erlang-{0}-{1}.log".format(serverInfo.ip, i)
        erl_crash_path = "/opt/{0}/var/lib/{0}/{1}".format(server_type, erl_crash_file_name)
        # stage a renamed copy next to the original, then fetch it
        remote.execute_command('cp {0} {1}'.format(core_file, erl_crash_path))
        destination = "{0}/{1}".format(os.getcwd(), erl_crash_file_name)
        if remote.get_file(remotepath="/opt/{0}/var/lib/{0}/".format(server_type),
                           filename=erl_crash_file_name,
                           todir=destination):
            print 'downloaded core file : {0}'.format(destination)
            i += 1
    else:
        # native core: run the analyzer remotely and fetch its text output
        command = "/opt/{0}/bin/analyze_core".format(server_type)
        core_file_name = "core-{0}-{1}.log".format(serverInfo.ip, i)
        core_log_output = "/{0}/{1}".format("tmp", core_file_name)
        output, error = remote.execute_command('{0} {1} -f {2}'.format(command, core_file, core_log_output))
        print output
        destination = "{0}/{1}".format(os.getcwd(), core_file_name)
        if remote.get_file(remotepath="/tmp", filename=core_file_name, todir=destination):
            print 'downloaded core file : {0}'.format(destination)
            i += 1
if i > 0:
    # archive the raw cores so a later scan doesn't re-report them
    remote.execute_command('mkdir -p /tmp/backup;mv -f /tmp/core* /tmp/backup/;')
# NOTE(review): fragment of another crash-scan routine (its `def` and its
# tail are outside this view); `remote`, `server_type`, `serverInfo`, and
# `core_files` come from the enclosing scope — confirm against full file.
core_files.extend(remote.file_starts_with("/tmp/", "core"))
# i counts downloaded files and suffixes the local file names
i = 0
for core_file in core_files:
    if core_file.find('erl_crash.dump') != -1:
        #let's just copy that file back
        erl_crash_file_name = "erlang-{0}-{1}.log".format(
            serverInfo.ip, i)
        erl_crash_path = "/opt/{0}/var/lib/{0}/{1}".format(
            server_type, erl_crash_file_name)
        # stage a renamed copy next to the original, then fetch it
        remote.execute_command('cp {0} {1}'.format(
            core_file, erl_crash_path))
        destination = "{0}/{1}".format(os.getcwd(),
                                       erl_crash_file_name)
        if remote.get_file(
                remotepath="/opt/{0}/var/lib/{0}/".format(
                    server_type),
                filename=erl_crash_file_name,
                todir=destination):
            print 'downloaded core file : {0}'.format(
                destination)
            i += 1
    else:
        # native core: run the analyzer remotely and capture its output
        command = "/opt/{0}/bin/analyze_core".format(
            server_type)
        core_file_name = "core-{0}-{1}.log".format(
            serverInfo.ip, i)
        core_log_output = "/{0}/{1}".format(
            "tmp", core_file_name)
        output, error = remote.execute_command(
            '{0} {1} -f {2}'.format(command, core_file,
                                    core_log_output))
def getRemoteFile(self, host, remotepath, filename):
    """Download *filename* from *remotepath* on *host* into
    audit.DOWNLOADPATH.

    Fix vs. original: the SSH shell was never disconnected, leaking a
    connection per call; it is now closed in a `finally` block.
    """
    shell = RemoteMachineShellConnection(host)
    try:
        shell.get_file(remotepath, filename, audit.DOWNLOADPATH)
    finally:
        shell.disconnect()