def stream_log_for_task(self, args, task):
    """Continuously tail a task's log file via the Singularity API.

    Fetches the current end-of-file offset for ``args.logfile`` in the
    task sandbox, then polls every 5 seconds for newly appended data
    until an error ends the stream.

    Args:
        args: parsed CLI namespace; reads ``logfile`` and ``headers``.
        task: Singularity task id whose log is being tailed.
    """
    uri = TAIL_LOG_FORMAT.format(logfetch_base.base_uri(args), task)
    path = '{0}/{1}'.format(task, args.logfile)
    keep_trying = True
    try:
        params = {"path": path}
        logfile_response = requests.get(uri, params=params, headers=args.headers)
        logfile_response.raise_for_status()
        # int() replaces the Python-2-only long() builtin; Python 2 ints
        # auto-promote on overflow, so this is compatible with both.
        offset = int(logfile_response.json()['offset'])
    except ValueError:
        sys.stderr.write(colored('Could not get initial offset for log in task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task), 'red'))
        keep_trying = False
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # still propagate. Covers HTTP errors from raise_for_status().
        sys.stderr.write(colored('Could not find log file at path {0} for task {1}, check your -l arg and try again\n'.format(args.logfile, task), 'red'))
        self.show_available_files(args, task)
        keep_trying = False
    while keep_trying:
        try:
            offset = self.fetch_new_log_data(uri, path, offset, args, task)
            time.sleep(5)
        except ValueError:
            sys.stderr.write(colored('Could not tail logs for task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task), 'red'))
            keep_trying = False
def stream_log_for_task(self, args, task):
    """Continuously tail a task's log file via the Singularity API.

    Fetches the current end-of-file offset for ``args.logfile`` in the
    task sandbox, then polls every 5 seconds for newly appended data
    until an error (bad offset, or a NoTailDataError from the fetch
    helper) ends the stream.

    Args:
        args: parsed CLI namespace; reads ``logfile`` and ``headers``.
        task: Singularity task id whose log is being tailed.
    """
    uri = TAIL_LOG_FORMAT.format(logfetch_base.base_uri(args), task)
    path = '{0}/{1}'.format(task, args.logfile)
    keep_trying = True
    try:
        params = {"path" : path}
        logfile_response = requests.get(uri, params=params, headers=args.headers)
        logfile_response.raise_for_status()
        # int() replaces the Python-2-only long() builtin; Python 2 ints
        # auto-promote on overflow, so this is compatible with both.
        offset = int(logfile_response.json()['offset'])
    except ValueError:
        sys.stderr.write(colored('Could not get initial offset for log in task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task), 'red'))
        keep_trying = False
    except Exception:
        # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt
        # still propagate. Covers HTTP errors from raise_for_status().
        sys.stderr.write(colored('Could not find log file at path {0} for task {1}, check your -l arg and try again\n'.format(args.logfile, task), 'red'))
        self.show_available_files(args, task)
        keep_trying = False
    while keep_trying:
        try:
            offset = self.fetch_new_log_data(uri, path, offset, args, task)
            time.sleep(5)
        except ValueError:
            sys.stderr.write(colored('Could not tail logs for task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task), 'red'))
            keep_trying = False
        except errors.NoTailDataError:
            sys.stderr.write(colored('Could not tail logs for task {0}, response had no data and was not a 2xx\n'.format(task), 'red'))
            sys.stderr.flush()
            keep_trying = False
def logs_folder_files(args, task):
    """Return log file names under the task sandbox's ``logs`` folder,
    keeping only entries whose mtime is inside the requested date range."""
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    listing = get_json_response(uri, args, {'path' : '{0}/logs'.format(task)})

    def wanted(entry):
        # Date-range filter shared by both response shapes.
        return logfetch_base.is_in_date_range(args, entry['mtime'])

    if 'files' in listing:
        # Newer API shape: a dict with a 'files' list of entries.
        return [entry['name'] for entry in listing['files'] if wanted(entry)]
    # Older API shape: a bare list of entries with full paths.
    return [entry['path'].rsplit('/')[-1] for entry in listing if wanted(entry)]
def logs_folder_files(args, task):
    """Return names of valid live log files found in the task sandbox's
    ``logs`` folder."""
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    listing = logfetch_base.get_json_response(uri, args, {'path' : '{0}/logs'.format(task)}, True)
    if 'files' in listing:
        # Newer API shape: dict containing a 'files' list.
        entries = listing['files']
        return [entry['name'] for entry in entries if is_valid_live_log(args, entry)]
    # Older API shape: bare list of entries carrying full paths.
    return [entry['path'].rsplit('/')[-1] for entry in listing if is_valid_live_log(args, entry)]
def base_directory_files(args, task):
    """Return valid log file names found at the top level of the task
    sandbox directory."""
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    listing = get_json_response(uri, args)
    if 'files' in listing:
        # Newer API shape: dict containing a 'files' list.
        return [item['name'] for item in listing['files'] if valid_logfile(item)]
    # Older API shape: bare list of entries carrying full paths.
    return [item['path'].rsplit('/')[-1] for item in listing if valid_logfile(item)]
def logs_folder_files(args, task):
    """Return log file names under the task sandbox's ``logs`` folder,
    filtered to the requested date range."""
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    listing = logfetch_base.get_json_response(uri, args, {"path": "{0}/logs".format(task)}, True)

    def in_range(entry):
        # Date-range filter shared by both response shapes.
        return logfetch_base.is_in_date_range(args, entry["mtime"])

    if "files" in listing:
        # Newer API shape: dict containing a 'files' list.
        return [entry["name"] for entry in listing["files"] if in_range(entry)]
    # Older API shape: bare list of entries carrying full paths.
    return [entry["path"].rsplit("/")[-1] for entry in listing if in_range(entry)]
def base_directory_files(args, task):
    """List the valid log files sitting at the root of the task sandbox."""
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    response = get_json_response(uri, args)
    if 'files' not in response:
        # Older API shape: bare list of entries carrying full paths.
        return [item['path'].rsplit('/')[-1] for item in response if valid_logfile(item)]
    # Newer API shape: dict containing a 'files' list.
    return [item['name'] for item in response['files'] if valid_logfile(item)]
def singularity_s3logs_uri(args):
    """Build the S3 logs endpoint URI for whichever id the user supplied.

    Precedence: taskId first, then requestId+deployId, then requestId
    alone; exits with an error message when none of them are given.
    """
    if args.taskId:
        suffix = TASK_FORMAT.format(args.taskId)
    elif args.deployId and args.requestId:
        suffix = DEPLOY_FORMAT.format(args.requestId, args.deployId)
    elif args.requestId:
        suffix = REQUEST_FORMAT.format(args.requestId)
    else:
        # Raises SystemExit, so the return below is never reached here.
        exit("Specify one of taskId, requestId and deployId, or requestId")
    return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), suffix)
def logs_folder_files(args, task):
    """Return log file names from the task sandbox's ``logs`` folder that
    fall inside the requested date range."""
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    # NOTE(review): sibling variants pass `args` as the second argument to
    # get_json_response; confirm this call matches the helper's signature
    # in this module version.
    listing = get_json_response(uri, {'path': '{0}/logs'.format(task)})

    def in_range(entry):
        return logfetch_base.is_in_date_range(args, entry['mtime'])

    if 'files' in listing:
        # Newer API shape: dict containing a 'files' list.
        return [entry['name'] for entry in listing['files'] if in_range(entry)]
    # Older API shape: bare list of entries carrying full paths.
    return [entry['path'].rsplit('/')[-1] for entry in listing if in_range(entry)]
def stream_log_for_task(self, args, task):
    """Tail a task's log: obtain the starting offset, then poll for new
    data every 5 seconds until a ValueError stops the stream."""
    uri = TAIL_LOG_FORMAT.format(logfetch_base.base_uri(args), task)
    path = '{0}/{1}'.format(task, args.logfile)
    # Same message is used for both the initial-offset and polling failures.
    failure = 'Could not tail logs for task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'
    try:
        offset = self.get_initial_offset(uri, path)
    except ValueError:
        sys.stderr.write(colored(failure.format(task), 'red'))
        return
    while True:
        try:
            offset = self.fetch_new_log_data(uri, path, offset, args, task)
            time.sleep(5)
        except ValueError:
            sys.stderr.write(colored(failure.format(task), 'red'))
            return
def files_json(args, task):
    """Fetch the JSON directory listing for the task's sandbox folder."""
    browse_uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    return get_json_response(browse_uri, args)
def task_history(args, task):
    """Fetch the task-history JSON document for the given task id."""
    history_uri = TASK_HISTORY_FORMAT.format(logfetch_base.base_uri(args), task)
    return get_json_response(history_uri, args)
def s3_task_logs_uri(args, idString):
    """Return the S3 logs endpoint URI scoped to a single task id."""
    task_path = TASK_FORMAT.format(idString)
    return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), task_path)
def s3_request_logs_uri(args, idString):
    """Return the S3 logs endpoint URI scoped to a request id."""
    request_path = REQUEST_FORMAT.format(idString)
    return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), request_path)
def singularity_s3logs_uri(args, idString):
    """Return the S3 logs endpoint URI for the given task id string."""
    task_path = TASK_FORMAT.format(idString)
    return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), task_path)