예제 #1
0
파일: tail.py 프로젝트: wzhx78/Singularity
 def stream_log_for_task(self, args, task):
     """Tail the log for `task`, polling for new data every 5 seconds.

     Fetches the file's current end-of-file offset once, then repeatedly
     asks `fetch_new_log_data` for anything past that offset. Any failure
     is reported to stderr in red and ends the tail.
     """
     uri = TAIL_LOG_FORMAT.format(logfetch_base.base_uri(args), task)
     path = '{0}/{1}'.format(task, args.logfile)
     keep_trying = True
     try:
         params = {"path": path}
         logfile_response = requests.get(uri,
                                         params=params,
                                         headers=args.headers)
         logfile_response.raise_for_status()
         # int() accepts arbitrarily large offsets on both Python 2 and 3;
         # the Python-2-only long() raised NameError under Python 3.
         offset = int(logfile_response.json()['offset'])
     except ValueError:
         sys.stderr.write(
             colored(
                 'Could not get initial offset for log in task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'
                 .format(task), 'red'))
         keep_trying = False
     except Exception:
         # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
         # still propagate; an HTTPError here means the log file was not found.
         sys.stderr.write(
             colored(
                 'Could not find log file at path {0} for task {1}, check your -l arg and try again\n'
                 .format(args.logfile, task), 'red'))
         self.show_available_files(args, task)
         keep_trying = False
     while keep_trying:
         try:
             offset = self.fetch_new_log_data(uri, path, offset, args, task)
             time.sleep(5)
         except ValueError:
             sys.stderr.write(
                 colored(
                     'Could not tail logs for task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'
                     .format(task), 'red'))
             keep_trying = False
예제 #2
0
파일: tail.py 프로젝트: HubSpot/Singularity
 def stream_log_for_task(self, args, task):
     """Tail the log for `task`, polling for new data every 5 seconds.

     Fetches the initial end-of-file offset, then repeatedly asks
     `fetch_new_log_data` for anything past it. Failures are reported to
     stderr in red and stop the tail.
     """
     uri = TAIL_LOG_FORMAT.format(logfetch_base.base_uri(args), task)
     path = '{0}/{1}'.format(task, args.logfile)
     keep_trying = True
     try:
         params = {"path" : path}
         logfile_response = requests.get(uri, params=params, headers=args.headers)
         logfile_response.raise_for_status()
         # int() accepts arbitrarily large offsets on both Python 2 and 3;
         # the Python-2-only long() raised NameError under Python 3.
         offset = int(logfile_response.json()['offset'])
     except ValueError:
         sys.stderr.write(colored('Could not get initial offset for log in task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task), 'red'))
         keep_trying = False
     except Exception:
         # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
         # still propagate; an HTTPError here means the log file was not found.
         sys.stderr.write(colored('Could not find log file at path {0} for task {1}, check your -l arg and try again\n'.format(args.logfile, task), 'red'))
         self.show_available_files(args, task)
         keep_trying = False
     while keep_trying:
         try:
             offset = self.fetch_new_log_data(uri, path, offset, args, task)
             time.sleep(5)
         except ValueError:
             sys.stderr.write(colored('Could not tail logs for task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task), 'red'))
             keep_trying = False
         except errors.NoTailDataError:
             sys.stderr.write(colored('Could not tail logs for task {0}, response had no data and was not a 2xx\n'.format(task), 'red'))
             sys.stderr.flush()
             keep_trying = False
예제 #3
0
def logs_folder_files(args, task):
  """Return names of files in the task's logs folder within args' date range."""
  folder_uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
  listing = get_json_response(folder_uri, args, {'path' : '{0}/logs'.format(task)})
  if 'files' not in listing:
    # List-shaped response: derive each name from the entry's full path.
    return [entry['path'].rsplit('/')[-1] for entry in listing
            if logfetch_base.is_in_date_range(args, entry['mtime'])]
  return [entry['name'] for entry in listing['files']
          if logfetch_base.is_in_date_range(args, entry['mtime'])]
예제 #4
0
def logs_folder_files(args, task):
    """List live log file names under <task>/logs, filtered by is_valid_live_log."""
    folder_uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    listing = logfetch_base.get_json_response(folder_uri, args, {'path' : '{0}/logs'.format(task)}, True)
    if 'files' not in listing:
        # Flat-list response shape: entries carry absolute paths.
        return [item['path'].rsplit('/')[-1] for item in listing if is_valid_live_log(args, item)]
    return [item['name'] for item in listing['files'] if is_valid_live_log(args, item)]
예제 #5
0
def base_directory_files(args, task):
  """Names of valid log files sitting at the root of the task's sandbox."""
  listing = get_json_response(BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task), args)
  if 'files' not in listing:
    # List-shaped response: names come from each entry's full path.
    return [entry['path'].rsplit('/')[-1] for entry in listing if valid_logfile(entry)]
  return [entry['name'] for entry in listing['files'] if valid_logfile(entry)]
예제 #6
0
def logs_folder_files(args, task):
    """File names under <task>/logs whose mtime falls inside args' date range."""
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    listing = logfetch_base.get_json_response(uri, args, {"path": "{0}/logs".format(task)}, True)

    def in_range(entry):
        # Shared date-range filter keyed on the entry's mtime.
        return logfetch_base.is_in_date_range(args, entry["mtime"])

    if "files" not in listing:
        return [entry["path"].rsplit("/")[-1] for entry in listing if in_range(entry)]
    return [entry["name"] for entry in listing["files"] if in_range(entry)]
예제 #7
0
def logs_folder_files(args, task):
    """Collect live log file names from the task's logs directory."""
    browse_uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    payload = {'path' : '{0}/logs'.format(task)}
    entries = logfetch_base.get_json_response(browse_uri, args, payload, True)
    if 'files' in entries:
        return [e['name'] for e in entries['files'] if is_valid_live_log(args, e)]
    # Fallback shape: a plain list whose entries hold absolute paths.
    return [e['path'].rsplit('/')[-1] for e in entries if is_valid_live_log(args, e)]
예제 #8
0
파일: tail.py 프로젝트: wzhx78/Singularity
def base_directory_files(args, task):
    """Names of valid log files found at the root of the task sandbox."""
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    listing = get_json_response(uri, args)
    if 'files' not in listing:
        # List-shaped response: derive the name from each entry's full path.
        return [e['path'].rsplit('/')[-1] for e in listing if valid_logfile(e)]
    return [e['name'] for e in listing['files'] if valid_logfile(e)]
예제 #9
0
def singularity_s3logs_uri(args):
  """Build the S3-logs endpoint URI for whichever id(s) args provide.

  Precedence: taskId, then deployId+requestId, then requestId alone;
  exits with a usage message when none are set.
  """
  if args.taskId:
    id_path = TASK_FORMAT.format(args.taskId)
  elif args.deployId and args.requestId:
    id_path = DEPLOY_FORMAT.format(args.requestId, args.deployId)
  elif args.requestId:
    id_path = REQUEST_FORMAT.format(args.requestId)
  else:
    exit("Specify one of taskId, requestId and deployId, or requestId")
  return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), id_path)
예제 #10
0
def logs_folder_files(args, task):
    """Return the names of files in <task>/logs whose mtime falls in the
    date range given by args.

    Handles both response shapes: a dict with a 'files' list (entries
    carry 'name') and a bare list of entries (entries carry a full 'path').
    """
    uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    # Bug fix: `args` was omitted, so the params dict was passed where the
    # sibling helpers expect args — get_json_response(uri, args, params).
    files_json = get_json_response(uri, args, {'path': '{0}/logs'.format(task)})
    if 'files' in files_json:
        files = files_json['files']
        return [
            f['name'] for f in files
            if logfetch_base.is_in_date_range(args, f['mtime'])
        ]
    else:
        return [
            f['path'].rsplit('/')[-1] for f in files_json
            if logfetch_base.is_in_date_range(args, f['mtime'])
        ]
예제 #11
0
 def stream_log_for_task(self, args, task):
     """Continuously tail the log for `task`, polling every 5 seconds.

     Stops (after writing a red message to stderr) as soon as fetching the
     initial offset or a poll raises ValueError.
     """
     uri = TAIL_LOG_FORMAT.format(logfetch_base.base_uri(args), task)
     path = '{0}/{1}'.format(task, args.logfile)
     # Both failure paths emit the identical message, so build it once.
     failure_message = 'Could not tail logs for task {0}, check that the task is still active and that the slave it runs on has not been decommissioned\n'.format(task)
     try:
         offset = self.get_initial_offset(uri, path)
     except ValueError:
         sys.stderr.write(colored(failure_message, 'red'))
         return
     while True:
         try:
             offset = self.fetch_new_log_data(uri, path, offset, args, task)
             time.sleep(5)
         except ValueError:
             sys.stderr.write(colored(failure_message, 'red'))
             return
예제 #12
0
def files_json(args, task):
  """Fetch the JSON directory listing for the task's sandbox root."""
  return get_json_response(BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task), args)
예제 #13
0
def task_history(args, task):
  """Fetch the task's history record from the Singularity API."""
  history_uri = TASK_HISTORY_FORMAT.format(logfetch_base.base_uri(args), task)
  return get_json_response(history_uri, args)
예제 #14
0
def files_json(args, task):
    """JSON listing of the task sandbox's top-level directory."""
    browse_uri = BROWSE_FOLDER_FORMAT.format(logfetch_base.base_uri(args), task)
    return get_json_response(browse_uri, args)
예제 #15
0
def s3_task_logs_uri(args, idString):
    """S3 logs search URI scoped to a single task id."""
    base = logfetch_base.base_uri(args)
    return S3LOGS_URI_FORMAT.format(base, TASK_FORMAT.format(idString))
예제 #16
0
def s3_task_logs_uri(args, idString):
  """S3 logs search URI restricted to the given task id."""
  task_path = TASK_FORMAT.format(idString)
  return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), task_path)
예제 #17
0
def s3_request_logs_uri(args, idString):
  """S3 logs search URI restricted to the given request id."""
  request_path = REQUEST_FORMAT.format(idString)
  return S3LOGS_URI_FORMAT.format(logfetch_base.base_uri(args), request_path)
예제 #18
0
def task_history(args, task):
    """Retrieve the task-history JSON for `task` from the Singularity API."""
    return get_json_response(TASK_HISTORY_FORMAT.format(logfetch_base.base_uri(args), task), args)
예제 #19
0
def s3_request_logs_uri(args, idString):
    """Build the S3 logs URI for a request-scoped search."""
    base = logfetch_base.base_uri(args)
    return S3LOGS_URI_FORMAT.format(base, REQUEST_FORMAT.format(idString))
예제 #20
0
def singularity_s3logs_uri(args, idString):
  """S3 logs URI for a single task id (task-scoped search path)."""
  base = logfetch_base.base_uri(args)
  return S3LOGS_URI_FORMAT.format(base, TASK_FORMAT.format(idString))