Code Example #1
File: api.py Project: CaeserNieh/hue
def get_log(request, oozie_workflow, make_links=True, log_start_pattern=None, log_end_pattern=None):
  logs = {}
  is_really_done = False

  for action in oozie_workflow.get_working_actions():
    try:
      if action.externalId:
        data = job_single_logs(request, **{'job': action.externalId})

        if data and 'logs' in data:
          action_logs = data['logs'][1]

          if log_start_pattern:
            re_log_start = re.compile(log_start_pattern, re.M | re.DOTALL)
            match = re_log_start.search(action_logs)
            if match:
              action_logs = match.group(1).strip()
            else:
              LOG.debug('Failed to find given start log pattern in logs: %s' % log_start_pattern)

          if make_links:
            action_logs = LinkJobLogs._make_links(action_logs)

          logs[action.name] = action_logs

          if log_end_pattern:
            re_log_end = re.compile(log_end_pattern)
            is_really_done = re_log_end.search(action_logs) is not None
            if is_really_done and not action_logs:
              LOG.warning('Unable to scrape full logs, try increasing the jobbrowser log_offset configuration value.')
    except Exception as e:
      LOG.error('An error occurred while watching the job running: %(error)s' % {'error': e})
      is_really_done = True
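
The two pattern arguments are plain regular-expression strings, and `log_start_pattern` must contain one capture group since the code keeps `group(1)`. A minimal usage sketch with hypothetical launcher markers (the fuller variant in Code Example #3 below returns the `(logs, workflow_actions, is_really_done)` tuple):

# Hypothetical Oozie launcher markers; the real patterns depend on the action type.
START_PATTERN = r'>>> Invoking Main class now >>>(.+)'  # one capture group required
END_PATTERN = r'<<< Invocation of Main class completed <<<'

logs, workflow_actions, is_really_done = get_log(
    request, oozie_workflow,
    make_links=True,
    log_start_pattern=START_PATTERN,
    log_end_pattern=END_PATTERN,
)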
Code Example #2
File: views.py Project: cloudera/hue
def job_executor_logs(request, job, attempt_index=0, name='syslog', offset=LOG_OFFSET_BYTES):
  response = {'status': -1}
  try:
    log = ''
    if job.status not in ('NEW', 'SUBMITTED', 'ACCEPTED'):
      log = job.history_server_api.download_executors_logs(request, job, name, offset)
    response['status'] = 0
    response['log'] = LinkJobLogs._make_hdfs_links(log)
  except Exception as e:
    response['log'] = _('Failed to retrieve executor log: %s' % e)
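
These views share a status convention: `response['status']` stays -1 unless the fetch succeeds, so callers branch on that field rather than on the HTTP status. A hypothetical client-side check (`display` and `display_error` are stand-ins):

import json

data = json.loads(resp.content)  # resp: the JsonResponse this view eventually returns
if data['status'] == 0:
    display(data['log'])         # rendered log with HDFS links
else:
    display_error(data['log'])   # here 'log' carries the failure message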
Code Example #3
def get_log(request,
            oozie_workflow,
            make_links=True,
            log_start_pattern=None,
            log_end_pattern=None):
    logs = {}
    is_really_done = False

    for action in oozie_workflow.get_working_actions():
        try:
            if action.externalId:
                data = job_single_logs(request, **{'job': action.externalId})

                if data and 'logs' in data:
                    action_logs = data['logs'][1]

                    if log_start_pattern:
                        re_log_start = re.compile(log_start_pattern,
                                                  re.M | re.DOTALL)
                        if re_log_start.search(action_logs):
                            action_logs = re.search(
                                re_log_start, action_logs).group(1).strip()
                        else:
                            LOG.debug(
                                'Failed to find given start log pattern in logs: %s'
                                % log_start_pattern)

                    if log_end_pattern:
                        re_log_end = re.compile(log_end_pattern)
                        is_really_done = re_log_end.search(
                            action_logs
                        ) is not None or oozie_workflow.status == 'KILLED'
                        if is_really_done and not action_logs:
                            LOG.warning(
                                'Unable to scrape full logs, try increasing the jobbrowser log_offset configuration value.'
                            )

                    if make_links:
                        action_logs = LinkJobLogs._make_links(action_logs)

                    logs[action.name] = action_logs

        except Exception:
            LOG.exception('An error occurred while watching the job running')
            is_really_done = True

    workflow_actions = _get_workflow_actions(oozie_workflow, logs,
                                             is_really_done)

    return logs, workflow_actions, is_really_done
Code Example #4
File: api.py Project: zlcken/hue
  def get_log(self, request, oozie_workflow):
    logs = {}
    is_really_done = False

    for action in oozie_workflow.get_working_actions():
      try:
        if action.externalId:
          data = job_single_logs(request, **{'job': action.externalId})
          if data:
            matched_logs = self._match_logs(data)
            logs[action.name] = LinkJobLogs._make_links(matched_logs)
            is_really_done = OozieApi.RE_LOG_END.search(data['logs'][1]) is not None

      except Exception as e:
        LOG.error('An error occurred while watching the job running: %(error)s' % {'error': e})
        is_really_done = True
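
`_match_logs` and `OozieApi.RE_LOG_END` are defined elsewhere in the class and not shown in this excerpt. Judging from the standalone variant in Code Example #1, `_match_logs` plausibly extracts the launcher-log section captured by a class-level start pattern; a hedged reconstruction (`RE_LOG_START` is a stand-in name, not necessarily Hue's attribute):

  def _match_logs(self, data):
    # Reconstruction for illustration only, based on Code Example #1.
    logs = data['logs'][1]
    match = OozieApi.RE_LOG_START.search(logs)
    return match.group(1).strip() if match else None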
Code Example #5
File: api.py Project: mazensibai/hue
  def get_log(self, request, oozie_workflow):
    logs = {}
    is_really_done = False

    for action in oozie_workflow.get_working_actions():
      try:
        if action.externalId:
          data = job_single_logs(request, **{'job': action.externalId})

          if data and 'logs' in data:
            matched_logs = self._match_logs(data)

            if matched_logs:
              logs[action.name] = LinkJobLogs._make_links(matched_logs)

            is_really_done = OozieApi.RE_LOG_END.search(data['logs'][1]) is not None
            if is_really_done and not matched_logs:
              LOG.warning('Unable to scrape full pig logs, try increasing the jobbrowser log_offset configuration value.')
      except Exception as e:
        LOG.error('An error occurred while watching the job running: %(error)s' % {'error': e})
        is_really_done = True
Code Example #6
    def get_log(self, request, oozie_workflow):
        logs = {}
        is_really_done = False

        for action in oozie_workflow.get_working_actions():
            try:
                if action.externalId:
                    data = job_single_logs(request, **{"job": action.externalId})

                    if data and "logs" in data:
                        matched_logs = self._match_logs(data)

                        if matched_logs:
                            logs[action.name] = LinkJobLogs._make_links(matched_logs)

                        is_really_done = OozieApi.RE_LOG_END.search(data["logs"][1]) is not None
                        if is_really_done and not matched_logs:
                            LOG.warning(
                                "Unable to scrape full pig logs, try increasing the jobbrowser log_offset configuration value."
                            )
            except Exception as e:
                LOG.error("An error occurred while watching the job running: %(error)s" % {"error": e})
                is_really_done = True
Code Example #7
File: views.py Project: ronwxy/hue
  root = Resource(get_log_client(log_link), urlparse.urlsplit(log_link)[2], urlencode=False)
  debug_info = ''
  try:
    response = root.get(link, params=params)
    log = html.fromstring(response, parser=html.HTMLParser()).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()
  except Exception as e:
    log = _('Failed to retrieve log: %s' % e)
    try:
      debug_info = '\nLog Link: %s' % log_link
      debug_info += '\nHTML Response: %s' % response
      LOGGER.error(debug_info)
    except Exception:
      LOGGER.exception('failed to create debug info')

  response = {'log': LinkJobLogs._make_hdfs_links(log), 'debug': debug_info}

  return JsonResponse(response)



@check_job_permission
def job_single_logs(request, job):
  """
  Try to smartly detect the most useful task attempt (e.g. Oozie launcher, failed task) and get its MR logs.
  """
  def cmp_exec_time(task1, task2):
    # `cmp` was removed in Python 3; (a > b) - (a < b) yields the same -1/0/1.
    return (task1.execStartTimeMs > task2.execStartTimeMs) - (task1.execStartTimeMs < task2.execStartTimeMs)

  task = None
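
`cmp_exec_time` is a Python 2-style comparator. In Python 3 the same ordering is normally expressed with a key function, or bridged with `functools.cmp_to_key` when an API still expects a comparator (`tasks` is a stand-in list):

from functools import cmp_to_key

# Preferred Python 3 form: sort attempts by execution start time directly.
tasks_sorted = sorted(tasks, key=lambda task: task.execStartTimeMs)

# Bridge form, reusing the comparator unchanged.
tasks_sorted = sorted(tasks, key=cmp_to_key(cmp_exec_time))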
Code Example #8
        params = {}
        if offset != 0:
            params['start'] = offset

        root = Resource(get_log_client(log_link),
                        urlparse.urlsplit(log_link)[2],
                        urlencode=False)
        api_resp = None

        try:
            api_resp = root.get(link, params=params)
            log = html.fromstring(api_resp, parser=html.HTMLParser()).xpath(
                '/html/body/table/tbody/tr/td[2]')[0].text_content()

            response['status'] = 0
            response['log'] = LinkJobLogs._make_hdfs_links(log)
        except Exception as e:
            response['log'] = _('Failed to retrieve log: %s' % e)
            try:
                debug_info = '\nLog Link: %s' % log_link
                if api_resp:
                    debug_info += '\nHTML Response: %s' % api_resp
                response['debug'] = debug_info
                LOG.error(debug_info)
            except Exception:
                LOG.exception('failed to create debug info')

    return JsonResponse(response)


@check_job_permission
Code Example #9
File: tests.py Project: guoqinga/hue
def test_make_log_links():
  """
   Unit test for models.LinkJobLogs._make_links
  """

  # FileBrowser
  assert_equal(
      """<a href="/filebrowser/view=/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a>  &lt;dir&gt;""",
      LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp  <dir>')
  )
  assert_equal(
      """<a href="/filebrowser/view=/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a>&lt;dir&gt;""",
      LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp<dir>')
  )
  assert_equal(
      """output: <a href="/filebrowser/view=/user/romain/tmp" target="_blank">/user/romain/tmp</a>  &lt;dir&gt;""",
      LinkJobLogs._make_links('output: /user/romain/tmp  <dir>')
  )
  assert_equal(
      'Successfully read 3760 records (112648 bytes) from: &quot;<a href="/filebrowser/view=/user/hue/pig/examples/data/midsummer.txt" target="_blank">/user/hue/pig/examples/data/midsummer.txt</a>&quot;',
      LinkJobLogs._make_links('Successfully read 3760 records (112648 bytes) from: "/user/hue/pig/examples/data/midsummer.txt"')
  )
  assert_equal(
      'data,upper_case  MAP_ONLY  <a href="/filebrowser/view=/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>,',
      LinkJobLogs._make_links('data,upper_case  MAP_ONLY  hdfs://localhost:8020/user/romain/out/fffff,')
  )
  assert_equal(
      'MAP_ONLY  <a href="/filebrowser/view=/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>\n2013',
      LinkJobLogs._make_links('MAP_ONLY  hdfs://localhost:8020/user/romain/out/fffff\n2013')
  )
  assert_equal(
      ' <a href="/filebrowser/view=/jobs.tsv" target="_blank">/jobs.tsv</a> ',
      LinkJobLogs._make_links(' /jobs.tsv ')
  )
  assert_equal(
      '<a href="/filebrowser/view=/user/romain/job_pos_2012.tsv" target="_blank">hdfs://localhost:8020/user/romain/job_pos_2012.tsv</a>',
      LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/job_pos_2012.tsv')
  )

  # JobBrowser
  assert_equal(
      """<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
      LinkJobLogs._make_links('job_201306261521_0058')
  )
  assert_equal(
      """Hadoop Job IDs executed by Pig: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
      LinkJobLogs._make_links('Hadoop Job IDs executed by Pig: job_201306261521_0058')
  )
  assert_equal(
      """MapReduceLauncher  - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
      LinkJobLogs._make_links('MapReduceLauncher  - HadoopJobId: job_201306261521_0058')
  )
  assert_equal(
      """- More information at: http://localhost:50030/jobdetails.jsp?jobid=<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
      LinkJobLogs._make_links('- More information at: http://localhost:50030/jobdetails.jsp?jobid=job_201306261521_0058')
  )
  assert_equal(
      """ Logging error messages to: job_201307091553_0028/attempt_201307091553_002""",
      LinkJobLogs._make_links(' Logging error messages to: job_201307091553_0028/attempt_201307091553_002')
  )
  assert_equal(
      """ pig-job_201307091553_0028.log""",
      LinkJobLogs._make_links(' pig-job_201307091553_0028.log')
  )
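
The assertions pin the behavior down well enough to sketch it: text is HTML-escaped first, HDFS URIs and recognizable absolute paths become /filebrowser links with any scheme://host:port prefix stripped from the href, and bare MapReduce job IDs become /jobbrowser links. A deliberately simplified regex sketch of that idea (`make_links` is a stand-in, not Hue's implementation, and it does not reproduce every edge case above):

import html
import re

# Simplified patterns, illustrative only.
HDFS_RE = re.compile(r'(hdfs://[^\s,&;"<>]+|/user/[^\s,&;"<>]+|/jobs\.tsv)')
JOB_RE = re.compile(r'\b(job_\d+_\d+)\b')

def make_links(log):
    log = html.escape(log)
    # HDFS paths -> FileBrowser links; drop any scheme://host:port prefix.
    log = HDFS_RE.sub(
        lambda m: '<a href="/filebrowser/view=%s" target="_blank">%s</a>'
                  % (re.sub(r'^hdfs://[^/]+', '', m.group(1)), m.group(1)),
        log)
    # MapReduce job IDs -> JobBrowser links.
    log = JOB_RE.sub(r'<a href="/jobbrowser/jobs/\1" target="_blank">\1</a>', log)
    return log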
Code Example #10
  if log_link:
    link = '/%s/' % name
    params = {}
    if offset != 0:
      params['start'] = offset

    root = Resource(get_log_client(log_link), urlparse.urlsplit(log_link)[2], urlencode=False)
    api_resp = None

    try:
      api_resp = root.get(link, params=params)
      log = html.fromstring(api_resp, parser=html.HTMLParser()).xpath('/html/body/table/tbody/tr/td[2]')[0].text_content()

      response['status'] = 0
      response['log'] = LinkJobLogs._make_hdfs_links(log)
    except Exception as e:
      response['log'] = _('Failed to retrieve log: %s' % e)
      try:
        debug_info = '\nLog Link: %s' % log_link
        if api_resp:
          debug_info += '\nHTML Response: %s' % api_resp
        response['debug'] = debug_info
        LOG.error(debug_info)
      except Exception:
        LOG.exception('failed to create debug info')

  return JsonResponse(response)


@check_job_permission
Code Example #11
File: views.py Project: soongxin/hhue
def job_attempt_logs_json(request,
                          job,
                          attempt_index=0,
                          name='syslog',
                          offset=LOG_OFFSET_BYTES,
                          is_embeddable=False):
    """For async log retrieval as Yarn servers are very slow"""
    log_link = None
    response = {'status': -1}

    try:
        jt = get_api(request.user, request.jt)
        app = jt.get_application(job.jobId)

        if app['applicationType'] == 'MAPREDUCE':
            if app['finalStatus'] in ('SUCCEEDED', 'FAILED', 'KILLED'):
                attempt_index = int(attempt_index)
                if not job.job_attempts['jobAttempt']:
                    response = {'status': 0, 'log': _('Job has no tasks')}
                else:
                    attempt = job.job_attempts['jobAttempt'][attempt_index]

                    log_link = attempt['logsLink']
                    # Reformat log link to use YARN RM, replace node addr with node ID addr
                    log_link = log_link.replace(attempt['nodeHttpAddress'],
                                                attempt['nodeId'])
            elif app['state'] == 'RUNNING':
                log_link = app['amContainerLogs']
        elif app.get('amContainerLogs'):
            log_link = app.get('amContainerLogs')
    except (KeyError, RestException) as e:
        raise KeyError(
            _("Cannot find job attempt '%(id)s'.") % {'id': job.jobId}, e)
    except Exception as e:
        raise Exception(
            _("Failed to get application for job %s: %s") % (job.jobId, e))

    if log_link:
        link = '/%s/' % name
        params = {'doAs': request.user.username}

        if offset != 0:
            params['start'] = offset

        root = Resource(get_log_client(log_link),
                        urllib.parse.urlsplit(log_link)[2],
                        urlencode=False)
        api_resp = None

        try:
            api_resp = root.get(link, params=params)
            log = html.fromstring(api_resp, parser=html.HTMLParser()).xpath(
                '/html/body/table/tbody/tr/td[2]')[0].text_content()

            response['status'] = 0
            response['log'] = LinkJobLogs._make_hdfs_links(log, is_embeddable)
        except Exception as e:
            response['log'] = _('Failed to retrieve log: %s' % e)
            try:
                debug_info = '\nLog Link: %s' % log_link
                if api_resp:
                    debug_info += '\nHTML Response: %s' % api_resp
                response['debug'] = debug_info
                LOG.error(debug_info)
            except Exception:
                LOG.exception('failed to create debug info')

    return JsonResponse(response)
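
The log-link rewrite in the middle of this view is easy to miss: the ResourceManager's `logsLink` points at the NodeManager's HTTP address, and the code swaps in the node ID so the log URL resolves. A toy illustration with hypothetical values:

attempt = {
    'logsLink': 'http://worker01:8042/node/containerlogs/container_0001/hue',
    'nodeHttpAddress': 'worker01:8042',
    'nodeId': 'worker01:45454',
}
log_link = attempt['logsLink'].replace(attempt['nodeHttpAddress'], attempt['nodeId'])
# -> 'http://worker01:45454/node/containerlogs/container_0001/hue'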
Code Example #12
File: views.py Project: cquptEthan/hue
        response = root.get(link, params=params)
        log = (
            html.fromstring(response, parser=html.HTMLParser())
            .xpath("/html/body/table/tbody/tr/td[2]")[0]
            .text_content()
        )
    except Exception as e:
        log = _("Failed to retrieve log: %s" % e)
        try:
            debug_info = "\nLog Link: %s" % log_link
            debug_info += "\nHTML Response: %s" % response
            LOGGER.error(debug_info)
        except Exception:
            LOGGER.exception("failed to create debug info")

    response = {"log": LinkJobLogs._make_hdfs_links(log), "debug": debug_info}

    return JsonResponse(response)


@check_job_permission
def job_single_logs(request, job):
    """
  Try to smartly detect the most useful task attempt (e.g. Oozie launcher, failed task) and get its MR logs.
  """

    def cmp_exec_time(task1, task2):
        # `cmp` was removed in Python 3; (a > b) - (a < b) yields the same -1/0/1.
        return (task1.execStartTimeMs > task2.execStartTimeMs) - (task1.execStartTimeMs < task2.execStartTimeMs)

    task = None
Code Example #13
File: tests.py Project: ronwxy/hue
def test_make_log_links():
    """
   Unit test for models.LinkJobLogs._make_links
  """

    # FileBrowser
    assert_equal(
        """<a href="/filebrowser/view=/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a>  &lt;dir&gt;""",
        LinkJobLogs._make_links(
            'hdfs://localhost:8020/user/romain/tmp  <dir>'))
    assert_equal(
        """<a href="/filebrowser/view=/user/romain/tmp" target="_blank">hdfs://localhost:8020/user/romain/tmp</a>&lt;dir&gt;""",
        LinkJobLogs._make_links('hdfs://localhost:8020/user/romain/tmp<dir>'))
    assert_equal(
        """output: <a href="/filebrowser/view=/user/romain/tmp" target="_blank">/user/romain/tmp</a>  &lt;dir&gt;""",
        LinkJobLogs._make_links('output: /user/romain/tmp  <dir>'))
    assert_equal(
        'Successfully read 3760 records (112648 bytes) from: &quot;<a href="/filebrowser/view=/user/hue/pig/examples/data/midsummer.txt" target="_blank">/user/hue/pig/examples/data/midsummer.txt</a>&quot;',
        LinkJobLogs._make_links(
            'Successfully read 3760 records (112648 bytes) from: "/user/hue/pig/examples/data/midsummer.txt"'
        ))
    assert_equal(
        'data,upper_case  MAP_ONLY  <a href="/filebrowser/view=/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>,',
        LinkJobLogs._make_links(
            'data,upper_case  MAP_ONLY  hdfs://localhost:8020/user/romain/out/fffff,'
        ))
    assert_equal(
        'MAP_ONLY  <a href="/filebrowser/view=/user/romain/out/fffff" target="_blank">hdfs://localhost:8020/user/romain/out/fffff</a>\n2013',
        LinkJobLogs._make_links(
            'MAP_ONLY  hdfs://localhost:8020/user/romain/out/fffff\n2013'))
    assert_equal(
        ' <a href="/filebrowser/view=/jobs.tsv" target="_blank">/jobs.tsv</a> ',
        LinkJobLogs._make_links(' /jobs.tsv '))
    assert_equal(
        '<a href="/filebrowser/view=/user/romain/job_pos_2012.tsv" target="_blank">hdfs://localhost:8020/user/romain/job_pos_2012.tsv</a>',
        LinkJobLogs._make_links(
            'hdfs://localhost:8020/user/romain/job_pos_2012.tsv'))

    # JobBrowser
    assert_equal(
        """<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
        LinkJobLogs._make_links('job_201306261521_0058'))
    assert_equal(
        """Hadoop Job IDs executed by Pig: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
        LinkJobLogs._make_links(
            'Hadoop Job IDs executed by Pig: job_201306261521_0058'))
    assert_equal(
        """MapReduceLauncher  - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
        LinkJobLogs._make_links(
            'MapReduceLauncher  - HadoopJobId: job_201306261521_0058'))
    assert_equal(
        """- More information at: http://localhost:50030/jobdetails.jsp?jobid=<a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>""",
        LinkJobLogs._make_links(
            '- More information at: http://localhost:50030/jobdetails.jsp?jobid=job_201306261521_0058'
        ))
    assert_equal(
        """ Logging error messages to: <a href="/jobbrowser/jobs/job_201307091553_0028" target="_blank">job_201307091553_0028</a>/attempt_201307091553_002""",
        LinkJobLogs._make_links(
            ' Logging error messages to: job_201307091553_0028/attempt_201307091553_002'
        ))
    assert_equal(
        """ pig-<a href="/jobbrowser/jobs/job_201307091553_0028" target="_blank">job_201307091553_0028</a>.log""",
        LinkJobLogs._make_links(' pig-job_201307091553_0028.log'))
    assert_equal(
        """MapReduceLauncher  - HadoopJobId: <a href="/jobbrowser/jobs/job_201306261521_0058" target="_blank">job_201306261521_0058</a>. Look at the UI""",
        LinkJobLogs._make_links(
            'MapReduceLauncher  - HadoopJobId: job_201306261521_0058. Look at the UI'
        ))
Code Example #14
File: views.py Project: luodongfu/XLS_BigData_Hue
                    urlencode=False)
    debug_info = ''
    try:
        response = root.get(link, params=params)
        log = html.fromstring(response, parser=html.HTMLParser()).xpath(
            '/html/body/table/tbody/tr/td[2]')[0].text_content()
    except Exception as e:
        log = _('Failed to retrieve log: %s' % e)
        try:
            debug_info = '\nLog Link: %s' % log_link
            debug_info += '\nHTML Response: %s' % response
            LOGGER.error(debug_info)
        except Exception:
            LOGGER.exception('failed to create debug info')

    response = {'log': LinkJobLogs._make_hdfs_links(log), 'debug': debug_info}

    return JsonResponse(response)


@check_job_permission
def job_single_logs(request, job):
    """
  Try to smartly detect the most useful task attempt (e.g. Oozie launcher, failed task) and get its MR logs.
  """
    def cmp_exec_time(task1, task2):
        return cmp(task1.execStartTimeMs, task2.execStartTimeMs)

    task = None

    failed_tasks = job.filter_tasks(task_states=('failed', ))
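
The excerpt cuts off here. Per the docstring, the selection plausibly prefers a failed task and otherwise falls back to the most recently started attempt; a hedged sketch of that idea (`filter_tasks(task_types=...)` is an assumed keyword, only the `task_states` form appears above):

    # Sketch only, not the project's exact continuation.
    if failed_tasks:
        task = failed_tasks[0]
    else:
        recent = job.filter_tasks(task_types=('map', 'reduce'))  # hypothetical kwarg
        if recent:
            task = max(recent, key=lambda t: t.execStartTimeMs)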
Code Example #15
File: views.py Project: soongxin/hhue
def single_task_attempt_logs(request,
                             job,
                             taskid,
                             attemptid,
                             offset=LOG_OFFSET_BYTES):
    jt = get_api(request.user, request.jt)

    job_link = jt.get_job_link(job.jobId)
    task = job_link.get_task(taskid)

    try:
        attempt = task.get_attempt(attemptid)
    except (KeyError, RestException) as e:
        raise KeyError(
            _("Cannot find attempt '%(id)s' in task") % {'id': attemptid}, e)

    first_log_tab = 0

    try:
        # Add a diagnostic log
        if hasattr(task, 'job') and hasattr(task.job, 'diagnostics'):
            diagnostic_log = task.job.diagnostics
        elif job_link.is_mr2:
            diagnostic_log = attempt.diagnostics
        else:
            diagnostic_log = ", ".join(task.diagnosticMap[attempt.attemptId])
        logs = [diagnostic_log]
        # Add remaining logs
        logs += [
            section.strip() for section in attempt.get_task_log(offset=offset)
        ]
        log_tab = [i for i, log in enumerate(logs) if log]
        if log_tab:
            first_log_tab = log_tab[0]
    except urllib.error.URLError:
        logs = [_("Failed to retrieve log. TaskTracker not ready.")] * 4

    context = {
        "attempt": attempt,
        "taskid": taskid,
        "joblnk": job_link,
        "task": task,
        "logs": logs,
        "logs_list": attempt.get_log_list(),
        "first_log_tab": first_log_tab,
    }

    if request.GET.get('format') == 'python':
        return context
    else:
        context['logs'] = [LinkJobLogs._make_links(log) for log in logs]

    if request.GET.get('format') == 'json':
        response = {
            "logs": context['logs'],
            "logsList": context['logs_list'],
            "isRunning": job.status.lower() in ('running', 'pending', 'prep')
        }
        return JsonResponse(response)
    else:
        return render("attempt_logs.mako", request, context)
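
`first_log_tab` simply selects the index of the first non-empty log so the UI opens on a tab that has content; the same selection can be written with `next()`:

logs = ['', '', 'stderr output ...', '']  # hypothetical tab contents
first_log_tab = next((i for i, log in enumerate(logs) if log), 0)
# -> 2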