Example #1
    def get(self, viewLength):
        """Handles a new error report via POST."""
        if viewLength != 'day':
            # TODO(robbyw): For viewLength == week or viewLength == month, aggregate the aggregates.
            viewLength = 'day'

        # Load the most recent aggregated snapshot and keep its top 25 rows.
        data = AggregatedStats.all().order('-date').get()
        data = json.loads(data.json)[:25]

        # Each row is a (error key, details) pair; sort the per-server and
        # per-environment counts in descending order for display.
        for _, row in data:
            logging.info(row)
            row['servers'] = sorted(row['servers'].items(),
                                    key=lambda x: x[1],
                                    reverse=True)
            row['environments'] = sorted(row['environments'].items(),
                                         key=lambda x: x[1],
                                         reverse=True)

        # Split the pairs apart and batch-fetch the corresponding error entities.
        keys, values = zip(*data)
        errors = LoggedError.get([db.Key(key) for key in keys])

        context = {
            'title': 'Top 25 exceptions over the last %s' % viewLength,
            'errors': zip(errors, values),
            'total': len(data)
        }
        self.response.out.write(
            template.render(getTemplatePath('aggregation.html'), context))
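
For reference, this handler assumes that AggregatedStats.json stores a list of (error key, details) pairs, where the details carry per-server and per-environment error counts. A minimal sketch of that assumed layout (the sample keys and values are invented):

import json

# Hypothetical illustration of the snapshot shape the GET handler above expects.
sampleSnapshot = json.dumps([
    ['agg-key-1', {
        'count': 42,
        'servers': {'web1': 30, 'web2': 12},         # server name -> error count
        'environments': {'prod': 40, 'staging': 2},  # environment -> error count
    }],
])
# json.loads(sampleSnapshot) yields the (key, details) pairs iterated above.
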
Example #2
  def doAuthenticatedGet(self, _, *args):
    key, = args
    self.response.headers['Content-Type'] = 'text/plain'
    # Mark the error inactive (i.e. resolved) and persist the change.
    error = LoggedError.get(key)
    error.active = False
    error.put()

    self.response.out.write('ok')
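
doAuthenticatedGet is presumably invoked by a base handler after an authentication check; the first parameter receives the authenticated user and is unused here. A minimal sketch of such a wrapper, assuming App Engine's users API (the AuthPage name and structure are guesses, not the project's actual base class):

from google.appengine.api import users
from google.appengine.ext import webapp

class AuthPage(webapp.RequestHandler):
  """Hypothetical base class that authenticates before delegating."""

  def get(self, *args):
    user = users.get_current_user()
    if not user:
      # Not signed in: bounce through the App Engine login flow.
      self.redirect(users.create_login_url(self.request.uri))
      return
    self.doAuthenticatedGet(user, *args)
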
Example #3
  def doAuthenticatedGet(self, user, *args):
    key, = args
    self.response.headers['Content-Type'] = 'text/html'
    error = LoggedError.get(key)
    filters = getFilters(self.request)
    context = {
      'title': '%s - %s' % (error.lastMessage, NAME),
      'extraScripts': ['view'],
      'user': user,
      'error': error,
      'filters': filters.items(),
      'instances': getInstances(filters, parent=error)[:100]
    }
    self.response.out.write(template.render(getTemplatePath('view.html'), context))
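
This handler leans on a getFilters helper that is not shown. A plausible sketch, assuming it collects a whitelist of query-string parameters into a dict (the parameter names here are hypothetical):

# Hypothetical filter extraction; the real parameter set may differ.
FILTER_PARAMS = ('project', 'environment', 'server')

def getFilters(request):
  """Returns the non-empty whitelisted query parameters as a dict."""
  return dict((param, request.get(param))
              for param in FILTER_PARAMS if request.get(param))
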
Example #4
  def post(self): # pylint: disable=R0914, R0915
    """Handles a new error report via POST."""
    taskId = self.request.get('id', '0')
    currentId = memcache.get(AGGREGATION_ID)
    if taskId == 'None' or not (taskId == currentId or int(taskId) % 50 == 0):
      # Skip this task unless it is the most recently added or one in every fifty tasks.
      logging.debug('Skipping task %s, current is %s', taskId, currentId)
      return

    q = taskqueue.Queue('aggregation')
    tasks = _getTasks(q)
    logging.info('Leased %d tasks', len(tasks))

    byError = collections.defaultdict(list)
    instanceKeys = []
    tasksByError = collections.defaultdict(list)
    for task in tasks:
      data = json.loads(task.payload)
      errorKey = data['error']
      if 'instance' in data and 'backtrace' in data:
        instanceKey = data['instance']
        byError[errorKey].append((instanceKey, data['backtrace']))
        instanceKeys.append(instanceKey)
        tasksByError[errorKey].append(task)
      elif 'aggregation' in data:
        byError[errorKey].append(data['aggregation'])
        tasksByError[errorKey].append(task)
      else:
        # Clean up any old tasks in the queue.
        logging.warn('Deleting an old task')
        q.delete_tasks([task])

    retries = 0
    instanceByKey = getInstanceMap(instanceKeys)
    for errorKey, instances in byError.items():
      # Each entry is either an already-aggregated dict or an
      # (instance key, backtrace) pair that still needs aggregating.
      instances = [keyOrDict if isinstance(keyOrDict, dict)
                   else aggregateSingleInstance(instanceByKey[keyOrDict[0]], keyOrDict[1])
                   for keyOrDict in instances]
      aggregation = aggregateInstances(instances)

      success = False
      if _lockError(errorKey):
        try:
          error = LoggedError.get(errorKey)
          aggregate(
              error, aggregation.count, aggregation.firstOccurrence,
              aggregation.lastOccurrence, aggregation.lastMessage, aggregation.backtrace,
              aggregation.environments, aggregation.servers)
          error.put()
          logging.info('Successfully aggregated %r items for key %s', aggregation.count, errorKey)
          success = True
        except: # pylint: disable=W0702
          logging.exception('Error writing to data store for key %s.', errorKey)
        finally:
          _unlockError(errorKey)
      else:
        logging.info('Could not lock %s', errorKey)

      if not success:
        # Add a retry task.
        logging.info('Retrying aggregation for %d items for key %s', len(instances), errorKey)
        # Coerce datetimes and sets into JSON-serializable types before re-queueing.
        aggregation.firstOccurrence = str(aggregation.firstOccurrence)
        aggregation.lastOccurrence = str(aggregation.lastOccurrence)
        aggregation.environments = list(aggregation.environments)
        aggregation.servers = list(aggregation.servers)
        taskqueue.Queue('aggregation').add([
          taskqueue.Task(payload=json.dumps({'error': errorKey, 'aggregation': aggregation}),
                         method='PULL')
        ])
        retries += 1

      q.delete_tasks(tasksByError[errorKey])

    if retries:
      logging.warn("Retrying %d tasks", retries)
      for _ in range(retries):
        queueAggregationWorker()
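
The _lockError and _unlockError helpers are not shown. A common App Engine pattern, and a plausible sketch here, is a best-effort memcache lock: memcache.add only succeeds if the key is absent. The key format and expiration below are assumptions:

from google.appengine.api import memcache

LOCK_EXPIRATION = 60  # seconds; hypothetical timeout so a crashed worker releases its lock

def _lockError(errorKey):
  """Returns True if the lock was acquired; add() fails if the key already exists."""
  return memcache.add('aggregation-lock:%s' % errorKey, 1, time=LOCK_EXPIRATION)

def _unlockError(errorKey):
  memcache.delete('aggregation-lock:%s' % errorKey)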