def main(): """Runs the aggregation.""" logging.info('running the cron') now = datetime.now() oneDayAgo = now - timedelta(days = 1) aggregation = collections.defaultdict(entry) count = 0 query = lambda: LoggedErrorInstance.all().filter('date >=', oneDayAgo) for instance in retryingIter(query): aggregate(aggregation, instance) count += 1 if not count % 500: logging.info('Finished %d items', count) result = sorted(aggregation.items(), key=lambda item: item[1]['count'], reverse=True) logging.info('Finished first day of data') # query = lambda: LoggedErrorInstance.all().filter('date <', oneDayAgo).filter('date >=', oneWeekAgo) # for instance in retryingIter(query): # aggregate(aggregation, instance) # count += 1 # if not count % 500: # logging.info('Finished %d items', count) # result['week'] = sorted(aggregation.items(), key=lambda item: item[1]['count'], reverse=True) # # logging.info('Finished first week of data') stat = AggregatedStats() stat.date = now stat.json = json.dumps(result) stat.put() logging.info('Put aggregate')
def doAuthenticatedGet(self, _):
    """Deletes every LoggedError and LoggedErrorInstance; admin-only.

    Non-admin callers are redirected to the login page instead.
    """
    if not users.is_current_user_admin():
        self.redirect(users.create_login_url(self.request.uri))
        return
    # Delete entities one at a time; aggregates first, then instances.
    for loggedError in LoggedError.all():
        loggedError.delete()
    for loggedInstance in LoggedErrorInstance.all():
        loggedInstance.delete()
    self.response.out.write('Done')
def getInstances(filters, parent=None, limit=None, offset=None):
    """Gets a list of instances of the given parent error, filtered by the given filters."""
    query = LoggedErrorInstance.all()
    if parent:
        query = query.filter('error =', parent)
    # Iterating (filters or {}) is equivalent to guarding with `if filters:`.
    for filterKey, filterValue in (filters or {}).items():
        if filterKey in INSTANCE_FILTERS:
            query = filterInstances(query, filterKey, filterValue)
        elif filterKey == 'project' and not parent:
            # Project filtering only applies when no parent error pins it.
            query = query.filter('project =', getProject(filterValue))
    # Default page size is 51 (one extra row signals "more results exist").
    return query.order('-date').fetch(limit or 51, offset or 0)
def getInstances(filters, parent=None, limit=None, offset=None):
    """Gets a list of instances of the given parent error, filtered by the given filters."""
    instanceQuery = LoggedErrorInstance.all()
    if parent:
        instanceQuery = instanceQuery.filter('error =', parent)
    if filters:
        for name, value in filters.items():
            if name in INSTANCE_FILTERS:
                instanceQuery = filterInstances(instanceQuery, name, value)
            elif name == 'project' and not parent:
                # Only filter by project when no parent error already scopes it.
                instanceQuery = instanceQuery.filter('project =', getProject(value))
    # 51 rows by default — one more than a page, so callers can detect a next page.
    return instanceQuery.order('-date').fetch(limit or 51, offset or 0)
def get(self):
    """Writes space-separated instance counts for each requested time window.

    Query parameters:
        key: shared secret; mismatch yields a 403.
        minutes: space-separated window sizes, in minutes.
        project: optional project name to scope the counts to.

    If the named project is unknown, responds with a zero for each
    requested window instead of counting across all projects.
    """
    key = self.request.get('key')
    if key != SECRET_KEY:
        self.error(403)
        return

    minutesList = self.request.get('minutes').split()
    project = self.request.get('project')
    if project:
        project = getProject(project)
        if not project:
            # Unknown project: emit one zero per requested window and stop.
            # (The original fell through here, which both wrote an empty
            # string — counts was still empty — and then counted instances
            # across ALL projects.)
            self.response.out.write(' '.join(['0' for _ in minutesList]))
            return

    counts = []
    for minutes in minutesList:
        query = LoggedErrorInstance.all()
        if project:
            query = query.ancestor(project)
        counts.append(
            query.filter('date >=', datetime.now() - timedelta(minutes=int(minutes))).count())
    self.response.out.write(' '.join(str(count) for count in counts))
def main(): """Runs the aggregation.""" logging.info('running the cron') now = datetime.now() oneDayAgo = now - timedelta(days=1) aggregation = collections.defaultdict(entry) count = 0 query = lambda: LoggedErrorInstance.all().filter('date >=', oneDayAgo) for instance in retryingIter(query): aggregate(aggregation, instance) count += 1 if not count % 500: logging.info('Finished %d items', count) result = sorted(aggregation.items(), key=lambda item: item[1]['count'], reverse=True) logging.info('Finished first day of data') # query = lambda: LoggedErrorInstance.all().filter('date <', oneDayAgo).filter('date >=', oneWeekAgo) # for instance in retryingIter(query): # aggregate(aggregation, instance) # count += 1 # if not count % 500: # logging.info('Finished %d items', count) # result['week'] = sorted(aggregation.items(), key=lambda item: item[1]['count'], reverse=True) # # logging.info('Finished first week of data') stat = AggregatedStats() stat.date = now stat.json = json.dumps(result) stat.put() logging.info('Put aggregate')
def get(self):
    """Writes space-separated instance counts, one per requested minutes window.

    Query parameters:
        key: shared secret; a mismatch produces a 403.
        minutes: space-separated window sizes, in minutes.
        project: optional project name restricting the counts.

    When the project name does not resolve, a zero is written for every
    requested window and the handler returns early.
    """
    if self.request.get('key') != SECRET_KEY:
        self.error(403)
        return

    windows = self.request.get('minutes').split()
    project = self.request.get('project')
    if project:
        project = getProject(project)
        if not project:
            # Unknown project: report zeros and stop. The original version
            # lacked this return, so it wrote an empty string (counts was
            # still []) and then counted instances with no project filter.
            self.response.out.write(' '.join(['0' for _ in windows]))
            return

    counts = []
    for minutes in windows:
        query = LoggedErrorInstance.all()
        if project:
            query = query.ancestor(project)
        cutoff = datetime.now() - timedelta(minutes=int(minutes))
        counts.append(query.filter('date >=', cutoff).count())
    self.response.out.write(' '.join(str(count) for count in counts))
def getInstanceMap(instanceKeys):
    """Gets a map from key to instance for the given keys."""
    # Batch-fetch all instances in one datastore call, then pair each key
    # with the instance returned at the same position.
    fetched = LoggedErrorInstance.get(instanceKeys)
    instancesByKey = {}
    for instanceKey, instance in zip(instanceKeys, fetched):
        instancesByKey[instanceKey] = instance
    return instancesByKey
def _putInstance(exception):
    """Put an exception in the data store.

    Looks up (or creates) the aggregating LoggedError for the exception's
    hash, stores a LoggedErrorInstance for this occurrence, and queues a
    background aggregation pass when an existing error was matched.

    Args:
        exception: dict describing the error. Required keys: 'message',
            'project', 'serverName', 'timestamp'. Optional keys:
            'backtrace', 'environment', 'type', 'logMessage', 'context',
            'errorLevel'.
    """
    backtraceText = exception.get('backtrace') or ''
    environment = exception.get('environment', 'Unknown')
    message = exception['message'] or ''
    project = exception['project']
    server = exception['serverName']
    timestamp = datetime.fromtimestamp(exception['timestamp'])
    exceptionType = exception.get('type') or ''
    logMessage = exception.get('logMessage')
    context = exception.get('context')
    errorLevel = exception.get('errorLevel')

    # Hash on the raw (pre-normalization) type, matching the lookup key.
    errorHash = generateHash(exceptionType, backtraceText)
    error = getAggregatedError(project, errorHash)

    # Normalize the type to one line and cap its stored length. (The
    # original called .replace('\n', ' ') twice — before and after the
    # truncation — the second call was a no-op and has been removed.)
    exceptionType = exceptionType.replace('\n', ' ')[:500]

    needsAggregation = True
    if not error:
        # First occurrence of this hash: create the aggregate record
        # directly; no separate aggregation pass is needed for a new error.
        error = LoggedError(
            project=getProject(project),
            backtrace=backtraceText,
            type=exceptionType,
            hash=errorHash,
            active=True,
            errorLevel=errorLevel,
            count=1,
            firstOccurrence=timestamp,
            lastOccurrence=timestamp,
            lastMessage=message[:300],
            environments=[str(environment)],
            servers=[server])
        error.put()
        needsAggregation = False

    instance = LoggedErrorInstance(
        project=error.project,
        error=error,
        environment=environment,
        type=exceptionType,
        errorLevel=errorLevel,
        date=timestamp,
        message=message,
        server=server,
        logMessage=logMessage)
    if context:
        instance.context = json.dumps(context)
        if 'userId' in context:
            try:
                instance.affectedUser = int(context['userId'])
            except (TypeError, ValueError):
                # Non-numeric user id: leave affectedUser unset.
                pass
    instance.put()

    if needsAggregation:
        queueAggregation(error, instance, backtraceText)
def _putInstance(exception):
    """Put an exception in the data store.

    Finds or creates the LoggedError aggregate matching the exception's
    hash, records this occurrence as a LoggedErrorInstance, and enqueues
    background aggregation when the error already existed.

    Args:
        exception: dict describing the error. Required keys: "message",
            "project", "serverName", "timestamp". Optional keys:
            "backtrace", "environment", "type", "logMessage", "context",
            "errorLevel".
    """
    backtraceText = exception.get("backtrace") or ""
    environment = exception.get("environment", "Unknown")
    message = exception["message"] or ""
    project = exception["project"]
    server = exception["serverName"]
    timestamp = datetime.fromtimestamp(exception["timestamp"])
    exceptionType = exception.get("type") or ""
    logMessage = exception.get("logMessage")
    context = exception.get("context")
    errorLevel = exception.get("errorLevel")

    # Hash the raw type so the lookup key matches what was hashed before.
    errorHash = generateHash(exceptionType, backtraceText)
    error = getAggregatedError(project, errorHash)

    # Collapse newlines and cap the stored type at 500 chars. (The second
    # .replace("\n", " ") call in the original was redundant — the string
    # had already been normalized — and is removed here.)
    exceptionType = exceptionType.replace("\n", " ")[:500]

    needsAggregation = True
    if not error:
        # Brand-new error hash: seed the aggregate record in place, so no
        # follow-up aggregation pass is required.
        error = LoggedError(
            project=getProject(project),
            backtrace=backtraceText,
            type=exceptionType,
            hash=errorHash,
            active=True,
            errorLevel=errorLevel,
            count=1,
            firstOccurrence=timestamp,
            lastOccurrence=timestamp,
            lastMessage=message[:300],
            environments=[str(environment)],
            servers=[server],
        )
        error.put()
        needsAggregation = False

    instance = LoggedErrorInstance(
        project=error.project,
        error=error,
        environment=environment,
        type=exceptionType,
        errorLevel=errorLevel,
        date=timestamp,
        message=message,
        server=server,
        logMessage=logMessage,
    )
    if context:
        instance.context = json.dumps(context)
        if "userId" in context:
            try:
                instance.affectedUser = int(context["userId"])
            except (TypeError, ValueError):
                # Non-numeric user id: skip setting affectedUser.
                pass
    instance.put()

    if needsAggregation:
        queueAggregation(error, instance, backtraceText)