def remove_old_logs(self, log_timeout):
  """ Removes old logs.

  Args:
    log_timeout: The timeout value in seconds.

  Returns:
    True on success, False otherwise.
  """
  self.register_db_accessor(constants.DASHBOARD_APP_ID)
  if log_timeout:
    timeout = (datetime.datetime.utcnow() -
               datetime.timedelta(seconds=log_timeout))
    query = RequestLogLine.query(RequestLogLine.timestamp < timeout)
    # Routine diagnostic output, not a failure: log at debug, not error.
    logging.debug("The timeout time is {0}".format(timeout))
  else:
    # No timeout given: every log line is considered old.
    query = RequestLogLine.query()
  counter = 0
  logging.debug("The current time is {0}".format(datetime.datetime.utcnow()))
  for entity in query.iter():
    logging.debug("Removing {0}".format(entity))
    entity.key.delete()
    counter += 1
  # Summary of work done is informational, not an error.
  logging.info("Removed {0} log entries.".format(counter))
  return True
def remove_old_logs(self, log_timeout):
  """ Deletes request log lines that have outlived their retention window.

  Args:
    log_timeout: The timeout value in seconds.

  Returns:
    True on success, False otherwise.
  """
  self.register_db_accessor(constants.DASHBOARD_APP_ID)
  if log_timeout:
    # Anything older than (now - log_timeout) is eligible for deletion.
    cutoff = (datetime.datetime.utcnow() -
              datetime.timedelta(seconds=log_timeout))
    query = RequestLogLine.query(RequestLogLine.timestamp < cutoff)
    logging.debug("The timeout time is {0}".format(cutoff))
  else:
    # No timeout: every stored log line matches.
    query = RequestLogLine.query()
  removed = 0
  logging.debug("The current time is {0}".format(
    datetime.datetime.utcnow()))
  for stale_entity in query.iter():
    logging.debug("Removing {0}".format(stale_entity))
    stale_entity.key.delete()
    removed += 1
  logging.info("Removed {0} log entries.".format(removed))
  return True
def post(self):
  """ Saves logs records to the Datastore for later viewing. """
  payload = json.loads(self.request.body)
  service_name = payload['service_name']
  host = payload['host']
  log_lines = payload['logs']

  # Register the service (and the host it runs on) if not seen before.
  service = LoggedService.get_by_id(service_name)
  if service is None:
    service = LoggedService(id=service_name)
    service.hosts = [host]
    service.put()
  else:
    if host not in service.hosts:
      service.hosts.append(host)
      service.put()

  # Fold each incoming line into a RequestLogLine entity, grouped by key,
  # then write everything out in one batch.
  pending = {}
  for line in log_lines:
    the_time = int(line['timestamp'])
    reversed_time = (2 ** 34 - the_time) * 1000000
    key_name = service_name + host + str(reversed_time)

    entity = pending.get(key_name)
    if entity is None:
      # Not cached locally: grab it from the datastore.
      entity = RequestLogLine.get_by_id(id=key_name)
      if not entity:
        # This is the first log for this timestamp.
        entity = RequestLogLine(id=key_name)
        entity.service_name = service_name
        entity.host = host
      # Cache the entity so that it does not repeatedly get fetched.
      pending[key_name] = entity

    app_log_line = AppLogLine()
    app_log_line.message = line['message']
    app_log_line.level = line['level']
    app_log_line.timestamp = datetime.datetime.fromtimestamp(the_time)
    # Append to the list property of the log line and refresh the cache.
    entity.app_logs.append(app_log_line)
    pending[key_name] = entity

  ndb.put_multi(list(pending.values()))
def post(self):
  """ Saves logs records to the Datastore for later viewing. """
  body = self.request.body
  request_data = json.loads(body)
  service_name = request_data["service_name"]
  host = request_data["host"]
  log_lines = request_data["logs"]

  # Make sure this service is registered and that it lists this host.
  service = LoggedService.get_by_id(service_name)
  if service is None:
    service = LoggedService(id=service_name)
    service.hosts = [host]
    service.put()
  else:
    if host not in service.hosts:
      service.hosts.append(host)
      service.put()

  # Accumulate one RequestLogLine per key; flush them in a single batch.
  cache = {}
  for entry in log_lines:
    the_time = int(entry["timestamp"])
    reversed_time = (2 ** 34 - the_time) * 1000000
    key_name = service_name + host + str(reversed_time)

    try:
      record = cache[key_name]
    except KeyError:
      # Not in the local cache: try the datastore.
      record = RequestLogLine.get_by_id(id=key_name)
      if not record:
        # This is the first log for this timestamp.
        record = RequestLogLine(id=key_name)
        record.service_name = service_name
        record.host = host
      # Cache the entity so that it does not repeatedly get fetched.
      cache[key_name] = record

    app_log = AppLogLine()
    app_log.message = entry["message"]
    app_log.level = entry["level"]
    app_log.timestamp = datetime.datetime.fromtimestamp(the_time)
    # Append to the log line's list property and refresh the cache entry.
    record.app_logs.append(app_log)
    cache[key_name] = record

  ndb.put_multi([cache[key] for key in cache])
def get(self, service_name, host):
  """ Displays all logs accumulated for the given service, on the named host.

  Specifying 'all' as the host indicates that we shouldn't restrict
  ourselves to a single machine.
  """
  is_cloud_admin = self.helper.is_user_cloud_admin()
  apps_user_is_admin_on = self.helper.get_owned_apps()
  # Only cloud admins and owners of this service may view its logs.
  if not (is_cloud_admin or service_name in apps_user_is_admin_on):
    self.redirect(DashPage.PATH, self.response)
    return

  cursor_token = self.request.get('next_cursor')
  start_cursor = None
  if cursor_token and cursor_token != "None":
    start_cursor = Cursor(urlsafe=cursor_token)

  # Filter by service, and additionally by host unless "all" was requested.
  filters = [RequestLogLine.service_name == service_name]
  if host != "all":
    filters.append(RequestLogLine.host == host)
  query, next_cursor, is_more = RequestLogLine.query(*filters).fetch_page(
    self.LOGS_PER_PAGE, produce_cursors=True, start_cursor=start_cursor)

  cursor_value = next_cursor.urlsafe() if next_cursor else None

  self.render_app_page(page='logs', values={
    'service_name': service_name,
    'host': host,
    'query': query,
    'next_cursor': cursor_value,
    'is_more': is_more,
    'page_content': self.TEMPLATE,
  })
def remove_old_logs(self, log_timeout): """ Removes old logs. Args: log_timeout: The timeout value in seconds. Returns: True on success, False otherwise. """ # If we have state information beyond what function to use, # load the last seen cursor. if (len(self.groomer_state) > 1 and self.groomer_state[0] == self.CLEAN_LOGS_TASK): last_cursor = Cursor(self.groomer_state[1]) else: last_cursor = None self.register_db_accessor(constants.DASHBOARD_APP_ID) if log_timeout: timeout = (datetime.datetime.utcnow() - datetime.timedelta(seconds=log_timeout)) query = RequestLogLine.query(RequestLogLine.timestamp < timeout) logging.debug("The timeout time is {0}".format(timeout)) else: query = RequestLogLine.query() counter = 0 logging.debug("The current time is {0}".format(datetime.datetime.utcnow())) while True: entities, next_cursor, more = query.fetch_page(self.BATCH_SIZE, start_cursor=last_cursor) for entity in entities: logging.debug("Removing {0}".format(entity)) entity.key.delete() counter += 1 if time.time() > self.last_logged + self.LOG_PROGRESS_FREQUENCY: logging.info('Removed {} log entries.'.format(counter)) self.last_logged = time.time() if more: last_cursor = next_cursor self.update_groomer_state([self.CLEAN_LOGS_TASK, last_cursor.urlsafe()]) else: break logging.info("Removed {0} log entries.".format(counter)) return True
def get(self, service_name, host):
  """ Displays all logs accumulated for the given service, on the named host.

  Specifying 'all' as the host indicates that we shouldn't restrict
  ourselves to a single machine.
  """
  is_cloud_admin = self.helper.is_user_cloud_admin()
  apps_user_is_admin_on = self.helper.get_owned_apps()
  if (not is_cloud_admin) and (service_name not in apps_user_is_admin_on):
    self.redirect(StatusPage.PATH, self.response)
    # Bug fix: without this return, the handler kept executing after the
    # redirect and queried/rendered logs for an unauthorized user.
    return

  encoded_cursor = self.request.get("next_cursor")
  if encoded_cursor and encoded_cursor != "None":
    start_cursor = Cursor(urlsafe=encoded_cursor)
  else:
    start_cursor = None

  # Fetch one page of log lines, filtered by host unless "all" was given.
  if host == "all":
    query, next_cursor, is_more = RequestLogLine.query(
      RequestLogLine.service_name == service_name).fetch_page(
      self.LOGS_PER_PAGE, produce_cursors=True, start_cursor=start_cursor)
  else:
    query, next_cursor, is_more = RequestLogLine.query(
      RequestLogLine.service_name == service_name,
      RequestLogLine.host == host).fetch_page(
      self.LOGS_PER_PAGE, produce_cursors=True, start_cursor=start_cursor)

  if next_cursor:
    cursor_value = next_cursor.urlsafe()
  else:
    cursor_value = None

  self.render_page(
    page="logs",
    template_file=self.TEMPLATE,
    values={
      "service_name": service_name,
      "host": host,
      "query": query,
      "next_cursor": cursor_value,
      "is_more": is_more,
    },
  )
def get(self, service_name, host):
  """ Renders the logs collected for one service.

  Specifying 'all' as the host means logs from every machine running the
  service are shown, not just one.
  """
  # Authorization: cloud admins see everything; app owners see their apps.
  is_cloud_admin = self.helper.is_user_cloud_admin()
  owned_apps = self.helper.get_owned_apps()
  if not is_cloud_admin and service_name not in owned_apps:
    self.redirect(DashPage.PATH, self.response)
    return

  raw_cursor = self.request.get('next_cursor')
  if raw_cursor and raw_cursor != "None":
    start_cursor = Cursor(urlsafe=raw_cursor)
  else:
    start_cursor = None

  # Build the query first, then page through it from the saved cursor.
  if host == "all":
    log_query = RequestLogLine.query(
      RequestLogLine.service_name == service_name)
  else:
    log_query = RequestLogLine.query(
      RequestLogLine.service_name == service_name,
      RequestLogLine.host == host)
  query, next_cursor, is_more = log_query.fetch_page(
    self.LOGS_PER_PAGE, produce_cursors=True, start_cursor=start_cursor)

  cursor_value = next_cursor.urlsafe() if next_cursor else None

  self.render_app_page(page='logs', values={
    'service_name': service_name,
    'host': host,
    'query': query,
    'next_cursor': cursor_value,
    'is_more': is_more,
    'page_content': self.TEMPLATE,
  })