def _flush(self): """Internal version of flush() with no locking.""" logs = self.parse_logs() self._clear() first_iteration = True while logs or first_iteration: first_iteration = False request = log_service_pb.FlushRequest() group = log_service_pb.UserAppLogGroup() byte_size = 0 n = 0 for entry in logs: if len(entry[2]) > LogsBuffer._MAX_LINE_SIZE: entry = list(entry) entry[2] = self._truncate(entry[2], LogsBuffer._MAX_LINE_SIZE) if byte_size + len(entry[2]) > LogsBuffer._MAX_FLUSH_SIZE: break line = group.add_log_line() line.set_timestamp_usec(entry[0]) line.set_level(entry[1]) line.set_message(entry[2]) byte_size += 1 + group.lengthString(line.ByteSize()) n += 1 assert n > 0 or not logs logs = logs[n:] request.set_logs(group.Encode()) response = api_base_pb.VoidProto() apiproxy_stub_map.MakeSyncCall('logservice', 'Flush', request, response)
def _flush(self): """Internal version of flush() with no locking.""" logs = self.parse_logs() self._clear() while True: group = log_service_pb.UserAppLogGroup() byte_size = 0 n = 0 for timestamp_usec, level, message, unused_source_location in logs: message = self._truncate(message, self._MAX_LINE_SIZE) if byte_size + len(message) > self._MAX_FLUSH_SIZE: break line = group.add_log_line() line.set_timestamp_usec(timestamp_usec) line.set_level(level) line.set_message(message) byte_size += 1 + group.lengthString(line.ByteSize()) n += 1 assert n > 0 or not logs logs = logs[n:] request = log_service_pb.FlushRequest() request.set_logs(group.Encode()) response = api_base_pb.VoidProto() apiproxy_stub_map.MakeSyncCall('logservice', 'Flush', request, response) if not logs: break
def _flush(self): """Internal version of flush() with no locking.""" records_to_be_flushed = [] try: while True: group = log_service_pb.UserAppLogGroup() bytes_left = self._MAX_FLUSH_SIZE while self._buffer: record = self._get_record() if record.IsBlank(): continue message = self._clean(record.message) message = self._truncate(message, self._MAX_LINE_SIZE) if len(message) > bytes_left: self._rollback_record(record) break records_to_be_flushed.append(record) line = group.add_log_line() line.set_timestamp_usec(record.created) line.set_level(record.level) if record.source_location is not None: line.mutable_source_location().set_file(record.source_location[0]) line.mutable_source_location().set_line(record.source_location[1]) line.mutable_source_location().set_function_name( record.source_location[2]) line.set_message(message) bytes_left -= 1 + group.lengthString(line.ByteSize()) request = log_service_pb.FlushRequest() request.set_logs(group.Encode()) response = api_base_pb.VoidProto() apiproxy_stub_map.MakeSyncCall('logservice', 'Flush', request, response) if not self._buffer: break except apiproxy_errors.CancelledError: records_to_be_flushed.reverse() self._buffer.extendleft(records_to_be_flushed) except Exception, e: records_to_be_flushed.reverse() self._buffer.extendleft(records_to_be_flushed) line = '-' * 80 msg = 'ERROR: Could not flush to log_service (%s)\n%s\n%s\n%s\n' _sys_stderr.write(msg % (e, line, self._contents(), line)) self._clear() raise
def _flush(self): """Internal version of flush() with no locking.""" if self._stderr: sys.stderr.flush() return lines_to_be_flushed = [] try: while True: group = log_service_pb.UserAppLogGroup() bytes_left = LogsBufferNew._MAX_FLUSH_SIZE while self._buffer: bare_line = self._get_line() timestamp_usec, level, message = logsutil.ParseLogEntry( bare_line) if message[-1] == '\n': message = message[:-1] if not message: continue message = LogsBufferNew._truncate( message, LogsBufferNew._MAX_LINE_SIZE) if len(message) > bytes_left: self._rollback_line(bare_line) break lines_to_be_flushed.append(bare_line) line = group.add_log_line() line.set_timestamp_usec(timestamp_usec) line.set_level(level) line.set_message(message) bytes_left -= 1 + group.lengthString(line.ByteSize()) request = log_service_pb.FlushRequest() request.set_logs(group.Encode()) response = api_base_pb.VoidProto() apiproxy_stub_map.MakeSyncCall('logservice', 'Flush', request, response) if not self._buffer: break except apiproxy_errors.CancelledError: lines_to_be_flushed.reverse() self._buffer.extendleft(lines_to_be_flushed) except Exception, e: lines_to_be_flushed.reverse() self._buffer.extendleft(lines_to_be_flushed) if not self._stderr: line = '-' * 80 msg = 'ERROR: Could not flush to log_service (%s)\n%s\n%s\n%s\n' sys.stderr.write(msg % (str(e), line, '\n'.join(self._buffer), line)) self._clear() raise
def _Dynamic_Flush(self, request, unused_response, request_id=None):
  """Sends application-level log messages for a request to the log collector."""
  group = log_service_pb.UserAppLogGroup(request.logs())
  logs = group.log_line_list()

  appid = os.environ['APPLICATION_ID']
  if appid in ['apichecker', 'appscaledashboard']:
    return

  # The AppDashboard displays logs in the Java style, which starts at 1.
  # Since Python's log levels start at 0 (for DEBUG), bump these by one to
  # keep them in sync.
  formatted_logs = [{
      'timestamp': log.timestamp_usec() / 1e6,
      'level': log.level() + 1,
      'message': log.message()
  } for log in logs]

  if not formatted_logs:
    return

  payload = json.dumps({
      'service_name': appid,
      'host': os.environ['MY_IP_ADDRESS'],
      'logs': formatted_logs
  })

  nginx_host = os.environ['NGINX_HOST']
  logservice.SendLogsThread(payload, nginx_host).start()

def _Dynamic_Flush(self, request, unused_response, request_id=None):
  """Sends application-level log messages for a request to the log collector."""
  group = log_service_pb.UserAppLogGroup(request.logs())
  logs = group.log_line_list()

  appid = os.environ['APPLICATION_ID']
  if appid in ['apichecker', 'appscaledashboard']:
    return

  # The AppDashboard displays logs in the Java style, which starts at 1.
  # Since Python's log levels start at 0 (for DEBUG), bump these by one to
  # keep them in sync.
  formatted_logs = [{'timestamp': log.timestamp_usec() / 1e6,
                     'level': log.level() + 1,
                     'message': log.message()} for log in logs]

  if not formatted_logs:
    return

  payload = json.dumps({
      'service_name': appid,
      'host': os.environ['MY_IP_ADDRESS'],
      'logs': formatted_logs
  })

  conn = httplib.HTTPSConnection(os.environ['NGINX_HOST'] + ':1443')
  headers = {'Content-Type': 'application/json'}
  conn.request('POST', '/logs/upload', payload, headers)
  # Read the response to complete the request; the body is ignored.
  response = conn.getresponse()

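# For reference, the JSON document that the two AppScale variants above POST
# to https://<NGINX_HOST>:1443/logs/upload has this shape (values are
# illustrative; 'level' is already bumped by one to match the dashboard's
# Java-style numbering):
import json

example_payload = json.dumps({
    'service_name': 'guestbook',
    'host': '10.0.0.5',
    'logs': [{'timestamp': 1234567890.123456, 'level': 2, 'message': 'hello'}],
})
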
def _Dynamic_Flush(self, request, unused_response, request_id):
  """Writes application-level log messages for a request."""
  rl = self._pending_requests.get(request_id, None)
  if rl is None:
    return
  group = log_service_pb.UserAppLogGroup(request.logs())
  logs = group.log_line_list()
  for log in logs:
    al = self._pending_requests_applogs[request_id].add()
    al.time = log.timestamp_usec()
    al.level = log.level()
    al.message = log.message()

def _flush_logs(self, logs):
  """Flushes logs using the LogService API.

  Args:
    logs: A list of tuples (timestamp_usec, level, message).
  """
  logs_group = log_service_pb.UserAppLogGroup()
  for timestamp_usec, level, message in logs:
    log_line = logs_group.add_log_line()
    log_line.set_timestamp_usec(timestamp_usec)
    log_line.set_level(level)
    log_line.set_message(message)
  request = log_service_pb.FlushRequest()
  request.set_logs(logs_group.Encode())
  response = api_base_pb.VoidProto()
  apiproxy_stub_map.MakeSyncCall('logservice', 'Flush', request, response)

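# Hypothetical usage of _flush_logs() above; the `dispatcher` instance and
# the log contents are assumptions for illustration. This flushes one
# INFO-level line (level 1 in the App Engine numbering) stamped with the
# current time in microseconds.
import time

now_usec = int(time.time() * 1e6)
dispatcher._flush_logs([(now_usec, 1, 'request handled')])  # assumed instance
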
def _flush(self): """Internal version of flush() with no locking.""" logs = self.parse_logs() appid = os.environ['APPLICATION_ID'] if appid in ['apichecker', 'appscaledashboard']: self._clear() return formatted_logs = [{ 'timestamp': log[0] / 1e6, 'level': log[1], 'message': log[2] } for log in logs] payload = json.dumps({ 'service_name': appid, 'host': os.environ['MY_IP_ADDRESS'], 'logs': formatted_logs }) conn = httplib.HTTPSConnection(os.environ['NGINX_HOST'] + ":1443") headers = {'Content-Type': 'application/json'} conn.request('POST', '/logs/upload', payload, headers) response = conn.getresponse() self._clear() # AppScale: This currently causes problems when we try to call API requests # via new threads, so since we don't have support for the Logs API at the # moment, this return prevents the Exception that would be thrown from # occurring. # TODO(cgb, nlake44): Revisit this problem when we do decide to implement # the Logs API. return first_iteration = True while logs or first_iteration: first_iteration = False request = log_service_pb.FlushRequest() group = log_service_pb.UserAppLogGroup() byte_size = 0 n = 0 for entry in logs: if len(entry[2]) > LogsBuffer._MAX_LINE_SIZE: entry = list(entry) entry[2] = self._truncate(entry[2], LogsBuffer._MAX_LINE_SIZE) if byte_size + len(entry[2]) > LogsBuffer._MAX_FLUSH_SIZE: break line = group.add_log_line() line.set_timestamp_usec(entry[0]) line.set_level(entry[1]) line.set_message(entry[2]) byte_size += 1 + group.lengthString(line.ByteSize()) n += 1 assert n > 0 or not logs logs = logs[n:] request.set_logs(group.Encode()) response = api_base_pb.VoidProto() apiproxy_stub_map.MakeSyncCall('logservice', 'Flush', request, response)
def _Dynamic_Flush(self, request, unused_response):
  """Writes application-level log messages for a request to the Datastore."""
  if self.persist:
    group = log_service_pb.UserAppLogGroup(request.logs())
    new_app_logs = self.put_log_lines(group.log_line_list())
    self.write_app_logs(new_app_logs)

def _Dynamic_Flush(self, request, unused_response, request_id):
  """Writes application-level log messages for a request."""
  group = log_service_pb.UserAppLogGroup(request.logs())
  self._insert_app_logs(request_id, group.log_line_list())