class ScribeHandler(logging.Handler):
    """Handler for sending python standard logging messages to a scribe stream.

    .. code-block:: python

        import clog.handlers, logging
        log = logging.getLogger(name)
        log.addHandler(clog.handlers.ScribeHandler('localhost', 3600, 'stream', retry_interval=3))

    :param host: hostname of scribe server
    :param port: port number of scribe server
    :param stream: name of the scribe stream logs will be sent to
    :param retry_interval: default 0, number of seconds to wait between retries
    """

    def __init__(self, host, port, stream, retry_interval=0):
        logging.Handler.__init__(self)
        self.stream = stream
        self.logger = ScribeLogger(host, port, retry_interval)

    def emit(self, record):
        """Format one record and forward it to the scribe stream.

        Failures are delegated to ``handleError`` per the
        ``logging.Handler`` contract so a broken scribe connection never
        crashes the application doing the logging.
        """
        try:
            msg = self.format(record)
            self.logger.log_line(self.stream, msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            # Narrowed from a bare `except:` so other BaseException
            # subclasses (e.g. GeneratorExit) propagate instead of being
            # silently routed to handleError().
            self.handleError(record)
class TestCLogScribeReportStatus(object):

    @pytest.fixture(autouse=True)
    def setup_sandbox(self):
        """Run each test against a live scribed sandbox with a fresh log dir.

        `pytest.yield_fixture` is deprecated (removed in pytest 6); a plain
        `pytest.fixture` supports yield-style teardown.
        """
        self.scribe_logdir = tempfile.mkdtemp()
        self.stream = 'foo'
        self.scribe_port = find_open_port()
        self.log_path = get_log_path(self.scribe_logdir, self.stream)
        self.logger = ScribeLogger(
            'localhost',
            self.scribe_port,
            retry_interval=10,
            report_status=mock.Mock(),
        )
        with scribed_sandbox(self.scribe_port, self.scribe_logdir):
            yield
        shutil.rmtree(self.scribe_logdir)

    def test_exception_in_raise_status(self):
        """Make sure socket is closed if exception is raised in report_status function."""
        def raise_exception_on_error(is_error, message):
            if is_error:
                raise Exception(message)

        self.logger.report_status = raise_exception_on_error
        self.logger.client.Log = mock.Mock(side_effect=IOError)
        # The original try/except wrapper passed silently when no exception
        # was raised at all; pytest.raises makes the expectation explicit.
        with pytest.raises(Exception):
            self.logger.log_line(self.stream, '12345678')
        assert not self.logger.connected
def setup_sandbox(self):
    """Create a temp scribe log dir and a ScribeLogger, run the test
    inside a scribed sandbox, and remove the log dir afterwards."""
    self.scribe_logdir = tempfile.mkdtemp()
    self.stream = 'foo'
    self.log_path = get_log_path(self.scribe_logdir, self.stream)
    self.scribe_port = find_open_port()
    self.logger = ScribeLogger(
        'localhost',
        self.scribe_port,
        retry_interval=10,
        report_status=mock.Mock(),
    )
    with scribed_sandbox(self.scribe_port, self.scribe_logdir):
        yield
    shutil.rmtree(self.scribe_logdir)
def check_create_default_loggers():
    """Initialize the module-level ``loggers`` list on first use."""
    global loggers
    # Compare to None explicitly: an empty list means logging was already
    # initialized but no backend was enabled, which is a distinct state.
    if loggers is None:
        loggers = []
        # Optional local-file logging (typically for development).
        if config.clog_enable_file_logging:
            if config.log_dir is None:
                raise ValueError(
                    'log_dir not set; set it or disable clog_enable_file_logging'
                )
            loggers.append(FileLogger())
        # Scribe logging unless explicitly disabled.
        if not config.scribe_disable:
            loggers.append(
                ScribeLogger(
                    config.scribe_host,
                    config.scribe_port,
                    config.scribe_retry_interval,
                )
            )
        if config.clog_enable_stdout_logging:
            loggers.append(StdoutLogger())
    if not loggers and not config.is_logging_configured:
        raise LoggingNotConfiguredError
def main():
    """Read OOM events from stdin and report them to scribe/paasta/SignalFx."""
    try:
        from clog.loggers import ScribeLogger
    except ImportError:
        print("Scribe logger unavailable, exiting.", file=sys.stderr)
        sys.exit(1)

    scribe_logger = ScribeLogger(host="169.254.255.254", port=1463, retry_interval=5)
    cluster = load_system_paasta_config().get_cluster()
    client = get_docker_client()
    events = capture_oom_events_from_stdin()
    for timestamp, hostname, container_id, process_name in events:
        try:
            docker_inspect = client.inspect_container(resource_id=container_id)
        except APIError:
            # Container may already be gone by the time we inspect it.
            continue
        env_vars = get_container_env_as_dict(docker_inspect)
        service = env_vars.get("PAASTA_SERVICE", "unknown")
        instance = env_vars.get("PAASTA_INSTANCE", "unknown")
        log_line = LogLine(
            timestamp=timestamp,
            hostname=hostname,
            container_id=container_id,
            cluster=cluster,
            service=service,
            instance=instance,
            process_name=process_name,
        )
        log_to_scribe(scribe_logger, log_line)
        log_to_paasta(log_line)
        send_sfx_event(service, instance, cluster)
def setup_sandbox(self):
    """Yield inside a running scribed sandbox backed by a temp log dir;
    the dir is removed after the sandbox shuts down."""
    self.scribe_logdir = tempfile.mkdtemp()
    self.stream = "foo"
    self.scribe_port = find_open_port()
    self.log_path = get_log_path(self.scribe_logdir, self.stream)
    self.logger = ScribeLogger(
        "localhost",
        self.scribe_port,
        retry_interval=10,
        report_status=mock.Mock(),
    )
    with scribed_sandbox(self.scribe_port, self.scribe_logdir):
        yield
    shutil.rmtree(self.scribe_logdir)
def main():
    """Stream OOM events from stdin and fan each one out to scribe,
    paasta logs, and SignalFx."""
    scribe_logger = ScribeLogger(host='169.254.255.254', port=1463, retry_interval=5)
    cluster = load_system_paasta_config().get_cluster()
    client = get_docker_client()
    events = capture_oom_events_from_stdin()
    for timestamp, hostname, container_id, process_name in events:
        try:
            docker_inspect = client.inspect_container(resource_id=container_id)
        except APIError:
            # Container may have been reaped already; skip it.
            continue
        env_vars = get_container_env_as_dict(docker_inspect)
        service = env_vars.get('PAASTA_SERVICE', 'unknown')
        instance = env_vars.get('PAASTA_INSTANCE', 'unknown')
        log_line = LogLine(
            timestamp=timestamp,
            hostname=hostname,
            container_id=container_id,
            cluster=cluster,
            service=service,
            instance=instance,
            process_name=process_name,
        )
        log_to_scribe(scribe_logger, log_line)
        log_to_paasta(log_line)
        send_sfx_event(service, instance, cluster)
def __init__(self, host, port, stream, retry_interval=0):
    """Build the scribe-backed logger for this handler, wrapping it in
    Zipkin tracing when tracing is enabled."""
    logging.Handler.__init__(self)
    self.stream = stream
    scribe_logger = ScribeLogger(host, port, retry_interval)
    self.logger = ZipkinTracing(scribe_logger) if use_zipkin() else scribe_logger
def __init__(self, host, port, stream, retry_interval=0):
    """Record the target stream and create the ScribeLogger used by emit()."""
    logging.Handler.__init__(self)
    self.logger = ScribeLogger(host, port, retry_interval)
    self.stream = stream
class TestCLogScribeLoggerLineSize(object):

    @pytest.fixture(autouse=True)
    def setup_sandbox(self):
        """Run every test against a live scribed sandbox.

        `pytest.yield_fixture` is deprecated (removed in pytest 6); a plain
        `pytest.fixture` supports yield-style teardown.
        """
        self.scribe_logdir = tempfile.mkdtemp()
        self.stream = "foo"
        self.scribe_port = find_open_port()
        self.log_path = get_log_path(self.scribe_logdir, self.stream)
        self.logger = ScribeLogger("localhost", self.scribe_port, retry_interval=10, report_status=mock.Mock())
        with scribed_sandbox(self.scribe_port, self.scribe_logdir):
            yield
        shutil.rmtree(self.scribe_logdir)

    def test_line_size_constants(self):
        assert MAX_LINE_SIZE_IN_BYTES == 50 * 1024 * 1024
        assert WARNING_LINE_SIZE_IN_BYTES == 5 * 1024 * 1024
        assert WHO_CLOG_LARGE_LINE_STREAM == "tmp_who_clog_large_line"

    def test_log_line_no_size_limit(self):
        line = create_test_line()
        self.logger._log_line_no_size_limit(self.stream, line)
        wait_on_log_data(self.log_path, line + b"\n")
        assert not self.logger.report_status.called

    @mock.patch("clog.loggers.ScribeLogger._log_line_no_size_limit")
    def test_normal_line_size(self, mock_log_line_no_size_limit):
        line = create_test_line()
        assert len(line) <= WARNING_LINE_SIZE_IN_BYTES
        self.logger.log_line(self.stream, line)
        assert not self.logger.report_status.called
        mock_log_line_no_size_limit.assert_called_once_with(self.stream, line)

    @mock.patch("clog.loggers.ScribeLogger._log_line_no_size_limit")
    def test_max_line_size(self, mock_log_line_no_size_limit):
        line = create_test_line(MAX_LINE_SIZE_IN_BYTES)
        assert len(line) > MAX_LINE_SIZE_IN_BYTES
        with pytest.raises(LogLineIsTooLongError):
            self.logger.log_line(self.stream, line)
        # `assert mock.called_with(...)` is always true (called_with is not an
        # assertion method, it just records a call); use assert_called_with.
        self.logger.report_status.assert_called_with(
            True,
            "The log line is dropped (line size larger than %r bytes)"
            % MAX_LINE_SIZE_IN_BYTES,
        )
        assert not mock_log_line_no_size_limit.called

    def test_large_msg(self):
        # We advertise support of messages up to 50 megs, so let's test that
        # we actually are able to log a 50 meg message to a real scribe server
        test_str = "0" * MAX_LINE_SIZE_IN_BYTES
        self.logger.log_line(self.stream, test_str)
        expected = test_str.encode("UTF-8")
        wait_on_log_data(self.log_path, expected + b"\n")

    @mock.patch("traceback.format_stack")
    @mock.patch("clog.loggers.ScribeLogger._log_line_no_size_limit")
    def test_warning_line_size(self, mock_log_line_no_size_limit, mock_traceback):
        line = create_test_line(WARNING_LINE_SIZE_IN_BYTES)
        assert len(line) > WARNING_LINE_SIZE_IN_BYTES
        assert len(line) <= MAX_LINE_SIZE_IN_BYTES
        self.logger.log_line(self.stream, line)
        # See test_max_line_size: called_with is not an assertion method.
        self.logger.report_status.assert_called_with(
            False,
            "The log line size is larger than %r bytes (monitored in '%s')"
            % (WARNING_LINE_SIZE_IN_BYTES, WHO_CLOG_LARGE_LINE_STREAM),
        )
        assert mock_log_line_no_size_limit.call_count == 2
        call_1 = mock.call(self.stream, line)
        origin_info = {}
        origin_info["stream"] = self.stream
        origin_info["line_size"] = len(line)
        # The code under test joins traceback.format_stack(), i.e. the mock's
        # return_value — joining the mock object itself was an accident that
        # only matched because both iterate to the empty string.
        origin_info["traceback"] = "".join(mock_traceback.return_value)
        origin_info_line = json.dumps(origin_info).encode("UTF-8")
        call_2 = mock.call(WHO_CLOG_LARGE_LINE_STREAM, origin_info_line)
        mock_log_line_no_size_limit.assert_has_calls([call_1, call_2])
class TestCLogScribeLoggerLineSize(object):

    @pytest.fixture(autouse=True)
    def setup_sandbox(self):
        """Run every test against a live scribed sandbox.

        `pytest.yield_fixture` is deprecated (removed in pytest 6); a plain
        `pytest.fixture` supports yield-style teardown.
        """
        self.scribe_logdir = tempfile.mkdtemp()
        self.stream = 'foo'
        self.scribe_port = find_open_port()
        self.log_path = get_log_path(self.scribe_logdir, self.stream)
        self.logger = ScribeLogger('localhost', self.scribe_port, retry_interval=10, report_status=mock.Mock())
        with scribed_sandbox(self.scribe_port, self.scribe_logdir):
            yield
        shutil.rmtree(self.scribe_logdir)

    def test_line_size_constants(self):
        assert MAX_LINE_SIZE_IN_BYTES == 50 * 1024 * 1024
        assert WARNING_LINE_SIZE_IN_BYTES == 5 * 1024 * 1024
        assert WHO_CLOG_LARGE_LINE_STREAM == 'tmp_who_clog_large_line'

    def test_log_line_no_size_limit(self):
        line = create_test_line()
        self.logger._log_line_no_size_limit(self.stream, line)
        wait_on_log_data(self.log_path, line + b'\n')
        assert not self.logger.report_status.called

    @mock.patch('clog.loggers.ScribeLogger._log_line_no_size_limit')
    def test_normal_line_size(self, mock_log_line_no_size_limit):
        line = create_test_line()
        assert len(line) <= WARNING_LINE_SIZE_IN_BYTES
        self.logger.log_line(self.stream, line)
        assert not self.logger.report_status.called
        mock_log_line_no_size_limit.assert_called_once_with(self.stream, line)

    @mock.patch('clog.loggers.ScribeLogger._log_line_no_size_limit')
    def test_max_line_size(self, mock_log_line_no_size_limit):
        line = create_test_line(MAX_LINE_SIZE_IN_BYTES)
        assert len(line) > MAX_LINE_SIZE_IN_BYTES
        with pytest.raises(LogLineIsTooLongError):
            self.logger.log_line(self.stream, line)
        # `assert mock.called_with(...)` is always true (called_with is not an
        # assertion method, it just records a call); use assert_called_with.
        self.logger.report_status.assert_called_with(
            True,
            'The log line is dropped (line size larger than %r bytes)'
            % MAX_LINE_SIZE_IN_BYTES)
        assert not mock_log_line_no_size_limit.called

    def test_large_msg(self):
        # We advertise support of messages up to 50 megs, so let's test that
        # we actually are able to log a 50 meg message to a real scribe server
        test_str = '0' * MAX_LINE_SIZE_IN_BYTES
        self.logger.log_line(self.stream, test_str)
        expected = test_str.encode('UTF-8')
        wait_on_log_data(self.log_path, expected + b'\n')

    @mock.patch('traceback.format_stack')
    @mock.patch('clog.loggers.ScribeLogger._log_line_no_size_limit')
    def test_warning_line_size(self, mock_log_line_no_size_limit, mock_traceback):
        line = create_test_line(WARNING_LINE_SIZE_IN_BYTES)
        assert len(line) > WARNING_LINE_SIZE_IN_BYTES
        assert len(line) <= MAX_LINE_SIZE_IN_BYTES
        self.logger.log_line(self.stream, line)
        # See test_max_line_size: called_with is not an assertion method.
        self.logger.report_status.assert_called_with(
            False,
            'The log line size is larger than %r bytes (monitored in \'%s\')'
            % (WARNING_LINE_SIZE_IN_BYTES, WHO_CLOG_LARGE_LINE_STREAM))
        assert mock_log_line_no_size_limit.call_count == 2
        call_1 = mock.call(self.stream, line)
        origin_info = {}
        origin_info['stream'] = self.stream
        origin_info['line_size'] = len(line)
        # The code under test joins traceback.format_stack(), i.e. the mock's
        # return_value — joining the mock object itself was an accident that
        # only matched because both iterate to the empty string.
        origin_info['traceback'] = ''.join(mock_traceback.return_value)
        origin_info_line = json.dumps(origin_info).encode('UTF-8')
        call_2 = mock.call(WHO_CLOG_LARGE_LINE_STREAM, origin_info_line)
        mock_log_line_no_size_limit.assert_has_calls([call_1, call_2])
def construct_scribelogger_with_mocked_tsocket(self, timeout=None):
    """Yield a ScribeLogger whose underlying thrift TSocket is mocked out,
    passing logging_timeout through only when one was supplied."""
    with mock.patch('thriftpy.transport.socket.TSocket', spec=TSocket):
        if timeout is not None:
            yield ScribeLogger(HOST, PORT, RETRY, logging_timeout=timeout)
        else:
            yield ScribeLogger(HOST, PORT, RETRY)