def test_replay_log(self):
    """Replay serves entries below the stored index without advancing
    it; once the iterator moves past that index, the counter is
    persisted again.
    """
    for i in ("a", "b", "c", "d", "e", "f", "z"):
        self.log.info(i)
    # Pretend a previous consumer already saw the first three entries.
    yield self.client.set("/logs", json.dumps({"next-log-index": 3}))

    # replay=True restarts delivery from the beginning of the log.
    # (renamed from `iter` to avoid shadowing the builtin)
    log_iter = LogIterator(self.client, replay=True, seen_block_size=1)
    entry = yield log_iter.next()
    self.assertEqual(entry["msg"], "a")
    entry = yield log_iter.next()
    self.assertEqual(entry["msg"], "b")

    # make sure we haven't updated the last seen index.
    data, stat = yield self.client.get("/logs")
    self.assertEqual(json.loads(data), {"next-log-index": 3})

    # now if we advance past the last seen index, we'll start
    # updating the counter.
    for i in range(4):
        entry = yield log_iter.next()
    self.assertEqual(entry["msg"], "f")

    # make sure we updated the last seen index.
    data, stat = yield self.client.get("/logs")
    self.assertEqual(json.loads(data), {"next-log-index": 6})
def test_iter_sans_container(self):
    """next() on an empty log returns a deferred that only fires once
    an entry is actually written.
    """
    # (renamed from `iter` to avoid shadowing the builtin)
    log_iter = LogIterator(self.client)
    entry_d = log_iter.next()
    # make sure it doesn't blow up
    yield self.poke_zk()
    self.log.info("apple")
    entry = yield entry_d
    self.assertEqual(entry["msg"], "apple")
def test_flush_log_last_seen(self):
    """A freshly constructed iterator resumes from the last-seen index
    persisted under /logs by its predecessor.
    """
    for i in ("a", "b", "c"):
        self.log.info(i)
    # seen_block_size=1 flushes the index after every consumed entry.
    # (renamed from `iter` to avoid shadowing the builtin)
    log_iter = LogIterator(self.client, seen_block_size=1)
    yield log_iter.next()
    yield log_iter.next()

    # Now if we pick up again, we should continue with the last message
    log_iter = LogIterator(self.client, seen_block_size=1)
    entry = yield log_iter.next()
    data, stat = yield self.client.get("/logs")
    self.assertTrue(data)
    self.assertEqual(json.loads(data), {"next-log-index": 3})
    self.assertEqual(entry["msg"], "c")
    self.assertEqual(stat["version"], 3)
def setUp(self):
    """Prepare each test with a configured log and a default iterator.

    Order matters: the base fixture must be up before the log is
    configured, and the iterator needs the resulting client.
    """
    yield super(LogIteratorTest, self).setUp()
    self.log = yield self.get_configured_log()
    self.iter = LogIterator(self.client)
class LogIteratorTest(LogTestBase):
    """Tests for LogIterator consumption of the shared /logs-backed log."""

    @inlineCallbacks
    def setUp(self):
        yield super(LogIteratorTest, self).setUp()
        self.log = yield self.get_configured_log()
        self.iter = LogIterator(self.client)

    @inlineCallbacks
    def test_get_next_log(self):
        """A written record comes back with its level name intact."""
        self.log.info("hello world")
        yield self.poke_zk()
        entry = yield self.iter.next()
        self.assertEqual(entry["levelname"], "INFO")

    @inlineCallbacks
    def test_flush_log_last_seen(self):
        """A fresh iterator resumes from the persisted last-seen index."""
        for i in ("a", "b", "c"):
            self.log.info(i)
        # seen_block_size=1 flushes the index after every consumed entry.
        # (locals renamed from `iter` to avoid shadowing the builtin)
        log_iter = LogIterator(self.client, seen_block_size=1)
        yield log_iter.next()
        yield log_iter.next()

        # Now if we pick up again, we should continue with the last message
        log_iter = LogIterator(self.client, seen_block_size=1)
        entry = yield log_iter.next()
        data, stat = yield self.client.get("/logs")
        self.assertTrue(data)
        self.assertEqual(json.loads(data), {"next-log-index": 3})
        self.assertEqual(entry["msg"], "c")
        self.assertEqual(stat["version"], 3)

    @inlineCallbacks
    def test_iter_sans_container(self):
        """next() on an empty log blocks until an entry is written."""
        log_iter = LogIterator(self.client)
        entry_d = log_iter.next()
        # make sure it doesn't blow up
        yield self.poke_zk()
        self.log.info("apple")
        entry = yield entry_d
        self.assertEqual(entry["msg"], "apple")

    @inlineCallbacks
    def test_replay_log(self):
        """Replay serves entries below the stored index without advancing
        it; once past that index the counter is persisted again.
        """
        for i in ("a", "b", "c", "d", "e", "f", "z"):
            self.log.info(i)
        # Pretend a previous consumer already saw the first three entries.
        yield self.client.set("/logs", json.dumps({"next-log-index": 3}))

        log_iter = LogIterator(self.client, replay=True, seen_block_size=1)
        entry = yield log_iter.next()
        self.assertEqual(entry["msg"], "a")
        entry = yield log_iter.next()
        self.assertEqual(entry["msg"], "b")

        # make sure we haven't updated the last seen index.
        data, stat = yield self.client.get("/logs")
        self.assertEqual(json.loads(data), {"next-log-index": 3})

        # now if we advance past the last seen index, we'll start
        # updating the counter.
        for i in range(4):
            entry = yield log_iter.next()
        self.assertEqual(entry["msg"], "f")

        # make sure we updated the last seen index.
        data, stat = yield self.client.get("/logs")
        self.assertEqual(json.loads(data), {"next-log-index": 6})
def debug_log(config, environment, log, options):
    """Stream the distributed debug log to stdout or a file.

    Enables the global debug-log flag, tails log entries (optionally
    replaying history via ``options.replay``), filters them with the
    user's include/exclude glob patterns, and emits matches through a
    standard logging handler at/above ``options.level``. The debug flag
    is cleared and the connection closed on exit, even on interrupt.

    :param config: unused here; kept for the command interface.
    :param environment: environment whose provider supplies the client.
    :param log: CLI logger for status messages.
    :param options: parsed command options (output, level, limit,
        replay, include, exclude).
    """
    provider = environment.get_machine_provider()
    client = yield provider.connect()
    log.info("Enabling distributed debug log.")
    settings_manager = GlobalSettingsStateManager(client)
    yield settings_manager.set_debug_log(True)

    if not options.limit:
        log.info("Tailing logs - Ctrl-C to stop.")

    iterator = LogIterator(client, replay=options.replay)

    # Setup the logging output with the user specified file.
    if options.output == "-":
        log_file = sys.stdout
    else:
        log_file = open(options.output, "a")
    handler = logging.StreamHandler(log_file)
    log_level = logging.getLevelName(options.level)
    handler.setLevel(log_level)
    formatter = logging.Formatter(
        "%(asctime)s %(context)s: %(name)s %(levelname)s: %(message)s")
    handler.setFormatter(formatter)

    def match(data):
        # Exclusions win over inclusions; each pattern is tried against
        # the agent-local name, the full context, and the logger name.
        local_name = data["context"].split(":")[-1]
        if options.exclude:
            for exclude in options.exclude:
                if fnmatch(local_name, exclude) or \
                   fnmatch(data["context"], exclude) or \
                   fnmatch(data["name"], exclude):
                    return False
        if options.include:
            for include in options.include:
                if fnmatch(local_name, include) or \
                   fnmatch(data["context"], include) or \
                   fnmatch(data["name"], include):
                    return True
            return False
        return True

    count = 0
    try:
        while True:
            entry = yield iterator.next()
            if not match(entry):
                continue
            # json doesn't distinguish lists v. tuples but python string
            # formatting doesn't accept lists.
            entry["args"] = tuple(entry["args"])
            record = logging.makeLogRecord(entry)
            if entry["levelno"] < handler.level:
                continue
            handler.handle(record)
            count += 1
            if options.limit is not None and count == options.limit:
                break
    finally:
        yield settings_manager.set_debug_log(False)
        client.close()
        # Fix: close the output file we opened (the original leaked it);
        # never close stdout, which we don't own.
        if log_file is not sys.stdout:
            log_file.close()