def test_multiple_files(self):
    """
    test writing multiple events to multiple files and reading them back
    """
    granularity = 5
    events = [(b"aaa", b"111"),
              (b"bbb", b"222"),
              (b"ccc", b"333"), ]

    completed_file_count = 0

    # we expect the completed directory to be empty
    completed_list = os.listdir(_output_complete_dir)
    self.assertEqual(len(completed_list),
                     completed_file_count,
                     completed_list)

    writer = LogStreamWriter(_test_prefix,
                             _test_suffix,
                             granularity,
                             _output_work_dir,
                             _output_complete_dir)

    for header, data in events:
        writer.write(header, data)

        # wait for the current file to roll over
        time.sleep(granularity+1)
        writer.check_for_rollover()

        # we expect a new file in the completed directory
        completed_file_count += 1
        completed_list = os.listdir(_output_complete_dir)
        self.assertEqual(len(completed_list),
                         completed_file_count,
                         completed_list)

    log_stream = generate_log_stream_from_directory(_output_complete_dir)

    for event, actual in zip(events, log_stream):
        self.assertEqual(event, actual, (event, actual, ))

    self.assertRaises(StopIteration, next, log_stream)
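# A minimal sketch of the timing rule the sleep(granularity+1) calls above
# rely on: the current stream file becomes eligible for rollover once it has
# been open for at least granularity seconds. This helper is illustrative
# only; LogStreamWriter's actual rollover logic may differ.
def _rollover_due(file_open_time, granularity):
    """return True once the current stream file has aged past the granularity"""
    return (time.time() - file_open_time) >= granularity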
def test_single_item(self):
    """
    test writing a single event and reading it back
    """
    granularity = 5
    header = b"aaa"
    data = b"bbb"

    # we expect the completed directory to be empty
    completed_list = os.listdir(_output_complete_dir)
    self.assertEqual(len(completed_list), 0, completed_list)

    writer = LogStreamWriter(_test_prefix,
                             _test_suffix,
                             granularity,
                             _output_work_dir,
                             _output_complete_dir)

    writer.write(header, data)

    # wait for the current file to roll over
    time.sleep(granularity+1)
    writer.check_for_rollover()

    # we expect a single file in the completed directory
    completed_list = os.listdir(_output_complete_dir)
    self.assertEqual(len(completed_list), 1, completed_list)

    stream_file_path = os.path.join(_output_complete_dir, completed_list[0])
    log_stream = generate_log_stream_from_file(stream_file_path)

    read_header, read_data = next(log_stream)
    self.assertEqual(read_header, header)
    self.assertEqual(read_data, data)

    self.assertRaises(StopIteration, next, log_stream)
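# A minimal sketch of the fixture these tests assume: both test methods
# expect _output_work_dir and _output_complete_dir to exist and to start out
# empty. This setUp/tearDown pair is illustrative only and uses the standard
# library shutil module; the real test case may prepare its directories
# differently.
def setUp(self):
    """recreate empty work and complete directories before each test"""
    import shutil
    for directory in [_output_work_dir, _output_complete_dir, ]:
        if os.path.isdir(directory):
            shutil.rmtree(directory)
        os.makedirs(directory)

def tearDown(self):
    """remove the work and complete directories after each test"""
    import shutil
    for directory in [_output_work_dir, _output_complete_dir, ]:
        shutil.rmtree(directory, ignore_errors=True)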
def main():
    """
    main entry point
    """
    args = _parse_commandline()
    _initialize_logging(args.verbose)

    sub_address_list = _load_sub_list(args.sub_list_path)
    for address in sub_address_list:
        if is_ipc_protocol(address):
            prepare_ipc_path(address)

    for directory in [args.output_work_dir, args.output_complete_dir, ]:
        if not os.path.isdir(directory):
            _log.info("creating {0}".format(directory))
            os.makedirs(directory)

    context = zmq.Context()
    poller = zmq.Poller()

    sub_socket_list = list()
    for sub_socket_address in sub_address_list:
        sub_socket = context.socket(zmq.SUB)
        sub_socket.setsockopt(zmq.SUBSCRIBE, "".encode("utf-8"))
        _log.info("connecting sub_socket to {0}".format(sub_socket_address))
        sub_socket.connect(sub_socket_address)
        poller.register(sub_socket, zmq.POLLIN)
        sub_socket_list.append(sub_socket)

    stream_writer = LogStreamWriter(args.output_prefix,
                                    args.output_suffix,
                                    args.granularity,
                                    args.output_work_dir,
                                    args.output_complete_dir)

    halt_event = set_signal_handler()
    polling_interval_milliseconds = args.polling_interval * 1000
    while not halt_event.is_set():
        try:
            result_list = poller.poll(polling_interval_milliseconds)
        except zmq.ZMQError:
            instance = sys.exc_info()[1]
            if instance.errno == errno.EINTR and halt_event.is_set():
                break
            raise

        if len(result_list) == 0:
            stream_writer.check_for_rollover()
            continue

        for sub_socket, event in result_list:
            assert event == zmq.POLLIN, event

            # we expect topic, compressed header, compressed body
            _topic = sub_socket.recv()
            assert sub_socket.rcvmore
            compressed_header = sub_socket.recv()
            assert sub_socket.rcvmore
            compressed_data = sub_socket.recv()
            assert not sub_socket.rcvmore

            # write out what we received
            header = zlib.decompress(compressed_header)
            data = zlib.decompress(compressed_data)
            stream_writer.write(header, data)

    _log.debug("shutting down")
    for sub_socket in sub_socket_list:
        sub_socket.close()
    context.term()

    return 0
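# A minimal sketch of the sending side this subscriber expects: each event
# arrives as a three-frame multipart message of topic, zlib-compressed
# header, and zlib-compressed body. The function and the commented usage
# below are illustrative assumptions (the address and payloads are made up);
# they reuse the zmq and zlib imports this module already depends on.
def publish_event(pub_socket, header, data, topic=b""):
    """send one event in the frame layout that main() consumes"""
    pub_socket.send_multipart([topic,
                               zlib.compress(header),
                               zlib.compress(data)])

# usage sketch:
#     context = zmq.Context()
#     pub_socket = context.socket(zmq.PUB)
#     pub_socket.bind("ipc:///tmp/example_pub.socket")
#     publish_event(pub_socket, b"header bytes", b"data bytes")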