def run_consumer():
    logger = logging.getLogger('consumer')
    logger.setLevel(logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s'))
    logger.addHandler(handler)

    consumer = Consumer(conf)
    consumer.subscribe(topics=config.resolve_config("CONSUMER_TOPICS"))

    try:
        while True:
            msg = consumer.poll(timeout=1.0)
            if msg is None:
                continue
            if msg.error():
                raise KafkaException(msg.error())
            else:
                # Proper message
                # sys.stderr.write('%% %s [%d] at offset %d with key %s:\n' %
                #                  (msg.topic(), msg.partition(), msg.offset(),
                #                   str(msg.key())))
                print(msg.value())
    except KeyboardInterrupt:
        sys.stderr.write('%% Aborted by user\n')
    finally:
        # Close down consumer to commit final offsets.
        consumer.close()
def commit_offsets(self, consumer_id: str, offsets: List[TopicPartition]):
    config = Config.get_instance()
    consumer = Consumer({
        "group.id": consumer_id,
        **config.create_confluent_config()
    })
    consumer.commit(offsets=offsets, asynchronous=False)
    consumer.close()
def target_topic_consumer(unittest_config: Config, target_topic: Tuple[str, int]) -> Consumer:
    consumer = Consumer(
        {
            "group.id": "asdf",
            "enable.auto.commit": False,
            "enable.partition.eof": False,
            **unittest_config.create_confluent_config(),
        }
    )
    consumer.assign([
        TopicPartition(topic=target_topic[0], partition=i, offset=0)
        for i in range(target_topic[1])
    ])
    yield consumer
    consumer.close()
class KafmanConsumer(metaclass=Singleton):
    """Settings-driven Kafka consumer that emits received messages on an event bus."""

    def __init__(self):
        super().__init__()
        self.topic = None
        self.consumer = None
        self.started = False
        self.bus = EventBus.get(CONSUMER_BUS)

    def start(self, settings: dict) -> None:
        """Create the underlying consumer from the given settings."""
        if self.consumer is None:
            self.consumer = Consumer(settings)
            self.started = True

    def stop(self) -> None:
        """Dispose of the underlying consumer and stop the polling loop."""
        if self.consumer is not None:
            del self.consumer
            self.consumer = None
            self.started = False

    def consume(self, topics: List[str]) -> None:
        """Start consuming the given topics on a daemon thread."""
        if self.started:
            tr = threading.Thread(target=self._consume, args=(topics,))
            tr.daemon = True
            tr.start()

    def _consume(self, topics: List[str]) -> None:
        """Poll the subscribed topics and emit each message on the event bus."""
        self.consumer.subscribe(topics)
        try:
            while self.started:
                message = self.consumer.poll(POLLING_INTERVAL)
                if message is None:
                    continue
                elif not message.error():
                    msg = message.value().decode(Charset.UTF_8.value)
                    self.bus.emit(MSG_CONS_EVT, message=msg, topic=message.topic())
                elif message.error().code() == PARTITION_EOF:
                    print(f"End of partition reached {message.topic()}/{message.partition()}")
                else:
                    print(f"Error occurred: {message.error().str()}")
        except KeyboardInterrupt:
            print("Keyboard interrupted")
        finally:
            if self.consumer:
                self.consumer.close()
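# A minimal usage sketch for the class above (not part of the original
# snippet); the broker address, group id and topic name are placeholders,
# and it assumes the event bus side is wired up elsewhere.
kafman = KafmanConsumer()
kafman.start({'bootstrap.servers': 'localhost:9092', 'group.id': 'kafman-demo'})
kafman.consume(['demo-topic'])
# ... later, e.g. from the UI or a signal handler:
kafman.stop()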
class KafkaConsumer(BaseKafkaConsumer):
    def __init__(self, config, logger):
        self._config = config["consumer"]
        conf = self._config["conf"]
        conf.setdefault("group.id", str(uuid.uuid1()))
        self.autocommit_enabled = conf.get("enable.auto.commit", True)
        self._logger = logger
        internal_log_path = self._config.get("internal_log_path")
        if internal_log_path:
            debug_logger = logging.getLogger("debug_consumer")
            timestamp = time.strftime("_%d%m%Y_")
            debug_logger.addHandler(
                logging.FileHandler("{}/kafka_consumer_debug{}{}.log".format(
                    internal_log_path, timestamp, os.getpid())))
            conf["logger"] = debug_logger
        self._consumer = Consumer(**conf)

    def subscribe(self, topics=None):
        topics = topics or list(self._config["topics"].values())
        self._consumer.subscribe(topics)

    def poll(self):
        msg = self._consumer.poll(self._config["poll_timeout"])
        if msg is not None:
            err = msg.error()
            if err:
                if err.code() == KafkaError._PARTITION_EOF:
                    return None
                else:
                    self._logger.info(
                        "KafkaConsumer Error {} at pid {}: topic={} partition=[{}] offset={}\n"
                        .format(err.code(), os.getpid(), msg.topic(),
                                msg.partition(), msg.offset()))
                    raise KafkaException(err)
            if msg.value():
                return msg

    def commit_offset(self, msg):
        if msg is not None:
            if self.autocommit_enabled:
                self._consumer.store_offsets(msg)
            else:
                # `async` is a reserved word in Python 3.7+; the parameter is `asynchronous`.
                self._consumer.commit(msg, asynchronous=False)

    def close(self):
        self._consumer.close()
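# A hedged example of the `config` dict shape that KafkaConsumer.__init__
# above appears to expect, reconstructed from its attribute accesses (not
# taken from the original project); all values are placeholders.
example_config = {
    "consumer": {
        "conf": {
            "bootstrap.servers": "localhost:9092",  # placeholder broker
            "enable.auto.commit": True,             # drives store_offsets() vs commit()
        },
        "topics": {"main": "my_topic"},             # subscribe() falls back to these values
        "poll_timeout": 1.0,                        # seconds passed to poll()
        "internal_log_path": "/tmp/kafka-logs",     # optional debug-log directory
    }
}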
def kafka_consume_expected(topic, group='0', timeout=1.0,
                           mfilter=lambda x: True,
                           validator=lambda x: None,
                           after_subscribe=lambda: None):
    consumer = Consumer({
        'bootstrap.servers': KAFK,
        'group.id': group,
        'auto.offset.reset': 'earliest'  # earliest _committed_ offset
    })
    msgs = []
    topics = consumer.list_topics(topic)  # promises to create topic
    logging.debug("Topic state: %s", topics.topics)
    if topics.topics[topic].error is not None:
        logging.warning("Error subscribing to topic: %s", topics.topics)
        return msgs
    consumer.subscribe([topic])
    time.sleep(5)  # for kafka to rebalance consumer groups
    after_subscribe()
    logging.debug("Waiting for messages...")
    while True:
        msg = consumer.poll(timeout)
        if msg is None:
            break
        logging.info("Seen message: %r %r", msg.key(), msg.value())
        if msg.error():
            logging.warning("Consumer error: {}".format(msg.error()))
            continue
        if mfilter(msg):
            validator(msg)
            msgs.append(msg)
            consumer.commit()
    consumer.close()
    return msgs
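# A minimal usage sketch (not part of the original snippet); the topic name,
# group and key filter are illustrative only, and KAFK is assumed to point at
# a reachable broker.
received = kafka_consume_expected(
    'orders',
    group='integration-tests',
    timeout=2.0,
    mfilter=lambda m: m.key() == b'expected-key')
assert received, "expected at least one matching message"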
def pay_order():
    consumer = Consumer({
        'bootstrap.servers': os.environ.get('BROKER'),
        'group.id': 'consumer-pay-id',
        'auto.offset.reset': 'earliest'
    })
    consumer.subscribe(['pay_order'])
    try:
        while True:
            msg = consumer.poll(1.0)
            if msg is None:
                continue
            if msg.error():
                logging.error("Consumer error: {}".format(msg.error()))
                continue
            data = json.loads(msg.value())
            OrderPayStory().execute(data.get('order_id'))
    finally:
        # The bare `while True` never falls through, so close the consumer
        # in a finally block instead of after the loop.
        consumer.close()
def deamon():
    """ Termite Client """
    group = ""
    KAFKA_HOST = ""
    KAFKA_TOPIC = ""
    print(KAFKA_HOST)
    c = Consumer({
        "bootstrap.servers": KAFKA_HOST,
        'group.id': group,
    })
    c.subscribe([KAFKA_TOPIC])
    running = True
    while running:
        msg = c.poll(1)
        if msg is None:
            continue
        if not msg.error():
            data = json.loads(msg.value())
            print("receive msg:", data)
            kwargs = {
                "work_id": data["Work_id"],
                "flow_id": data["Flow_id"],
                "cid": data["Cid"]
            }
            # Video beauty-tag detection
            if data.get("Work", "") == "video_tag_detect":
                t_video_tag_detect(**kwargs)
            else:
                pass
        else:
            if msg.error().code() == KafkaError._PARTITION_EOF:
                print("Skip-Error Message-Topic: {} Partition: {} Offset: {} Error: {}".format(
                    msg.topic(), msg.partition(), msg.offset(), msg.error()))
            else:
                print("Error Message: {}".format(msg.error()))
        time.sleep(0.01)
    c.close()
def order_channel():
    consumer = Consumer({
        'bootstrap.servers': os.environ.get('BROKER'),
        'group.id': 'consumer-order-id',
        'auto.offset.reset': 'earliest'
    })
    consumer.subscribe([
        'order_reserved',
        'order_paid',
        'order_pay_failed',
        'order_reserve_rejected'
    ])
    try:
        while True:
            msg = consumer.poll(1.0)
            if msg is None:
                continue
            if msg.error():
                logging.error("Consumer error: {}".format(msg.error()))
                continue
            data = json.loads(msg.value())
            topic = msg.topic()

            # TODO For demo
            if topic == 'order_reserved':
                OrderSaga().pay(data.get('order_id'))
                continue
            if topic == 'order_paid':
                OrderSaga().approve(data.get('order_id'))
                continue
            if topic == 'order_pay_failed':
                OrderSaga().reject_reserve(data.get('order_id'))
                continue
            if topic == 'order_reserve_rejected':
                OrderSaga().cancel(data.get('order_id'))
                continue
    finally:
        # The loop never falls through, so close the consumer in a finally block.
        consumer.close()
class BreadCrumbDataConsumer:
    _logger = logging.getLogger('BreadCrumbDataConsumer')

    def __init__(self):
        kafka_configs = KafkaHelper.get_kafka_configs()
        kafka_configs['group.id'] = 'python_breadcrumb_data_consumer'
        kafka_configs['auto.offset.reset'] = 'earliest'
        self._consumer = Consumer(kafka_configs)
        self._bread_crumb_repo = BreadCrumbRepository()
        self._trips_stop_data = dict()

    def consume_breadcrumb_records(self):
        self._logger.info("Starting breadcrumb data consumer ...")
        self._consumer.subscribe([STOP_EVENT_TOPIC, BREADCRUMB_DATA_TOPIC])
        stop_events_records_count = 0
        consumed_breadcrumb_records_count = 0
        bread_crumb_records_saved_to_db_count = 0
        breadcrumbs = list()
        last_saved_to_db = datetime.now()
        try:
            while True:
                duration_from_last_saved_to_db = datetime.now() - last_saved_to_db
                if len(breadcrumbs) >= 50_000 or (
                        len(breadcrumbs) > 0 and
                        duration_from_last_saved_to_db.total_seconds() > (60 * 2)):
                    self._bread_crumb_repo.bulk_save_breadcrumbs(
                        breadcrumbs, self._trips_stop_data)
                    bread_crumb_records_saved_to_db_count += len(breadcrumbs)
                    breadcrumbs.clear()
                    last_saved_to_db = datetime.now()
                    self._logger.info(
                        'Number of breadcrumb records consumed = {}, stop event records consumed = {}, records saved to db = {}'
                        .format(consumed_breadcrumb_records_count,
                                stop_events_records_count,
                                bread_crumb_records_saved_to_db_count))
                msg = self._consumer.poll(1.0)
                if msg is None:
                    continue
                elif msg.error():
                    self._logger.error('error: {}'.format(msg.error()))
                else:
                    msg_topic = msg.topic()
                    message_data = msg.value().decode("utf-8")
                    if msg_topic == BREADCRUMB_DATA_TOPIC:
                        consumed_breadcrumb_records_count += 1
                        self.process_bread_crumb_record(breadcrumbs, message_data)
                    elif msg_topic == STOP_EVENT_TOPIC:
                        stop_events_records_count += 1
                        self.process_stop_event_records(message_data)
                    self._logger.debug(
                        'Number of breadcrumb records consumed = {}, stop event records consumed = {}'
                        .format(consumed_breadcrumb_records_count,
                                stop_events_records_count))
        finally:
            self._consumer.close()
            self._bread_crumb_repo.bulk_save_breadcrumbs(
                breadcrumbs, self._trips_stop_data)

    def process_bread_crumb_record(self, breadcrumbs, message_data):
        try:
            breadcrumb = BreadCrumb.parse_raw(message_data)
            breadcrumbs.append(breadcrumb)
        except Exception as ex:
            self._logger.debug('Encountered an error parsing a bread crumb: %s', ex)

    def process_stop_event_records(self, message_data):
        try:
            trip_stop_dict = json.loads(message_data)
            trip_id = list(trip_stop_dict.keys())[0]
            if trip_id not in self._trips_stop_data.keys():
                trip_stop_events_df = pd.read_json(list(trip_stop_dict.values())[0])
                first_row = trip_stop_events_df.iloc[0]
                self._trips_stop_data[trip_id] = {
                    'route_id': first_row['route_number'],
                    'service_key': first_row['service_key'],
                    'direction': first_row['direction']
                }
        except Exception as ex:
            self._logger.debug('Encountered an error parsing a stop events record: %s', ex)
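# A minimal entry-point sketch (not part of the original snippet); it assumes
# KafkaHelper.get_kafka_configs() supplies the connection details.
if __name__ == '__main__':
    BreadCrumbDataConsumer().consume_breadcrumb_records()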
class TimeOrderedGeneratorWithTimeout(GeneratorInterface):
    """
    A general generator which can read multiple topics and merge their messages in time order.
    A message must be emitted at (arrival_system_time + latency_ms).
    In batch mode (until reaching the first EOP on each stream) the generator will not discard any messages.
    """

    def __init__(self,
                 broker,
                 groupid,
                 topics_infos: List[TopicInfo],
                 latency_ms,
                 commit_interval_sec=None,
                 group_by_time=False,
                 begin_timestamp=None,
                 begin_flag=None,
                 end_timestamp=None,
                 end_flag=None,
                 heartbeat_interval_ms=-1):
        """
        :param broker: Broker to connect to.
        :param groupid: Group id of the consumer.
        :param topics_infos: [TopicInfo()] - list of TopicInfo objects.
        :param latency_ms: (integer >= 0) Latency to wait before serving a message.
            After this, messages with lower or equal timestamps will be discarded.
        :param commit_interval_sec: How many seconds to wait between commits. -1 does not commit with the given group id.
        :param group_by_time: Group messages with the same timestamp. This will yield a list of messages.
        :param begin_timestamp: Timestamp of the kafka messages where the generator will start.
        :param begin_flag: BEGINNING, CONTINUE, LIVE - CONTINUE will continue from the last committed offset.
            If there was no committed offset, it will start from the end of the stream.
        :param end_timestamp: Timestamp where to end the reading.
        :param end_flag: NEVER, END_OF_PARTITION
        :param heartbeat_interval_ms: -1 does not produce heartbeats. After every interval it will produce a
            HeartBeat typed message with the timestamp.
        """
        if begin_timestamp is not None and begin_flag is not None:
            raise Exception(
                'You can not set the begin timestamp and a flag at the same time.')
        if end_timestamp is not None and end_flag is not None:
            raise Exception(
                'You can not set the end timestamp and a flag at the same time.')
        if begin_timestamp is not None and end_timestamp is not None and begin_timestamp >= end_timestamp:
            raise Exception(
                'The begin timestamp is larger than the end timestamp.')
        if begin_flag is not None and end_flag is not None and \
                begin_flag == BeginFlag.LIVE and end_flag == EndFlag.END_OF_PARTITION:
            raise Exception(
                'You can not start live and process until the end of the streams.')
        if end_flag is not None and not (end_flag == EndFlag.END_OF_PARTITION or end_flag == EndFlag.NEVER):
            raise Exception(
                'Unknown end flag: {}. Please use the given enum to use a proper end flag.'
                .format(end_flag))
        self.end_ts = end_timestamp
        self.end_flag = end_flag
        self.commit_interval_sec = commit_interval_sec
        self.latency_ms = latency_ms
        self.group_by_time = group_by_time
        self.max_poll_interval_ms = 5 * 60 * 1000
        self.consumer = Consumer({
            'bootstrap.servers': broker,
            'group.id': groupid,
            'enable.auto.commit': False,
            'auto.offset.reset': 'earliest'
            if begin_flag == BeginFlag.CONTINUE_OR_BEGINNING else 'latest',
            'fetch.wait.max.ms': 20,
            'max.poll.interval.ms': self.max_poll_interval_ms,
            'enable.partition.eof': True
        })
        self.last_poll = None
        self.tps = []
        self.queues = {}
        self.messages_to_be_committed = {}
        self.begin_timestamp = begin_timestamp
        for ti in topics_infos:
            topic_name = ti.topic
            self.messages_to_be_committed[topic_name] = {
                'last_msg': None,
                'committed': True
            }
            if begin_timestamp is not None:
                self.tps.extend(
                    self.consumer.offsets_for_times([
                        TopicPartition(topic_name,
                                       partition=ti.partition,
                                       offset=begin_timestamp)
                    ]))
            elif begin_flag is not None:
                if begin_flag == BeginFlag.BEGINNING:
                    self.tps.append(
                        TopicPartition(topic_name,
                                       partition=ti.partition,
                                       offset=OFFSET_BEGINNING))
                elif begin_flag in (BeginFlag.CONTINUE,
                                    BeginFlag.CONTINUE_OR_BEGINNING):
                    self.tps.append(
                        TopicPartition(topic_name,
                                       partition=ti.partition,
                                       offset=OFFSET_STORED))
                elif begin_flag == BeginFlag.LIVE:
                    self.tps.append(
                        TopicPartition(topic_name,
                                       partition=ti.partition,
                                       offset=OFFSET_END))
                else:
                    raise Exception(
                        'Unknown begin flag. Please use the enum to provide a proper begin flag.')
            else:
                self.tps.append(
                    TopicPartition(topic_name,
                                   partition=ti.partition,
                                   offset=OFFSET_END))
            end_offset = None
            if end_flag is not None and end_flag == EndFlag.END_OF_PARTITION:
                end_offset = self.consumer.get_watermark_offsets(
                    TopicPartition(topic_name, 0))[1] - 1
            if end_offset is None or end_offset >= 0:
                self.queues[topic_name] = Topic(topic_name,
                                                self.consumer,
                                                end_offset=end_offset,
                                                partition=ti.partition,
                                                drop=ti.drop)
        self.consumer.assign(self.tps)
        self.last_commit = time.time()
        self.running = True
        self.heartbeat_interval_ms = heartbeat_interval_ms
        self.next_hb = None

    def stopGenerator(self):
        self.running = False

    def _serve_messages(self, message_to_serve):
        if self.commit_interval_sec is not None and self.group_by_time:
            for msg in message_to_serve:
                self.messages_to_be_committed[msg.topic()]['last_msg'] = msg
                self.messages_to_be_committed[msg.topic()]['committed'] = False

        # serve messages
        if self.group_by_time:
            yield message_to_serve
        else:
            for msg in message_to_serve:
                self.messages_to_be_committed[msg.topic()]['last_msg'] = msg
                self.messages_to_be_committed[msg.topic()]['committed'] = False
                yield msg
                if not self.running:
                    break

        # commit messages when they were delivered
        current_time = time.time()
        if self.commit_interval_sec is not None and (
                current_time - self.last_commit) > self.commit_interval_sec:
            for k in self.messages_to_be_committed.keys():
                if not self.messages_to_be_committed[k]['committed']:
                    self.consumer.commit(
                        self.messages_to_be_committed[k]['last_msg'])
                    self.messages_to_be_committed[k]['committed'] = True
            self.last_commit = current_time

    def _serve_heartbeat(self, current_timestamp_ms):
        if self.next_hb is None:
            if self.begin_timestamp is not None:
                self.next_hb = self.begin_timestamp
            else:
                self.next_hb = current_timestamp_ms
        while self.next_hb <= current_timestamp_ms:
            yield HeartBeat(self.next_hb)
            self.next_hb += self.heartbeat_interval_ms

    def _can_serve(self):
        min_ets = min([
            q.queue[0].message.timestamp()[1]
            for q in self.queues.values() if len(q.queue) > 0
        ], default=-1)
        if min_ets == -1:
            return None
        deadline = getSystemTimestamp() - self.latency_ms
        if all([q.can_be_emitted(min_ets) for q in self.queues.values()]) and \
                any([q.queue[0].ts < deadline for q in self.queues.values()
                     if len(q.queue) > 0 and q.queue[0].message.timestamp()[1] == min_ets]):
            return min_ets
        else:
            return None

    def getMessages(self):
        while self.running:
            if all([v.stopped for v in self.queues.values()]):
                message_to_serve = []
                for q in self.queues.values():
                    message_to_serve.extend(q.queue)
                message_to_serve = [m.message for m in message_to_serve]
                message_to_serve.sort(key=lambda x: x.timestamp()[1])
                while len(message_to_serve) > 0:
                    ts = message_to_serve[0].timestamp()[1]
                    serve_it = []
                    while len(message_to_serve) > 0 and message_to_serve[0].timestamp()[1] == ts:
                        serve_it.append(message_to_serve.pop(0))
                    if not self.heartbeat_interval_ms == -1:
                        yield from self._serve_heartbeat(ts)
                    yield from self._serve_messages(serve_it)
                logging.info('Exiting from generator.')
                break
            self.last_poll = getSystemTimestamp()
            msg = self.consumer.poll(0.001)
            if msg is not None:
                if msg.error():
                    if msg.error().code() == KafkaError._PARTITION_EOF:
                        if msg.topic() in self.queues:
                            self.queues[msg.topic()].first_eop_reached = True
                            self.queues[msg.topic()].end_of_partition = True
                    else:
                        logging.error('Unhandled error: {}'.format(msg.error()))
                        break
                else:
                    self.queues[msg.topic()].end_of_partition = False
                    if self.end_ts is not None and msg.timestamp()[1] > self.end_ts:
                        self.queues[msg.topic()].stop_topic()
                    else:
                        self.queues[msg.topic()].add_message(msg)
            while self.running:
                event_ts_to_serve = self._can_serve()
                if event_ts_to_serve is None or \
                        self.max_poll_interval_ms - (getSystemTimestamp() - self.last_poll) < 30000:
                    if self.end_flag == EndFlag.NEVER and self.heartbeat_interval_ms != -1 \
                            and any([q.end_of_partition for q in self.queues.values()]):
                        if self.next_hb is None:
                            self.next_hb = min(
                                getSystemTimestamp() - self.latency_ms,
                                min([
                                    q.queue[0].message.timestamp()[1]
                                    for q in self.queues.values()
                                    if len(q.queue) > 0
                                ], default=sys.maxsize))
                        if self.next_hb < min(
                                getSystemTimestamp() - self.latency_ms,
                                min([
                                    q.queue[0].message.timestamp()[1]
                                    for q in self.queues.values()
                                    if len(q.queue) > 0
                                ], default=sys.maxsize)):
                            yield from self._serve_heartbeat(self.next_hb)
                    break
                if self.heartbeat_interval_ms != -1:
                    yield from self._serve_heartbeat(event_ts_to_serve)
                message_to_serve = []
                for q in self.queues.values():
                    message_to_serve.extend(q.get_messages(event_ts_to_serve))
                yield from self._serve_messages(message_to_serve)
                if self.end_ts is not None and self.end_ts <= event_ts_to_serve:
                    self.running = False
        self.consumer.close()
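# A minimal usage sketch for the generator above (not part of the original
# snippet). The broker address and topic are placeholders, and the exact
# TopicInfo signature is an assumption based on the attributes used above
# (topic, partition, drop).
generator = TimeOrderedGeneratorWithTimeout(
    broker='localhost:9092',
    groupid='replay-group',
    topics_infos=[TopicInfo(topic='frames', partition=0, drop=False)],
    latency_ms=500,
    commit_interval_sec=10,
    begin_flag=BeginFlag.BEGINNING,
    end_flag=EndFlag.END_OF_PARTITION)
for record in generator.getMessages():
    print(record.topic(), record.timestamp()[1])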
def consume(self, topic, topic_timeout):
    kafka_config_consumer = ConfigFactory(kafka_client="consumer")
    config = kafka_config_consumer.config
    log.info("kafka config for consume %s", config)
    consumer = Consumer(config)
    events = []
    start_time = time.monotonic()
    timeout_start_time = start_time
    timeout_consumer = 10.0

    # the actual consumer starts now
    # subscribe to 1 or more topics and define the callback function;
    # the callback is only received after consumer.consume() is called!
    consumer.subscribe([topic], on_assign=self.callback_on_assignment)
    log.info(
        f"Waiting for partition assignment ... (timeout at {timeout_consumer} seconds)"
    )
    try:
        while (time.monotonic() - timeout_start_time) < timeout_consumer:
            # start consumption
            messages = consumer.consume(timeout=0.1)
            # check for partition assignment
            if self.consume_lock == ConsumerState.PARTITIONS_UNASSIGNED:
                # this should not happen, but we are not 100% sure
                if messages:
                    log.error("messages consumed but lock is unopened")
                    break
                continue
            # after partition assignment set the timeout again
            # and reset the start time from which to determine timeout
            # violation
            elif self.consume_lock == ConsumerState.PARTITIONS_ASSIGNED:
                timeout_start_time = time.monotonic()
                timeout_consumer = topic_timeout
                self.consume_lock = ConsumerState.TIMEOUT_SET
                log.info("Lock has been opened, consuming ...")
            # append messages to the events list to be returned
            if messages:
                for msg in messages:
                    log.info(f"message at offset: {msg.offset()}, "
                             f"partition: {msg.partition()}, "
                             f"topic: {msg.topic()}")
                    # TODO: allow assertions to be on message headers etc.
                    # events.append({
                    #     "key": msg.key,
                    #     "headers": msg.headers,
                    #     "value": msg.value()
                    # })
                    events.append(msg.value())
        # only executed when the while condition becomes false
        else:
            # at the end check if the partition assignment was achieved
            if self.consume_lock != ConsumerState.TIMEOUT_SET:
                log.error("No partition assignments received in time")
    except KafkaException as e:
        log.error(f"Kafka error: {e}")
    finally:
        consumer.close()
    end_time = time.monotonic()
    log.debug(f"this cycle took: {(end_time - start_time)} seconds")
    return events
class AsyncWorker(object):
    """
    Fetches messages from Kafka topics and processes them.

    :param consumer_topic: Name of the Kafka topic to consume from.
    :type consumer_topic: str
    :param service: Service function which is executed every time a job is processed.
        The service must accept a str or dict argument.
    :type service: callable
    :param consumer_conf: Config for the Kafka consumer.
    :type consumer_conf: dict
    :param failed_topic: Kafka topic to which unprocessed messages from consumer_topic are produced.
    :type failed_topic: str
    :param producer_conf: Config for the Kafka producer that produces unprocessed messages.
    :type producer_conf: dict
    """

    def __init__(self, consumer_topic: str, service: Callable,
                 consumer_conf: dict, failed_topic: str, producer_conf: dict):
        self._consumer_topic = consumer_topic
        self._consumer = Consumer(consumer_conf)
        self._service = service
        # use naming like <project name>_<version>_<consumer_topic><retry/failed>
        self._failed_topic = failed_topic
        self._producer = AsyncProducer(producer_conf)

    def __repr__(self):
        """Return the string representation of the worker.

        :return: String representation of the worker.
        :rtype: str
        """
        return 'Worker(Consumer={}, consume_topic={})'.format(
            self._consumer, self._consumer_topic)

    def __del__(self):  # pragma: no cover
        # noinspection PyBroadException
        try:
            self._consumer.close()
        except Exception:
            pass

    async def _exec_service(self, message_value):
        if iscoroutinefunction(self._service):
            res = await self._service(message_value)
        else:
            res = self._service(message_value)
        return res

    async def _process_message(self, msg: Message):
        """
        De-serialize the message and execute the service.

        :param msg: Kafka message.
        :type msg: confluent_kafka.Message
        """
        LOGGER.info(
            'Processing Message(topic={}, partition={}, offset={}) ...'.format(
                msg.topic(), msg.partition(), msg.offset()))
        service_repr = get_call_repr(self._service)
        LOGGER.info('Executing job {}'.format(service_repr))
        try:
            message_value = _decode_msg_value(msg.value())
            res = await self._exec_service(message_value)
        except KeyboardInterrupt:
            LOGGER.error('Job was interrupted: {}'.format(msg.offset()))
        except Exception as err:
            LOGGER.exception('Job {} raised an exception: {}'.format(
                msg.offset(), err))
            await self._producer.produce(topic=self._failed_topic,
                                         value=msg.value(),
                                         error=str(err))
        else:
            LOGGER.info('Job {} returned: {}'.format(msg.offset(), res))

    @property
    def consumer_topic(self):
        """Return the name of the Kafka topic.

        :return: Name of the Kafka topic.
        :rtype: str
        """
        return self._consumer_topic

    @property
    def consumer(self):
        """Return the Kafka consumer instance.

        :return: Kafka consumer instance.
        :rtype: confluent_kafka.Consumer
        """
        return self._consumer

    @property
    def service(self):
        """Return the service function.

        :return: Callback function, or None if not set.
        :rtype: callable | None
        """
        return self._service

    async def start(self,
                    max_messages: int = math.inf,
                    commit_offsets: bool = True) -> int:
        """Start processing Kafka messages and executing jobs.

        :param max_messages: Maximum number of Kafka messages to process before
            stopping. If not set, the worker runs until interrupted.
        :type max_messages: int
        :param commit_offsets: If set to True, consumer offsets are committed
            every time a message is processed (default: True).
        :type commit_offsets: bool
        :return: Total number of messages processed.
        :rtype: int
        """
        LOGGER.info('Starting {} ...'.format(self))
        self._consumer.unsubscribe()
        self._consumer.subscribe([self.consumer_topic])
        LOGGER.info(" Try get messages from position: {}".format(
            self._consumer.position(self._consumer.assignment())))
        messages_processed = 0
        while messages_processed < max_messages:
            loop = asyncio.get_event_loop()
            # awaiting place for processing messages in other coroutines
            messages = await loop.run_in_executor(
                None, partial(self._consumer.consume, 10, 2.0))
            LOGGER.debug(" Try get messages from position: {}".format(
                self._consumer.position(self._consumer.assignment())))
            if not messages:
                LOGGER.debug("Messages not found")
                continue
            for msg in messages:
                if msg.error():
                    LOGGER.error("Consumer error: {}".format(msg.error()))
                LOGGER.info("Get message with offset {}".format(msg.offset()))
                asyncio.create_task(self._process_message(msg))
                if commit_offsets:
                    self._consumer.commit()
                messages_processed += 1
        self._consumer.close()
        return messages_processed
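# A minimal usage sketch for AsyncWorker (not part of the original snippet);
# the broker address, topic names and the echo handler are placeholders, and
# AsyncProducer is assumed to accept the same style of config dict.
async def echo(value):
    print(value)

worker = AsyncWorker(
    consumer_topic='jobs',
    service=echo,
    consumer_conf={'bootstrap.servers': 'localhost:9092', 'group.id': 'jobs-worker'},
    failed_topic='jobs_failed',
    producer_conf={'bootstrap.servers': 'localhost:9092'})
asyncio.run(worker.start(max_messages=100))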
        __add_source_info(instrument)
        entry[CHILDREN].append(detector_1)
        entry[CHILDREN].append(instrument)
        entry[CHILDREN].append(_create_dataset("beamline", instrument_name))
        entry[CHILDREN].append(_create_dataset("name", instrument_name))  # these seem to be the same
        for i in range(8):
            monitor = _create_group(f"monitor_{i}", "NXmonitor")
            entry[CHILDREN].append(monitor)
        new_run_message = serialise_pl72(filename=des.filename,
                                         start_time=des.start_time,
                                         stop_time=des.stop_time,
                                         run_name=des.run_name,
                                         service_id=des.service_id,
                                         instrument_name=des.instrument_name,
                                         broker=des.broker,
                                         nexus_structure=str(entry),
                                         job_id=des.job_id)
        prod.produce(topic="ALL_runInfo", value=new_run_message)
        print(f"produced: {entry}")
    except KeyboardInterrupt:
        break
cons.close()
def main(args):
    serial = args.serial
    num_messages = args.num_messages
    brokers = args.brokers
    group_id = args.group_id
    input_topic = args.input_topic
    input_partition = args.input_partition
    output_topic = args.output_topic

    if serial:
        print("Running in SERIAL mode")
        print(
            "The input producer will wait for the reply of the transactor before producing the next message."
        )
    else:
        print("Running in PARALLEL mode")
        print(
            "The input producer will produce all messages in parallel (at once) after the first message."
        )

    tr_args = [
        sys.executable,
        os.path.join(HERE, "eos-transactions.py"),
        "-b",
        brokers,
        "-g",
        group_id + "-tr",
        "-t",
        input_topic,
        "-p",
        str(input_partition),
        "-o",
        output_topic,
    ]

    output_consumer = Consumer({
        "bootstrap.servers": brokers,
        "group.id": group_id + "-pr",
        "auto.offset.reset": "earliest",
        "enable.auto.commit": True,
        "enable.partition.eof": False,
    })
    output_consumer.subscribe([output_topic])

    input_producer = Producer({
        'bootstrap.servers': brokers,
    })

    try:
        with tempfile.NamedTemporaryFile(mode='w+') as f:
            tr_proc = subprocess.Popen(tr_args,
                                       stderr=subprocess.STDOUT,
                                       stdout=f,
                                       cwd=HERE,
                                       close_fds=True)
            try:
                time.sleep(1)
                assert tr_proc.poll() is None
                tx = 0
                for i in range(num_messages):
                    input_producer.produce(input_topic,
                                           key=b"xy",
                                           value=str(tx).encode("ascii"))
                    tx += 1
                    assert input_producer.flush(10) == 0
                    while serial or tx <= 1:
                        msg = output_consumer.poll(1.0)
                        if msg is None:
                            continue
                        assert msg.error() is None
                        if tx == 1:
                            t_start = time.time()
                        break
                if not serial:
                    for _ in range(num_messages - 1):
                        msg = output_consumer.poll(1.0)
                        if msg is None:
                            continue
                        assert msg.error() is None
                print("Processing took {}".format(time.time() - t_start))
            finally:
                if tr_proc.poll() is None:
                    tr_proc.terminate()
                    tr_proc.wait()
                f.seek(0)
                eos_out = f.read()
    finally:
        output_consumer.close()  # commit offsets

    i = 0
    c = False
    send_offset_logs = defaultdict(list)
    send_offset_times = []
    for line in eos_out.split("\n"):
        if line.startswith(":DEMO:START "):
            c = True
        if c:
            send_offset_logs[i].append(line)
        if line.startswith(":DEMO:END "):
            send_offset_times.append(float(line.rpartition(" ")[-1]))
            c = False
            i += 1

    print("\nSend offset times:", send_offset_times)
    print("Send offset times average:",
          sum(send_offset_times) / len(send_offset_times))

    print("\nRelevant log snippet from the middle:")
    print("\n".join(send_offset_logs[int(i / 2)]))

    print("\nFull output of the transactor:")
    print(eos_out)
class MsgConsumer:
    def __init__(self,
                 topic,
                 broker_address,
                 group_id='group',
                 client_id='client',
                 auto_offset_reset='earliest',
                 num_messages=1,
                 verbose=False):
        """Consumer for handling EEG Streamer messages.

        Args:
            topic: Topic to subscribe to
            broker_address: Broker address
            group_id: group ID
            client_id: client ID
            auto_offset_reset: (default: 'earliest')
            num_messages: Maximum number of messages to consume each time (default: 1)
            verbose: verbose mode. (default: False)
        """
        self.data = deque()
        self.timestamps = deque()
        self.__num_msgs = num_messages
        """Maximum number of messages to consume each time (default: 1)"""
        self.__verbose = verbose
        self.__streamqueue = deque()
        self.__consumer = Consumer({
            'bootstrap.servers': broker_address,
            'auto.offset.reset': auto_offset_reset,
            'group.id': group_id,
            'client.id': client_id,
            'enable.auto.commit': True,
            'session.timeout.ms': 6000,
            'max.poll.interval.ms': 10000
        })
        """consumer that reads the stream of EEG signal"""
        self.__consumer.subscribe([topic])

    def listen(self):
        """Read the stream from Kafka and append it to the stream queue.

        Returns:
            list of list: dataset (nchannel x nsample) or None
        """
        # If the chunk size is too large, consume it over multiple epochs
        chunk_size = self.__num_msgs
        msgs = []
        while chunk_size > 100:
            msgs.extend(self.__consumer.consume(num_messages=100, timeout=1))
            chunk_size -= 100
        msgs.extend(self.__consumer.consume(num_messages=chunk_size, timeout=1))
        print(f"INFO: Received {str(len(msgs))} messages") if self.__verbose else None

        if msgs is None or len(msgs) <= 0:
            return None

        self.__streamqueue.extendleft(msgs)  # Enqueue
        if len(self.__streamqueue) < self.__num_msgs:
            return None

        # Dequeue
        msgs__ = [self.__streamqueue.pop() for i in range(0, self.__num_msgs)]

        timestamps, data = [], []
        for msg in msgs__:
            time, values = msg_decode(msg.value())
            timestamps.append(time) if time is not None else None
            data.append(values) if time is not None else None

        # TODO: assert there is no big time gap in the data
        if len(data) < self.__num_msgs:
            return None

        print(timestamps[0], data[0]) if self.__verbose else None
        data = tuple(zip(*data))
        self.data.append(data)
        self.timestamps.append(timestamps[0])
        print("INFO: Successfully read a chunk") if self.__verbose else None

    def stop(self):
        self.__consumer.close()

    def drain(self):
        self.__num_msgs = 100000
        for i in range(0, 10):
            self.listen()
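# A minimal usage sketch (not part of the original snippet); the broker
# address and topic name are placeholders.
eeg_consumer = MsgConsumer(topic='eeg-stream',
                           broker_address='localhost:9092',
                           num_messages=256,
                           verbose=True)
try:
    while True:
        eeg_consumer.listen()
except KeyboardInterrupt:
    eeg_consumer.stop()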
def compute_achieved_throughput(broker, partitions_with_offsets, result_dict):
    partitions_with_offsets = {}
    input_consumer = Consumer({
        'bootstrap.servers': broker,
        'group.id': str(uuid.uuid4()),
        # 'group.id': 'achieved_throughput_measurer',
        'auto.offset.reset': 'earliest',
        'enable.auto.commit': True,
        'auto.commit.interval.ms': 1000,
        'api.version.request': True,
        'max.poll.interval.ms': 60000
    })
    output_consumer = Consumer({
        'bootstrap.servers': broker,
        'group.id': str(uuid.uuid4()),
        # 'group.id': 'achieved_throughput_measurer',
        'auto.offset.reset': 'earliest',
        'enable.auto.commit': True,
        'auto.commit.interval.ms': 1000,
        'api.version.request': True,
        'max.poll.interval.ms': 60000
    })

    if 'input' in partitions_with_offsets and len(partitions_with_offsets['input']) > 0:
        input_consumer.assign(partitions_with_offsets['input'])
    else:
        input_consumer.subscribe(['read', 'update', 'transfer'])

    if 'output' in partitions_with_offsets and len(partitions_with_offsets['output']) > 0:
        output_consumer.assign(partitions_with_offsets['output'])
    else:
        output_consumer.subscribe(['responses'])

    while True:
        msgs = input_consumer.consume(timeout=5, num_messages=500)
        if len(msgs) == 0:
            break
        for msg in msgs:
            try:
                wrapped = Wrapper()
                wrapped.ParseFromString(msg.value())
                result = {}
                result['operation'] = msg.topic()
                result['input_time'] = msg.timestamp()[1]
                result_dict[wrapped.request_id] = result
            except DecodeError:
                print("Could not decode?")

    partitions_with_offsets['input'] = input_consumer.position(
        input_consumer.assignment())
    input_consumer.close()

    total_messages = 0
    start_time = 0
    end_time = 0
    first = True

    while True:
        msgs = output_consumer.consume(timeout=5, num_messages=500)
        if len(msgs) == 0:
            break
        for msg in msgs:
            response = Response()
            response.ParseFromString(msg.value())
            key = response.request_id
            status_code = response.status_code
            if key in result_dict:
                if first:
                    start_time = msg.timestamp()[1] / 1000
                    first = False
                total_messages += 1
                end_time = msg.timestamp()[1] / 1000
                result_dict[key]['output_time'] = msg.timestamp()[1]
                result_dict[key]['status_code'] = status_code

    partitions_with_offsets['output'] = output_consumer.position(
        output_consumer.assignment())
    output_consumer.close()

    print("Total messages considered: " + str(total_messages))
    if total_messages == 0 or end_time - start_time == 0:
        return 0
    return total_messages / (end_time - start_time)