def produce():
    """Send a batch of sample Clogan-style log records to the 'Applog' topic.

    Builds one JSON-array payload of synthetic log entries (replacing the
    original multi-kilobyte hard-coded literal with structurally identical
    generated records) and asynchronously produces it 10000 times.
    Any failure is logged with a full traceback instead of crashing.
    """
    try:
        p = Producer({
            'bootstrap.servers': '192.168.150.6:9092,192.168.150.7:9092,192.168.150.8:9092'
        })
        # Synthetic payload matching the shape of the original hard-coded
        # sample: one "clogan header" record followed by many test records,
        # each ~150 ms apart.
        base_ts = 1587432527233
        records = [{"content": "clogan header", "contentType": 1,
                    "curTime": base_ts, "threadName": "clogan",
                    "threadId": "1", "mainThread": True}]
        records += [{"content": "test test testtes ", "contentType": 11,
                     "curTime": base_ts + 150 * k, "threadName": "main",
                     "threadId": "2", "mainThread": True}
                    for k in range(100)]
        data = json.dumps(records)
        for _ in range(10000):
            print(len(data))
            # Trigger any available delivery report callbacks from previous
            # produce() calls.
            p.poll(0)
            # Asynchronously produce a message; the delivery report callback
            # will be triggered from poll() above, or flush() below, when the
            # message has been successfully delivered or failed permanently.
            ret = p.produce('Applog', data.encode('utf-8'), callback=delivery_report)
            print(ret)
        # Bug fix: the original left flush() commented out — without it,
        # messages still queued in librdkafka can be silently lost when the
        # producer object is garbage-collected.
        p.flush()
    except Exception:
        print(traceback.format_exc())
def produce(queue, key, val, headers=None):
    """Synchronously publish a single message to *queue*.

    A short-lived Producer is created per call and flushed before returning,
    so delivery (or permanent failure) has happened by the time this exits.

    :param queue: destination topic name
    :param key: message key
    :param val: message value
    :param headers: optional message headers
    """
    logging.info("Producing into %s: %s %s", queue, key, val)
    kafka_client = Producer({
        'bootstrap.servers': KAFK,
        "message.send.max.retries": 2
    })
    # Serve any pending callbacks, hand off the message, then block until
    # the delivery queue is drained.
    kafka_client.poll(0)
    kafka_client.produce(queue, key=key, value=val, headers=headers)
    kafka_client.flush()
class ConfluentKafkaProducer(KafkaProducerInterface):
    """Kafka producer implementation backed by confluent-kafka (librdkafka).

    Serializes messages as UTF-8 JSON and reports transport-level errors
    through the supplied structured logger.
    """

    def __init__(self, config, logger):
        """Build the librdkafka configuration from *config* and create the producer.

        :param config: dict-like with kafka_bootstrap_servers (list),
            kafka_broker_version_fallback, kafka_api_version_request,
            kafka_producer_batch_linger_ms and kafka_producer_buffer_kbytes.
        :param logger: structured logger; its ``.logger`` attribute is handed
            to librdkafka for its own log lines.
        """
        super(ConfluentKafkaProducer, self).__init__()
        self.logger = logger
        conf = {
            'bootstrap.servers': ','.join(config['kafka_bootstrap_servers']),
            'broker.version.fallback': config['kafka_broker_version_fallback'],
            'api.version.request': config['kafka_api_version_request'],
            'queue.buffering.max.ms': config['kafka_producer_batch_linger_ms'],
            'queue.buffering.max.kbytes': config['kafka_producer_buffer_kbytes'],
            'message.send.max.retries': 3,
            'default.topic.config': {
                'request.required.acks': 1
            }
        }
        self.logger.info("Creating a Confluent Kafka Producer",
                         {"config": json.dumps(conf, indent=4)})
        self.producer = Producer(dict(conf, **{'error_cb': self.error_callback}),
                                 logger=logger.logger)
        # Service any logging callbacks queued during construction.
        self.producer.poll(0.25)

    def error_callback(self, error):
        """Log a librdkafka transport error.

        Fix: the previous docstring documented nonexistent ``message`` and
        ``datum`` parameters, and the ``datum if datum else {}`` guard was
        dead code (datum always had three keys at that point).

        :param error: error reported by librdkafka
        :type error: KafkaError
        """
        if error:
            datum = {
                'success': False,
                'exception': error.name(),
                'description': error.str(),
            }
            self.logger.error("Kafka error", datum)

    def send(self, topic, message, callback=None):
        """Asynchronously produce *message* (JSON-serialized) to *topic*.

        :param topic: destination topic name
        :param message: JSON-serializable payload
        :param callback: optional per-message delivery callback
        """
        self.producer.produce(topic, json.dumps(message).encode('utf-8'),
                              callback=callback)
        # Service the delivery callback queue.
        self.producer.poll(0)

    def poll(self):
        """Serve pending delivery/error callbacks without blocking."""
        self.producer.poll(0)

    def close(self):
        """Block until all queued messages are delivered, then serve callbacks."""
        self.producer.flush()
        self.producer.poll(0)
class EventsProducer:
    """Publishes chat lifecycle events (upsert/delete chat, add/remove member)
    to the chat Kafka topic configured in ``settings``.

    NOTE(review): the asyncio event loop here only runs trivially-async
    private coroutines to completion synchronously — no concurrency is
    gained; plain methods would behave identically. Confirm before changing.
    """

    def __init__(self):
        # Grab the current event loop and create the producer eagerly so
        # connection problems surface at construction time.
        self.loop = asyncio.get_event_loop()
        self.loop.run_until_complete(self.__start_producer())

    def send_upsert_chat_entity(self, members, lab_id, chat_id, name):
        # Create-or-update event for a chat entity; lab_id is sent as the
        # containing "containerReferenceId".
        event = create_event(entity={"containerReferenceId": lab_id,
                                     "chatId": chat_id,
                                     "name": name,
                                     "members": members})
        self.__send_event(UPSERT_CHAT_ENTITY, event)

    def send_delete_chat_entity(self, chat_id):
        # Deletion event carries only the chat id.
        event = create_event(entity={"chatId": chat_id})
        self.__send_event(DELETE_CHAT_ENTITY, event)

    def send_add_chat_member(self, user_auth_key, chat_id):
        # Unlike the other events, this one also passes chat_id at the top
        # level of create_event (presumably used for routing — TODO confirm).
        event = create_event(chat_id=chat_id,
                             entity={"userAuthKey": user_auth_key,
                                     "chatId": chat_id})
        self.__send_event(ADD_CHAT_MEMBER, event)

    def send_remove_chat_member(self, user_auth_key, chat_id):
        event = create_event(entity={"userAuthKey": user_auth_key,
                                     "chatId": chat_id})
        self.__send_event(REMOVE_MEMBERS_FROM_CHAT_ENTITY, event)

    async def __start_producer(self):
        # SASL/SSL-authenticated producer; credentials come from settings.
        self.producer = Producer(({
            "bootstrap.servers": settings.BOOTSTRAP_SERVERS,
            "security.protocol": "SASL_SSL",
            "sasl.mechanisms": "PLAIN",
            "sasl.username": settings.SASL_PLAIN_USERNAME,
            "sasl.password": settings.SASL_PLAIN_PASSWORD
        }))

    def __send_event(self, key, event: dict):
        # Runs the async sender to completion — effectively a blocking send.
        self.loop.run_until_complete(self.__send_one(key, event))

    def __delivery_report(self, err, msg):
        """ Called once for each message produced to indicate delivery result.
            Triggered by poll() or flush(). """
        if err is not None:
            print('Message delivery failed: {}'.format(err))
        else:
            print('Message delivered to {} [{}]'.format(msg.topic(),
                                                        msg.partition()))

    async def __send_one(self, key, event: dict):
        self.producer.produce(topic=settings.TOPIC_CHAT,
                              value=json.dumps(event),
                              key=key,
                              callback=self.__delivery_report)
        # NOTE(review): poll(10000) can block up to 10000 *seconds* waiting
        # to serve delivery callbacks; flush() or a small timeout looks like
        # the intent — confirm before relying on this.
        self.producer.poll(10000)
class StopEventsDataPublisher:
    """Publishes per-trip stop event records to a Kafka topic."""

    _logger = logging.getLogger('StopEventsDataPublisher')

    def __init__(self):
        # Producer configuration is centralized in KafkaHelper.
        self._producer = Producer(KafkaHelper.get_kafka_configs())

    def publish_stop_event_records(self, stop_event_records, topic=STOP_EVENT_TOPIC):
        """Serialize each {trip_id: stop_events} entry as JSON and publish it.

        Blocks until every queued message has been delivered (or has failed),
        then logs the total number of successful deliveries.
        """
        self._logger.info(
            "Publishing {} stop event records to {} topic ...".format(
                len(stop_event_records), topic))

        delivered_records = 0

        def callback(err, msg):
            # Delivery report: count successes, log failures.
            nonlocal delivered_records
            if err is not None:
                self._logger.error("Failed to deliver message: %s: %s" %
                                   (str(msg), str(err)))
                return
            delivered_records += 1
            self._logger.debug(
                "Published record to topic {} partition [{}] @ offset {}".
                format(msg.topic(), msg.partition(), msg.offset()))
            self._logger.debug(
                'Published records count: {}'.format(delivered_records))

        for trip_id, stop_events in stop_event_records.items():
            payload = json.dumps({trip_id: stop_events})
            self._producer.produce(topic, value=payload, on_delivery=callback)
            # Serve delivery callbacks as we go so the queue doesn't fill up.
            self._producer.poll(0)

        self._producer.flush()
        self._logger.info(
            'Done delivering records to {} topic! A total of {} records were published'
            .format(topic, delivered_records))
def main():
    """Demography (gender & age) visualization demo.

    Consumes images, head detections, gender and age records from Kafka,
    draws an orange bounding box with "<gender> <age>" above each head,
    overlays the Ultinous logo, and either displays the frames ('-d') and/or
    republishes them to an output Kafka topic ('-o').
    """
    parser = argparse.ArgumentParser(
        epilog="""Description: Plays a video from a jpeg topic, visualizes the head detection with an orage bounding box around a head and writes demography data (gender & age) data above the heads. Displays ('-d') or stores ('-o') the result of this demo in the kafka topic. Required topics: - <prefix>.cam.0.original.Image.jpg - <prefix>.cam.0.dets.ObjectDetectionRecord.json - <prefix>.cam.0.genders.GenderRecord.json - <prefix>.cam.0.ages.AgeRecord.json """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    # At least one sink (screen or Kafka) is required.
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    # Input/output topic names derived from the prefix.
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    gender_topic = f"{args.prefix}.cam.0.genders.GenderRecord.json"
    age_topic = f"{args.prefix}.cam.0.ages.AgeRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.demography.Image.jpg"
    # handle full screen
    window_name = "DEMO: Demography (gender & age)"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(detection_topic),
        TopicInfo(gender_topic),
        TopicInfo(age_topic),
    ], 100, None, True)
    i = 0
    for msgs in consumer.getMessages():
        # Messages are regrouped per timestamp into a frame structure.
        for time, v in message_list_to_frame_structure(msgs).items():
            img = v[args.prefix]["0"]["image"]
            if type(img) == np.ndarray:
                # Annotate every detected head on this frame.
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    age_record = v[args.prefix]["0"]["head_detection"][
                        head_detection]["age"]
                    gender_record = v[args.prefix]["0"]["head_detection"][
                        head_detection]["gender"]
                    # Empty dict means "no prediction" for this head.
                    age = "" if age_record['age'] == {} else age_record['age']
                    gender = "" if gender_record[
                        'gender'] == {} else gender_record['gender']
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        img, object_detection_record["bounding_box"],
                        (10, 95, 255))
                    # write age and gender
                    img = draw_nice_text(
                        img,
                        str(gender) + " " + str(age),
                        object_detection_record["bounding_box"], (10, 95, 255))
                # draw ultinous logo
                img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    # Periodic flush keeps the producer queue bounded.
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
            # display
            if args.display:
                cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Pass-detection visualization demo.

    Consumes images, head detections, track changes and pass detections from
    Kafka, draws bounding boxes, track polylines and pass lines (with their
    crossing-event history), and either displays the frames ('-d') and/or
    republishes them to an output Kafka topic ('-o').
    """
    parser = argparse.ArgumentParser(
        epilog="""Description: Plays a video from a jpeg topic, visualizes the head detections and tracks, and pass detections. Displays the result on screen ('-d') or stores result in kafka ('-o'). Required topics: - <prefix>.cam.0.original.Image.jpg - <prefix>.cam.0.dets.ObjectDetectionRecord.json - <prefix>.cam.0.tracks.TrackChangeRecord.json - <prefix>.cam.0.passdet.PassDetectionRecord.json """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument("config", help="Path to service config.", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    # Pass-line geometry comes from the service config file.
    passdet_config_json = parse_config_data(args=args, parser=parser)
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    # When replaying a video file, consume from the beginning and stop at the
    # end of the partition instead of tailing live data.
    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    # One PassLine (with its own color) per configured pass line polygon.
    passlines: Dict[str, PassLine] = {
        pl["id"]: PassLine(next(pass_colors),
                           [(int(p["x"]), int(p["y"])) for p in pl["poly"]])
        for pl in passdet_config_json["passLines"]
    }
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    track_topic = f"{args.prefix}.cam.0.tracks.TrackChangeRecord.json"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"
    passdet_topic = f"{args.prefix}.cam.0.passdet.PassDetectionRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.passdet.Image.jpg"
    # Write notification if no message is received for this long
    notification_delay_sec = 10
    # handle full screen
    window_name = "DEMO: Pass detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read message, draw and display them
    # track/passdet topics use drop=False so no events are lost to reordering.
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker,
        "detection", [
            TopicInfo(image_topic),
            TopicInfo(track_topic, drop=False),
            TopicInfo(passdet_topic, drop=False),
            TopicInfo(detection_topic),
            TopicInfo(frameinfo_topic)
        ],
        100,
        None,
        True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)
    i = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None
    # Live track polylines keyed by track id; each gets a fresh color.
    tracks: DefaultDict[Any, ColoredPolyLine] = defaultdict(
        lambda: ColoredPolyLine(next(track_colors)))
    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                # Update track polylines; drop tracks that have ended.
                for track_key, track_val in v[
                        args.prefix]["0"]["track"].items():
                    if track_val["end_of_track"]:
                        if track_key in tracks:
                            del tracks[track_key]
                        continue
                    point = track_val["point"]["x"], track_val["point"]["y"]
                    tracks[track_key].add_point(point)
                # Record crossing direction for PASS_CANDIDATE events only.
                for pass_det in v[args.prefix]["0"]["passdet"].values():
                    if pass_det["type"] == "HEARTBEAT":
                        continue
                    elif pass_det["type"] == "END_OF_TRACK":
                        continue
                    elif pass_det["type"] == "PASS_CANDIDATE":
                        pass_id = pass_det["pass_candidate"]["pass"][
                            "pass_line_id"]
                        cross_dir = pass_det["pass_candidate"]["pass"][
                            "cross_dir"]
                        if pass_id in passlines:
                            passlines[pass_id].add_event(cross_dir)
                    elif pass_det["type"] == "PASS_REALIZED":
                        continue
                img = v[args.prefix]["0"]["image"]
                if type(img) != np.ndarray:
                    continue
                last_image_ts = int(time.time())
                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                # Frame info (under the "image" key) gives the original frame
                # width, used to scale drawing onto a resized image.
                shape_orig = v[args.prefix]["0"]["head_detection"].pop(
                    "image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                # draw bounding_box
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            canvas=img,
                            bounding_box=object_detection_record[
                                "bounding_box"],
                            color=(10, 95, 255),
                            scaling=scaling)
                for t in tracks.values():
                    t.draw(img, scaling)
                # Draw each pass line plus its accumulated crossing events.
                for idx, l in enumerate(passlines.values()):
                    l.draw(img, scaling)
                    cv2.putText(img, "".join(l.events), (40, (idx + 1) * 50),
                                cv2.FONT_HERSHEY_COMPLEX, 2, l.color, 5,
                                bottomLeftOrigin=True)
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT,
                                   scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    # Periodic flush keeps the producer queue bounded.
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (
                last_image_ts is None or
                last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            # Exit code 130 signals interrupted playback of a video file.
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Filtered-detection visualization demo.

    Consumes images, raw detections and area-filtered detections from Kafka,
    draws raw detections in grey and filtered ones in orange together with
    the configured positive/negative areas, and either displays the frames
    ('-d') and/or republishes them to an output Kafka topic ('-o').
    """
    parser = init_parser()
    args = parser.parse_args()
    # Filtering configuration: inclusion/exclusion areas and detection types.
    config_data = parse_config_data(args=args, parser=parser)
    positive_areas = parse_areas(config_data, "positive_areas")
    negative_areas = parse_areas(config_data, "negative_areas")
    detection_types = parse_detection_types(config_data)
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    # When replaying a video file, consume from the beginning and stop at the
    # end of the partition instead of tailing live data.
    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000
    output_topic_name = f"{args.prefix}.cam.0.filtered_dets.Image.jpg"
    # Write notification if no message is received for this long
    notification_delay_sec = 10
    # handle full screen
    window_name = "DEMO: Filtered detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        broker=args.broker,
        groupid="detection",
        topics_infos=[
            TopicInfo(
                f"{args.prefix}.cam.0.original.Image.jpg"),  # image_topic
            TopicInfo(
                f"{args.prefix}.cam.0.filtered_dets.ObjectDetectionRecord.json"
            ),  # filtered_detection_topic
            TopicInfo(f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
                      )  # detection_topic
        ],
        latency_ms=100,
        group_by_time=True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)
    i = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None
    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                frame_info = v[args.prefix]["0"]
                img = frame_info["image"]
                if type(img) != np.ndarray:
                    continue
                last_image_ts = int(time.time())
                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                # Frame info (under the "image" key) gives the original frame
                # width, used to scale drawing onto a resized image.
                shape_orig = frame_info["head_detection"].pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                # draw bounding_box
                # Raw detections in grey, area-filtered ones in orange.
                for head_detection in frame_info["head_detection"]:
                    img = draw_bounding_box(
                        object_detection_record=frame_info["head_detection"]
                        [head_detection]["bounding_box"],
                        detection_types=detection_types,
                        img=img,
                        scaling=scaling,
                        color=COLOR_GREY)
                for head_detection in frame_info["filtered_head_detection"]:
                    img = draw_bounding_box(object_detection_record=frame_info[
                        "filtered_head_detection"][head_detection]
                        ["filtered_bounding_box"],
                        detection_types=detection_types,
                        img=img,
                        scaling=scaling,
                        color=COLOR_ORANGE)
                draw_areas(areas=positive_areas, img=img, color=COLOR_GREEN)
                draw_areas(areas=negative_areas, img=img, color=COLOR_RED)
                draw_ultinous_logo(canvas=img, scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    # Periodic flush keeps the producer queue bounded.
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (
                last_image_ts is None or
                last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            # Exit code 130 signals interrupted playback of a video file.
            if args.video_file:
                exit(130)
            else:
                break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Two-camera reidentification demo.

    Camera0 registers subjects, Camera1 reidentifies them.  Draws an orange
    bounding box and an "id / dwell time" label above reidentified heads and
    displays ('-d') and/or republishes ('-o') the annotated frames.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
        Basic Reidentification demo using two cameras: Camera0 for object registration and Camera1 for reidentification.
        Plays a video from a jpeg topic, visualizes head detection with an orange bounding box around a head
        and writes the dwell time and ID (derived from the reid MS ID) above the heads.
        Displays ('-d') or stores ('-o') the result of this demo in kafka topics.

        Required topics:
        - <prefix>.cam.0.original.Image.jpg
        - <prefix>.cam.0.dets.ObjectDetectionRecord.json
        - <prefix>.cam.1.original.Image.jpg
        - <prefix>.cam.1.dets.ObjectDetectionRecord.json
        - <prefix>.cam.1.reids.ReidRecord.json
        """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_reg_topic = f"{args.prefix}.cam.{REG_CAMERA_ID}.original.Image.jpg"
    image_reid_topic = f"{args.prefix}.cam.{REID_CAMERA_ID}.original.Image.jpg"
    detection_reg_topic = f"{args.prefix}.cam.{REG_CAMERA_ID}.dets.ObjectDetectionRecord.json"
    detection_reid_topic = f"{args.prefix}.cam.{REID_CAMERA_ID}.dets.ObjectDetectionRecord.json"
    reid_topic = f"{args.prefix}.cam.{REID_TOPIC_ID}.reids.ReidRecord.json"
    output_reg_topic_name = f"{args.prefix}.cam.{REG_CAMERA_ID}.reids.Image.jpg"
    output_reid_topic_name = f"{args.prefix}.cam.{REID_CAMERA_ID}.reids.Image.jpg"
    # handle full screen
    window_name = TITLE
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        broker=args.broker,
        groupid="detection",
        topics_infos=[
            TopicInfo(image_reg_topic),
            TopicInfo(image_reid_topic),
            TopicInfo(detection_reg_topic),
            TopicInfo(detection_reid_topic),
            TopicInfo(reid_topic, drop=False),
        ],
        latency_ms=500,
        commit_interval_sec=None,
        group_by_time=True)
    registrations: Dict[str, int] = {}
    i = 0
    inner_id = 0
    for msgs in consumer.getMessages():
        # FIX: loop variable renamed from `time` to `ts` so it no longer
        # shadows the `time` module.
        for ts, v in message_list_to_frame_structure(msgs).items():
            message = v.get(args.prefix, {})
            # Register the recognized faces
            reid_records = message[REID_TOPIC_ID].get("reid", {})
            for reid_key, reid_record in reid_records.items():
                record_type = reid_record["type"]
                # Get the stored registration key
                registration_key = reid_record["reg_refs"][0]["subject"]["key"]
                if record_type == "REG" and registration_key not in registrations:
                    inner_id += 1
                    registrations[registration_key] = inner_id
            for topic_key, topic_message in message.items():
                img = topic_message.get("image", {})
                if not isinstance(img, np.ndarray):
                    continue
                # Process detections
                head_detections = topic_message.get("head_detection", {})
                for detection_key, detection_record in head_detections.items():
                    object_detection_record = detection_record["bounding_box"]
                    color = COLOR_GREY
                    key_to_display = ""
                    # Reidentification received
                    reid_record = reid_records.get(detection_key)
                    if reid_record and reid_record["type"] == "REID":
                        # We only use the first identified face now
                        reid_key = reid_record["reg_refs"][0]["subject"]["key"]
                        registered_id = registrations.get(reid_key)
                        if registered_id:
                            color = COLOR_ORANGE
                            dwell_time = ts - int(reid_key.split('_')[0])
                            key_to_display = f"id: {registered_id}; dwell time: {dwell_time}ms"
                    # draw text above bounding box
                    img = draw_nice_text(
                        canvas=img,
                        text=key_to_display,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color)
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color)
                # draw ultinous logo
                img = draw_overlay(canvas=img, overlay=overlay,
                                   position=Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    # BUGFIX: was `topic_key is REG_CAMERA_ID` — identity
                    # comparison only worked by interning luck; use equality.
                    out_topic = (output_reg_topic_name
                                 if topic_key == REG_CAMERA_ID
                                 else output_reid_topic_name)
                    producer.produce(out_topic,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                # if args.display:
                cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned, so don't print it
            continue
        else:
            # FIX: plain string — the f-string had no placeholders.
            print("Press 'q' key for EXIT!")
def main():
    """Face-mask demo.

    Draws a bounding box per detected head: grey when mask detection did not
    run, green when a mask is present, orange with a 'NO MASK' label when it
    is missing.  Displays ('-d') and/or republishes ('-o') the frames.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
        Plays a video from a jpeg topic, visualizes the head detection with a bounding box around a head.
        The boundig box is grey when mask detection did not run;
        it is green when a mask is detected;
        it is orange and 'NO MASK' is written above the head when no mask is detected.
        Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.

        Required topics:
        - <prefix>.cam.0.original.Image.jpg
        - <prefix>.cam.0.dets.ObjectDetectionRecord.json
        - <prefix>.cam.0.masks.FaceMaskRecord.json
        """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        # Finite input: replay from the start and stop at end of partition.
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    mask_topic = f"{args.prefix}.cam.0.masks.FaceMaskRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.face_mask.Image.jpg"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"
    # Write notification if no message is received for this long
    notification_delay_sec = 10
    # handle full screen
    window_name = "DEMO: Face Mask"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker, "detection", [
            TopicInfo(image_topic),
            TopicInfo(detection_topic),
            TopicInfo(mask_topic),
            TopicInfo(frameinfo_topic)
        ], 100, None, True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)
    i = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None
    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                img = v[args.prefix]["0"]["image"]
                # FIX: isinstance() instead of comparing type() objects.
                if not isinstance(img, np.ndarray):
                    continue
                last_image_ts = int(time.time())
                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                shape_orig = v[args.prefix]["0"]["head_detection"].pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"]["head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] != "PERSON_HEAD":
                        continue
                    mask_record = v[args.prefix]["0"]["head_detection"][head_detection]["face_mask"]
                    mask_text = ""
                    if not mask_record:
                        color = COLOR_DARK_GREY
                    elif mask_record["has_mask"]:
                        color = COLOR_GREEN
                    else:
                        mask_text = "NO MASK"
                        color = COLOR_ORANGE
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)
                    # write mask status above the head
                    # (FIX: old comment said "age and gender" — copy-paste)
                    img = draw_nice_text(img, mask_text,
                                         object_detection_record["bounding_box"],
                                         color, scale=scaling)
                # draw ultinous logo
                img = draw_overlay(canvas=img, overlay=overlay,
                                   position=Position.BOTTOM_RIGHT, scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (last_image_ts is None or
                               last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 returned, so don't print it
            continue
        else:
            # FIX: plain string — the f-string had no placeholders.
            print("Press 'q' key for EXIT!")
class KafmanProducer(metaclass=Singleton):
    """Singleton Kafka producer that publishes messages on a background
    thread and reports delivery results on an event bus."""

    def __init__(self):
        super().__init__()
        self.topic = None        # last topic used (kept for API compatibility)
        self.producer = None     # underlying confluent-kafka Producer
        self.started = False     # True once start() created the producer
        self.bus = EventBus.get(PRODUCER_BUS)
        self.console_bus = EventBus.get(HConsole.CONSOLE_BUS)

    def flush(self, timeout: int = 0) -> None:
        """Block until all queued messages are delivered or `timeout` expires."""
        if self.producer:
            self.producer.flush(timeout=timeout)

    def purge(self) -> None:
        """Drop all messages currently queued for delivery."""
        if self.producer:
            self.producer.purge()

    def start(self, settings: dict) -> None:
        """Create the underlying producer from `settings` if not already running."""
        if self.producer is None:
            self.producer = Producer(settings)
            self.started = True

    def stop(self) -> None:
        """Purge pending messages, flush and dispose of the producer."""
        if self.producer is not None:
            self.purge()
            self.flush()
            del self.producer
            self.producer = None
            self.started = False

    def produce(self, topics: List[str], messages: List[str]) -> None:
        """Asynchronously publish every message to every topic."""
        if self.started:
            # FIX: daemon=True at construction replaces the deprecated
            # Thread.setDaemon() call.
            tr = threading.Thread(target=self._produce,
                                  args=(topics, messages,),
                                  daemon=True)
            tr.start()

    def _produce(self, topics: List[str], messages: List[str]):
        """Worker: publish the messages and poll for delivery callbacks."""
        try:
            for topic in topics:
                for msg in messages:
                    if msg:
                        self.producer.produce(topic, msg, callback=self._message_produced)
                        self.producer.poll(POLLING_INTERVAL)
        except KeyboardInterrupt:
            print("Keyboard interrupted")
        finally:
            # Best-effort final flush so queued messages are not lost.
            self.producer.flush(30)

    def _message_produced(self, error: KafkaError, message: Message) -> None:
        """Delivery callback: log failures, emit successes on the bus."""
        topic = message.topic()
        msg = message.value().decode(Charset.UTF_8.value)
        if error is not None:
            print(f"Failed to deliver message: {msg}: {error.str()}")
        else:
            self.bus.emit(MSG_PROD_EVT, message=msg, topic=topic)
def main():
    """Reidentification demo (single camera).

    Draws an orange bounding box and a stable per-subject numeric ID above
    reidentified heads; displays ('-d') and/or republishes ('-o') frames.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
        Plays a video from a jpeg topic, visualizes head detection with an orage bounding
        box around a head and writes the IDs given by reid MS above the heads.
        Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.

        Required topics:
        - <prefix>.cam.0.original.Image.jpg
        - <prefix>.cam.0.dets.ObjectDetectionRecord.json
        - <prefix>.cam.99.reids.ReidRecord.json
        """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    reid_topic = f"{args.prefix}.cam.{REID_TOPIC_ID}.reids.ReidRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.reids.Image.jpg"
    # handle full screen
    window_name = TITLE
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(detection_topic),
        TopicInfo(reid_topic, drop=False),
    ], 500, None, True)
    i = 0
    stored_ids = {}
    for msgs in consumer.getMessages():
        # FIX: loop variable renamed from `time` to `ts` (shadowed the module).
        for ts, v in message_list_to_frame_structure(msgs).items():
            reid_records = v[args.prefix][REID_TOPIC_ID].get("reid", {})
            img = v[args.prefix]["0"]["image"]
            # FIX: isinstance() instead of comparing type() objects.
            if isinstance(img, np.ndarray):
                for key in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"]["head_detection"][key]["bounding_box"]
                    color = COLOR_GREY
                    reid_record = reid_records.get(key)
                    if reid_record:
                        color = COLOR_ORANGE
                        reid_key = reid_record["reg_refs"][0]["subject"]["key"]
                        key_to_display = stored_ids.get(reid_key, None)
                        if key_to_display is None:
                            # First sighting: assign the next sequential ID.
                            key_to_display = len(stored_ids) + 1
                            stored_ids[reid_key] = key_to_display
                        # user id
                        img = draw_nice_text(
                            canvas=img,
                            text=str(key_to_display),
                            bounding_box=object_detection_record["bounding_box"],
                            color=color)
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color)
                # draw ultinous logo
                img = draw_overlay(canvas=img, overlay=overlay,
                                   position=Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned, so don't print it
            continue
        else:
            # FIX: plain string — the f-string had no placeholders.
            print("Press 'q' key for EXIT!")
# Repeatedly submit file-writer jobs built from the template NeXus structure,
# wait while the file is written, then send the matching stop command.
filename = args.filename
with open(args.filename) as json_file:
    data = json.load(json_file)
while True:
    job_id = str(uuid.uuid4())
    file_name = filename.split(os.sep)[-1]
    # Overwrite the edge list of the third shape dimension with 5..20000.
    data["children"][0]["children"][1]["children"][0]["stream"]["shape"][2]["edges"] = \
        list(map(float, range(5, 20001)))
    structure = json.dumps(data)
    blob = serialise_pl72(
        nexus_structure=structure,
        broker=args.broker,
        filename=f"{str(uuid.uuid4())}.nxs",
        job_id=job_id,
    )
    prod.produce(topic, value=blob)
    prod.poll(5)
    sleep(600)  # increase this to allow more time for the file to write
    stop_msg = serialise_6s4t(job_id=job_id)
    prod.produce(topic, value=stop_msg)
    prod.poll(5)
class AioProducer:
    """Async Kafka producer fed from a Redis list.

    Messages are popped from `message_queue_key` on `kafka_redis`, published
    to Kafka, and pushed back onto the queue for retry when delivery fails.
    """

    def __init__(self, config, kafka_redis, message_queue_key, name=None, loop=None):
        self.loop = loop or asyncio.get_event_loop()
        assert config is not None, 'init kafka product error, config is None'
        self.kafka_redis = kafka_redis
        self.message_queue_key = message_queue_key
        self._producer = Producer(**config)
        # 'INIT' -> 'RUNNING' -> 'STOP'
        self.status = 'INIT'
        self.name = name
        # FIX: daemon=True at construction (setDaemon() is deprecated) and
        # attribute typo __heath_check -> __health_check.
        self.__health_check = Thread(target=self.__producer_health_loop, daemon=True)
        self.__health_check.start()

    def __producer_health_loop(self):
        # Serve delivery callbacks until stop() flips the status.
        while self.status != 'STOP':
            self._producer.poll(1)

    def stop(self):
        """Signal the health-check loop (and run()) to terminate."""
        self.status = 'STOP'

    def close(self):
        """
        The producer's poll() pushes data asynchronously, so when the process
        ends there is no guarantee everything has been delivered.  Before
        shutting down we therefore flush() to synchronously push out any
        undelivered messages and wait for that to complete.
        """
        try:
            self.__health_check.join()
            self._producer.flush()
        except Exception as e:
            logger.error(f'{self.name} close error: {e.args}, traceback: {traceback.format_exc()}')

    async def publish(self, topic: str, message: str):
        """
        Publish `message` to `topic` and await the delivery callback.

        :param topic: Kafka topic to publish to
        :param message: payload string
        :return: True on confirmed delivery, False otherwise
        """
        result = self.loop.create_future()

        def ack(err, msg):
            # Delivery callback; runs on the producer thread, so hop back
            # onto the event loop to resolve the future.
            if err is not None:
                logger.error(f'{message} delivery failed: {err}')
                self.loop.call_soon_threadsafe(result.set_result, False)
            else:
                logger.info(f'{message} delivered to {msg.topic()} partition:[{msg.partition()}]')
                self.loop.call_soon_threadsafe(result.set_result, True)

        try:
            self._producer.produce(topic, message, on_delivery=ack)
            return await result
        except BufferError:
            logger.error('Local producer queue is full ({} messages awaiting delivery): try again\n'.format(
                len(self._producer)))
            await asyncio.sleep(1)
        except KafkaException as e:
            logger.error(f'producer publish {message} error, '
                         f'topic:{topic}.error_info: {e.args[0]}')
        except Exception as e:
            logger.error(f'producer publish {message} error'
                         f'topic:{topic}.error_info: {traceback.format_exc()}', exc_info=e)
        return False

    async def __get_message(self) -> str:
        """Pop the next queued message from Redis ('' on timeout)."""
        try:
            with await self.kafka_redis.pool as p:
                return await p.lpop(self.message_queue_key)
        except TimeoutError:
            logger.info(f'redis_key:{self.message_queue_key} timeout')
            return ''

    async def __retry_message(self, message):
        """Push a failed message back onto the Redis queue (0 on timeout)."""
        try:
            with await self.kafka_redis.pool as p:
                return await p.rpush(self.message_queue_key, message)
        except TimeoutError:
            logger.info(f'redis_key:{self.message_queue_key} timeout')
            return 0

    async def run(self):
        """Main pump: Redis -> Kafka until stop(), then close synchronously."""
        while self.status == 'RUNNING':
            message = Message.loads(await self.__get_message())
            if not message:
                await asyncio.sleep(1)
                continue
            flag = await self.publish(topic=message.topic, message=message.dumps())
            push_2_redis_flag = False
            while not flag and not push_2_redis_flag:
                # Delivery failed: record the retry and requeue the message.
                message.delivery_retry()
                push_2_redis_flag = await self.__retry_message(message.dumps())
                if not push_2_redis_flag:
                    await asyncio.sleep(5)
        await self.loop.run_in_executor(None, self.close)
def main():
    """Reidentification demo (reid_event variant, video-file capable).

    Draws an orange bounding box and a stable numeric ID above reidentified
    heads; displays ('-d') and/or republishes ('-o') the annotated frames.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
        Plays a video from a jpeg topic, visualizes head detection with an orage bounding
        box around a head and writes the IDs given by reid MS above the heads.
        Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.

        Required topics:
        - <prefix>.cam.0.original.Image.jpg
        - <prefix>.cam.0.dets.ObjectDetectionRecord.json
        - <prefix>.cam.99.reids.ReidRecord.json
        """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    begin_flag = None
    end_flag = None
    if args.video_file:
        # Finite input: replay from the start and stop at end of partition.
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    reid_topic = f"{args.prefix}.cam.{REID_TOPIC_ID}.reids.ReidRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.reidentification.Image.jpg"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"
    # handle full screen
    window_name = TITLE
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker, "detection", [
            TopicInfo(image_topic),
            TopicInfo(detection_topic),
            TopicInfo(reid_topic),
            TopicInfo(frameinfo_topic)
        ], 500, None, True,
        begin_flag=begin_flag,
        end_flag=end_flag,
    )
    i = 0
    stored_ids = {}
    scaling = 1.0
    for msgs in consumer.getMessages():
        # FIX: loop variable renamed from `time` to `ts` (shadowed the module).
        for ts, v in message_list_to_frame_structure(msgs).items():
            message = v.get(args.prefix, {})
            reid_records = message[REID_TOPIC_ID].get("reid", {})
            img = message["0"].get("image", {})
            # FIX: isinstance() instead of comparing type() objects.
            if isinstance(img, np.ndarray):
                head_detections = message["0"].get("head_detection", {})
                # Set the image scale
                shape_orig = head_detections.pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                # Processing detections
                for detection_key, detection_record in head_detections.items():
                    object_detection_record = detection_record["bounding_box"]
                    color = COLOR_GREY
                    reid_records_for_det = reid_records.get(detection_key, ())
                    for reid_record in filter(lambda r: "reid_event" in r,
                                              reid_records_for_det):
                        color = COLOR_ORANGE
                        reid_key = reid_record["reid_event"]["match_list"][0]["id"]["first_detection_key"]
                        key_to_display = stored_ids.get(reid_key, None)
                        if key_to_display is None:
                            # First sighting: assign the next sequential ID.
                            key_to_display = len(stored_ids) + 1
                            stored_ids[reid_key] = key_to_display
                        # user id
                        img = draw_nice_text(
                            canvas=img,
                            text=str(key_to_display),
                            bounding_box=object_detection_record["bounding_box"],
                            color=color,
                            scale=scaling)
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)
                # draw ultinous logo
                img = draw_overlay(canvas=img, overlay=overlay,
                                   position=Position.BOTTOM_RIGHT, scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 returned, so don't print it
            continue
        else:
            # FIX: plain string — the f-string had no placeholders.
            print("Press 'q' key for EXIT!")
def main():
    """Human-skeleton demo.

    Draws skeleton key points connected by colored lines over each frame and
    displays ('-d') and/or republishes ('-o') the result.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
        Plays video from a jpeg topic, visualizes main points of a human skeleton linked with colorful lines.
        Displays the result on screen ('-d') or stores in a kafka topic with '-o' parameter.

        Required topics:
        - <prefix>.cam.0.original.Image.jpg
        - <prefix>.cam.0.skeletons.SkeletonRecord.json
        """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    begin_flag = None
    end_flag = None
    if args.video_file:
        # Finite input: replay from the start and stop at end of partition.
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    img_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    skeleton_topic = f"{args.prefix}.cam.0.skeletons.SkeletonRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.skeleton.Image.jpg"
    # handle full screen
    window_name = "DEMO: Human skeleton"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker, "skeleton",
        [TopicInfo(img_topic), TopicInfo(skeleton_topic)],
        100, None, True,
        begin_flag=begin_flag,
        end_flag=end_flag)
    i = 0
    for msgs in consumer.getMessages():
        # FIX: loop variable renamed from `time` to `ts` (shadowed the module).
        for ts, v in message_list_to_frame_structure(msgs).items():
            img = v[args.prefix]["0"]["image"]
            # FIX: isinstance() instead of comparing type() objects.
            if isinstance(img, np.ndarray):
                # draw skeletons
                for skeleton_id, skeleton in v[args.prefix]["0"]["skeleton"].items():
                    img = draw_skeleton_with_background(img, skeleton["points"])
                # draw ultinous logo
                img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 returned, so don't print it
            continue
        else:
            # FIX: plain string — the f-string had no placeholders.
            print("Press 'q' key for EXIT!")
def main():
    """Pass-detection demo.

    Draws head detections, accumulated tracks and configured pass lines,
    prints per-line crossing events, and displays ('-d') and/or republishes
    ('-o') the annotated frames.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
        Plays a video from a jpeg topic, visualizes the head detections and tracks, and pass detections.
        Displays the result on screen ('-d') or stores result in kafka ('-o').

        Required topics:
        - <prefix>.cam.0.original.Image.jpg
        - <prefix>.cam.0.dets.ObjectDetectionRecord.json
        - <prefix>.cam.0.tracks.TrackChangeRecord.json
        - <prefix>.cam.0.passdet.PassDetectionRecord.json
        """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument("config", help="Path to service config.", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    config_file = Path(args.config)
    if not config_file.is_file():
        parser.error(f"{args.config} does not exist.")
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    # The pass-line geometry lives in a java-properties service config.
    with config_file.open() as f:
        try:
            passdet_config_json = json.loads(
                javaproperties.load(f)["ultinous.service.kafka.passdet.config"])
        except KeyError:
            parser.error("Missing property: ultinous.service.kafka.passdet.config")
        except JSONDecodeError as e:
            parser.error(f"Error parsing {e}")
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    passlines: Dict[str, PassLine] = {
        pl["id"]: PassLine(next(pass_colors),
                           [(int(p["x"]), int(p["y"])) for p in pl["poly"]])
        for pl in passdet_config_json["passLines"]
    }
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    track_topic = f"{args.prefix}.cam.0.tracks.TrackChangeRecord.json"
    passdet_topic = f"{args.prefix}.cam.0.passdet.PassDetectionRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.passdet.Image.jpg"
    # handle full screen
    window_name = "DEMO: Pass detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(track_topic, drop=False),
        TopicInfo(passdet_topic, drop=False),
        TopicInfo(detection_topic)
    ], 100, None, True)
    i = 0
    tracks: DefaultDict[Any, ColoredPolyLine] = defaultdict(
        lambda: ColoredPolyLine(next(track_colors)))
    for msgs in consumer.getMessages():
        # FIX: loop variable renamed from `time` to `ts` (shadowed the module).
        for ts, v in message_list_to_frame_structure(msgs).items():
            # Update the track polylines first.
            for track_key, track_val in v[args.prefix]["0"]["track"].items():
                if track_val["end_of_track"]:
                    if track_key in tracks:
                        del tracks[track_key]
                    continue
                point = track_val["point"]["x"], track_val["point"]["y"]
                tracks[track_key].add_point(point)
            # Then register pass-candidate crossings on their lines.
            for pass_det in v[args.prefix]["0"]["passdet"].values():
                if pass_det["type"] == "HEARTBEAT":
                    continue
                elif pass_det["type"] == "END_OF_TRACK":
                    continue
                elif pass_det["type"] == "PASS_CANDIDATE":
                    pass_id = pass_det["pass_candidate"]["pass"]["pass_line_id"]
                    cross_dir = pass_det["pass_candidate"]["pass"]["cross_dir"]
                    if pass_id in passlines:
                        passlines[pass_id].add_event(cross_dir)
                elif pass_det["type"] == "PASS_REALIZED":
                    continue
            img = v[args.prefix]["0"]["image"]
            # FIX: isinstance() instead of comparing type() objects.
            if isinstance(img, np.ndarray):
                # draw bounding_box
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"]["head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            img, object_detection_record["bounding_box"],
                            (10, 95, 255))
                for t in tracks.values():
                    t.draw(img)
                for idx, l in enumerate(passlines.values()):
                    l.draw(img)
                    cv2.putText(img, "".join(l.events), (40, (idx + 1) * 50),
                                cv2.FONT_HERSHEY_COMPLEX, 2, l.color, 5,
                                bottomLeftOrigin=True)
                img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned, so don't print it
            continue
        else:
            # FIX: plain string — the f-string had no placeholders.
            print("Press 'q' key for EXIT!")
def main():
    """Head-detection demo.

    Draws an orange bounding box around every detected head and displays
    ('-d') and/or republishes ('-o') the annotated frames.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
        Plays a video from a jpeg topic and visualizes the head detection with an orange bounding box around a head.
        Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.

        Required topics:
        - <prefix>.cam.0.original.Image.jpg
        - <prefix>.cam.0.dets.ObjectDetectionRecord.json
        """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    begin_flag = None
    end_flag = None
    if args.video_file:
        # Finite input: replay from the start and stop at end of partition.
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.head_detection.Image.jpg"
    # handle full screen
    window_name = "DEMO: Head detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)
    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(detection_topic),
        TopicInfo(frameinfo_topic)
    ], 100, None, True, begin_flag=begin_flag, end_flag=end_flag)
    i = 0
    scaling = 1.0
    for msgs in consumer.getMessages():
        # FIX: loop variable renamed from `time` to `ts` (shadowed the module).
        for ts, v in message_list_to_frame_structure(msgs).items():
            img = v[args.prefix]["0"]["image"]
            # FIX: isinstance() instead of comparing type() objects.
            if isinstance(img, np.ndarray):
                # Set the image scale
                shape_orig = v[args.prefix]["0"]["head_detection"].pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                # draw bounding_box
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"]["head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            canvas=img,
                            bounding_box=object_detection_record["bounding_box"],
                            color=(10, 95, 255),
                            scaling=scaling)
                # draw ultinous logo
                img = draw_overlay(canvas=img, overlay=overlay,
                                   position=Position.BOTTOM_RIGHT, scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            else:
                break
        elif k == -1:  # normally -1 returned, so don't print it
            continue
        else:
            # FIX: plain string — the f-string had no placeholders.
            print("Press 'q' key for EXIT!")
class Kafka(object):
    """Kafka helper used by generated API tests.

    Wraps a confluent-kafka ``Consumer`` (for passively observing topics)
    and a ``Producer`` (for publishing), and records each ``put`` as a
    synthetic HTTP PUT transaction via the ``recorder``/``apiritif``
    machinery defined elsewhere in this module.
    """

    def __init__(self, target_key) -> None:
        super().__init__()
        # Broker address is resolved from the logical target key by a
        # module-level helper (defined elsewhere in this file).
        self.address = _address_for_key(target_key)
        kafka_config = {
            'bootstrap.servers': self.address,
            'group.id': "up9-test-group",
            'enable.auto.commit': 'false'  # important for passive observing
        }
        if "ssl://" in self.address.lower():
            kafka_config['security.protocol'] = 'SSL'
        self.consumer = Consumer(kafka_config)
        self.producer = Producer(kafka_config)
        self.watching_topics = []
        self.consumer.list_topics(timeout=5)  # to check for connectivity

    def watch_topics(self, topics: list):
        """Subscribe to *topics*, seeking every assigned partition to its
        high watermark so only messages produced after this call are seen."""

        def my_on_assign(consumer, partitions):
            # Rebalance callback: move each partition's offset to the end of
            # the log before consumption starts.
            logging.debug("On assign: %r", partitions)
            consumer.assign(partitions)
            for partition in partitions:
                low, high = consumer.get_watermark_offsets(partition)
                partition.offset = high
                logging.debug("Setting offset: %r", partition)
                consumer.seek(partition)

        self.watching_topics.extend(topics)
        self.consumer.subscribe(topics, on_assign=my_on_assign)
        self.consumer.poll(0.01)  # to trigger partition assignments

    def get_watched_messages(self, interval=0.0, predicate=lambda x: True):
        """Poll the watched topics for roughly *interval* seconds and return
        the list of messages for which *predicate* returned true.

        Raises ``KafkaException`` if the consumer reports an error.
        NOTE(review): *interval* is used both as the per-poll timeout and as
        the overall deadline, so the total wait can exceed *interval*.
        """
        logging.debug(
            "Checking messages that appeared on kafka topics: %r",
            self.watching_topics)
        res = []
        start = time.time()
        while True:
            msg = self.consumer.poll(interval)
            if msg is None or time.time() - start > interval:
                break  # done reading
            if msg.error():
                raise KafkaException("kafka consumer error: {}".format(
                    msg.error()))
            logging.debug(
                "Potential message: %r",
                (msg.partition(), msg.key(), msg.headers(), msg.value()))
            if predicate(msg):
                res.append(msg)
        # TODO: consumer.close()
        return res

    def assert_seen_message(self, resp, delay=0, predicate=lambda x: True):
        """Assert that at least one message matching *predicate* appeared on
        the watched topics within *delay* seconds; return those messages."""

        @recorder.assertion_decorator
        def assert_seen_kafka_message(resp, topics, delay):
            messages = self.get_watched_messages(delay, predicate)
            messages = [(m.topic(), m.key(), m.value(), m.headers())
                        for m in messages]
            if not messages:
                raise AssertionError("No messages on Kafka topic %r" % topics)
            else:
                logging.info("Validated the messages have appeared: %s",
                             messages)
            return messages

        return assert_seen_kafka_message(resp, self.watching_topics, delay)

    def put(self, topic, data=None, json=None, headers=None):
        """Produce one message to *topic* — either raw *data* or the
        JSON-serialized *json* object — and record the operation as a
        synthetic ``PUT`` request/response pair."""
        # TODO: parse key out of URL
        if topic.startswith('/'):
            topic = topic[1:]
        if data is None and json is not None:
            data = json_lib.dumps(json)
        with apiritif.transaction('kafka://[' + self.address + ']/' + topic):
            logging.info("Sending message to Kafka topic %r: %r", topic, data)
            self.producer.produce(
                topic, data, headers=[] if headers is None else headers)
            self.producer.poll(0)
            self.producer.flush()
            wrapped_req = self._make_request(
                'PUT', 'kafka://' + self.address.split(',')[0] + '/' + topic,
                data)
            wrapped_response = self._make_response(wrapped_req)
            recorder.record_http_request('PUT', self.address, wrapped_req,
                                         wrapped_response, _context.session)
        return wrapped_response

    def _make_request(self, method, url, request):
        # Build a prepared requests.Request so the recorder can treat the
        # kafka operation like an ordinary HTTP exchange.
        req = requests.Request(method, url=url, data=request)
        prepared = req.prepare()
        _context.grpc_mapping[id(request)] = prepared
        return prepared

    def _make_response(self, wrapped_req):
        # Synthesize a "202 Accepted" response object for the recorder; the
        # broker gives no HTTP-style response of its own.
        resp = requests.Response()
        resp.status_code = 202
        resp.request = wrapped_req
        resp._request = wrapped_req
        resp.msg = 'Accepted'
        resp.raw = io.BytesIO()
        return resp
def main():
    """Filtered-detection demo.

    Draws all raw detections in grey and the area/type-filtered detections in
    orange, together with the configured positive (green) and negative (red)
    areas.  The result is displayed on screen ('-d') and/or written back to a
    kafka topic ('-o').
    """
    parser = init_parser()
    args = parser.parse_args()
    config_data = parse_config_data(args=args, parser=parser)
    positive_areas = parse_areas(config_data, "positive_areas")
    negative_areas = parse_areas(config_data, "negative_areas")
    detection_types = parse_detection_types(config_data)

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )

    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    # When replaying a recorded video file, read the topic from its beginning
    # and stop when the partition is exhausted.
    begin_flag = None
    end_flag = None
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION

    output_topic_name = f"{args.prefix}.cam.0.filtered_dets.Image.jpg"

    # handle full screen
    window_name = "DEMO: Filtered detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read messages, draw and display/store them
    consumer = TimeOrderedGeneratorWithTimeout(
        broker=args.broker,
        groupid="detection",
        topics_infos=[
            TopicInfo(
                f"{args.prefix}.cam.0.original.Image.jpg"),  # image_topic
            TopicInfo(
                f"{args.prefix}.cam.0.filtered_dets.ObjectDetectionRecord.json"
            ),  # filtered_detection_topic
            TopicInfo(f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
                      )  # detection_topic
        ],
        latency_ms=100,
        group_by_time=True,
        begin_flag=begin_flag,
        end_flag=end_flag)

    i = 0
    scaling = 1.0
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            frame_info = v[args.prefix]["0"]
            img = frame_info["image"]
            # Skip frames where no image arrived for this timestamp.
            if not isinstance(img, np.ndarray):
                continue

            # Set the image scale from the frame-info record, if one arrived.
            shape_orig = frame_info["head_detection"].pop("image", {})
            if shape_orig:
                scaling = img.shape[1] / shape_orig["frame_info"]["columns"]

            # Raw detections in grey, filtered detections in orange.
            for head_detection in frame_info["head_detection"]:
                img = draw_bounding_box(
                    object_detection_record=frame_info["head_detection"]
                    [head_detection]["bounding_box"],
                    detection_types=detection_types,
                    img=img,
                    scaling=scaling,
                    color=COLOR_GREY)
            for head_detection in frame_info["filtered_head_detection"]:
                img = draw_bounding_box(
                    object_detection_record=frame_info[
                        "filtered_head_detection"][head_detection]
                    ["filtered_bounding_box"],
                    detection_types=detection_types,
                    img=img,
                    scaling=scaling,
                    color=COLOR_ORANGE)

            draw_areas(areas=positive_areas, img=img, color=COLOR_GREEN)
            draw_areas(areas=negative_areas, img=img, color=COLOR_RED)
            draw_ultinous_logo(canvas=img, scale=scaling)

            # produce output topic
            if args.output:
                producer.produce(output_topic_name,
                                 value=encode_image_to_message(img),
                                 timestamp=time)
                producer.poll(0)
                # Flush every 100 produced frames so the internal queue
                # cannot grow without bound.
                if i % 100 == 0:
                    producer.flush()
                    i = 0
                i += 1

            # display
            if args.display:
                cv2.imshow(window_name, img)
                k = cv2.waitKey(33)
                if k == 113:  # The 'q' key to stop
                    if args.video_file:
                        raise SystemExit(130)
                    else:
                        break
                elif k == -1:  # normally -1 returned, so don't print it
                    continue
                else:
                    print("Press 'q' key for EXIT!")
def main():
    """Head-detection and tracking demo.

    Draws an orange bounding box around each detected head and a colored
    polyline for every live track.  The result is displayed on screen ('-d')
    and/or written back to a kafka topic ('-o').

    Required topics:
      - <prefix>.cam.0.original.Image.jpg
      - <prefix>.cam.0.dets.ObjectDetectionRecord.json
      - <prefix>.cam.0.tracks.TrackChangeRecord.json
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes the head detections and tracks.
           Displays the result on screen ('-d') or stores result in kafka ('-o').
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.tracks.TrackChangeRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )

    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    track_topic = f"{args.prefix}.cam.0.tracks.TrackChangeRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.tracks.Image.jpg"

    # handle full screen
    window_name = "DEMO: Head detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read messages, draw and display/store them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker, "detection",
        [
            TopicInfo(image_topic),
            TopicInfo(track_topic, drop=False),
            TopicInfo(detection_topic)
        ],
        100, None, True)

    i = 0
    # New track keys lazily get a Track with the next color from the palette.
    tracks: DefaultDict[Any, Track] = defaultdict(lambda: Track(next(colors)))
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            # Apply the track change records first: extend live tracks,
            # drop finished ones.
            for track_key, track_val in v[args.prefix]["0"]["track"].items():
                if track_val["end_of_track"]:
                    if track_key in tracks:
                        del tracks[track_key]
                    continue
                point = track_val["point"]["x"], track_val["point"]["y"]
                tracks[track_key].add_point(point)

            img = v[args.prefix]["0"]["image"]
            # Skip frames where no image arrived for this timestamp.
            if not isinstance(img, np.ndarray):
                continue

            # draw bounding boxes
            for head_detection in v[args.prefix]["0"]["head_detection"]:
                object_detection_record = v[args.prefix]["0"][
                    "head_detection"][head_detection]["bounding_box"]
                if object_detection_record["type"] == "PERSON_HEAD":
                    img = draw_nice_bounding_box(
                        img, object_detection_record["bounding_box"],
                        (10, 95, 255))

            # draw track polylines
            for t in tracks.values():
                cv2.polylines(img=img,
                              pts=[np.array(t.points, np.int32)],
                              isClosed=False,
                              color=t.color,
                              thickness=3)

            # draw ultinous logo
            img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)

            # produce output topic
            if args.output:
                producer.produce(output_topic_name,
                                 value=encode_image_to_message(img),
                                 timestamp=time)
                producer.poll(0)
                # Flush every 100 produced frames so the internal queue
                # cannot grow without bound.
                if i % 100 == 0:
                    producer.flush()
                    i = 0
                i += 1

            # display
            if args.display:
                cv2.imshow(window_name, img)
                k = cv2.waitKey(33)
                if k == 113:  # The 'q' key to stop
                    break
                elif k == -1:  # normally -1 returned, so don't print it
                    continue
                else:
                    print("Press 'q' key for EXIT!")
print("Message delivered to {} [{}]".format(msg.topic(), msg.partition())) if __name__ == "__main__": # async producer # default serializers -> BYTES p = Producer({ "bootstrap.servers": BOOTSTRAP_SERVERS, }) @atexit.register def finisher(): # Wait for any outstanding messages to be delivered and delivery report # callbacks to be triggered. p.flush(5) while True: # Trigger any available delivery report callbacks from previous produce() calls p.poll(0) # Asynchronously produce a message, the delivery report callback # will be triggered from poll() above, or flush() below, when the message has # been successfully delivered or failed permanently. value = generate_data_json().encode() key = KEY if KEY.strip() != "" else None print("Producing message: KEY: {!r} | VALUE: {!r}".format(key, value)) p.produce(TOPIC, value, key, callback=delivery_report) # wait specified interval time.sleep(INTERVAL)
def main():
    """Multi-camera re-identification demo.

    Draws a grey bounding box around every detected head; once a detection is
    re-identified the box turns orange and the ID/name, age and dwell time
    (depending on the ``text`` argument) are written above the head.
    Displays ('-d') and/or stores ('-o') the annotated frames.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Reidentification demo using any number of cameras: Either camera can be used for registration or reidentification only, or for both.
           Plays a video from a jpeg topic, visualizes head detection with a gray bounding box around a head.
           When a detection is identified, changes the bounding box color to orange
           and writes the dwell time, age and ID (derived from the reid MS ID) above the heads.
           Displays ('-d') or stores ('-o') the result of this demo in kafka topics.
           Required topics (example):
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.frameinfo.FrameInfoRecord.json
           - <prefix>.cam.0.ages.AgeRecord.json
           - <prefix>.cam.1.original.Image.jpg
           - <prefix>.cam.1.dets.ObjectDetectionRecord.json
           - <prefix>.cam.1.frameinfo.FrameInfoRecord.json
           - <prefix>.cam.1.ages.AgeRecord.json
           ...
           - <prefix>.cam.1.reids.ReidRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    parser.add_argument('text', help='Text to display (age|dwell_time|both).', type=str)
    args = parser.parse_args()

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )

    # BUGFIX: the producer was previously created only under `if args.output:`
    # although it is also used unconditionally to update the technical naming
    # topic ("detected.records.json") whenever a new reid appears — running
    # with '-d' only crashed with a NameError.  Create it unconditionally.
    producer = Producer({'bootstrap.servers': args.broker})

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    # Prepare the topics to read
    input_topics = [
        f"{args.prefix}.cam.{id}.{topic_postfix}" for id in CAMERA_TOPIC_IDS
        for topic_postfix in TOPIC_POSTFIXES
    ]
    reid_topics = [
        f"{args.prefix}.cam.{id}.{topic_postfix}" for id in REID_TOPIC_IDS
        for topic_postfix in REID_TOPIC_POSTFIXES
    ]
    consumable_topics = list(map(TopicInfo, input_topics)) \
        + (list(map(lambda t: TopicInfo(t, drop=False), reid_topics)))

    # TODO (when names via person stream): Remove this consumer
    reg_consumer = Consumer({
        'bootstrap.servers': args.broker,
        'group.id': 'multicamreid_reg',
        'auto.offset.reset': 'earliest'
    })
    reg_consumer.assign(
        [TopicPartition(topic="named.records.json", partition=0, offset=0)])

    output_topics = dict((id, f"{args.prefix}.cam.{id}.{OUTPUT_TOPIC_POSTFIX}")
                         for id in CAMERA_TOPIC_IDS)

    # read messages, draw and display/store them
    consumer = TimeOrderedGeneratorWithTimeout(broker=args.broker,
                                               groupid="detection",
                                               topics_infos=consumable_topics,
                                               latency_ms=200,
                                               commit_interval_sec=None,
                                               group_by_time=True)

    registrations: Dict[str, Registration] = {}
    i = 0
    inner_id = 0
    scaling = 1.0
    for msgs in consumer.getMessages():
        k = -1
        for time, v in message_list_to_frame_structure(msgs).items():
            message = v.get(args.prefix, {})

            # Collect Reid records from all reid topics for this timestamp.
            reid_records = {}
            for reid_id in REID_TOPIC_IDS:
                reid_message = message.get(reid_id, {})
                reid_records.update(reid_message.get("reid", {}))

            # Process the image of every (non-reid) camera topic.
            for topic_key, topic_message in filter(
                    lambda t: t[0] not in REID_TOPIC_IDS, message.items()):
                img = topic_message.get("image", {})
                if not isinstance(img, np.ndarray):
                    continue
                head_detections = topic_message.get("head_detection", {})

                # Set the image scale from the frame-info record, if present.
                shape_orig = head_detections.pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]

                # Processing the detections of the image
                for detection_key, detection_record in head_detections.items():
                    object_detection_record = detection_record.get(
                        "bounding_box", {})
                    if not object_detection_record:
                        continue
                    key_to_display = ""
                    color = COLOR_DARK_GREY

                    face_detection = detection_record.get("unknown", {})
                    if face_detection:
                        color = COLOR_LIGHT_GREY

                    age = None
                    age_detection_record = detection_record.get("age", {})
                    if age_detection_record:
                        age = age_detection_record["age"]
                        if args.text == "age" or args.text == "both":
                            key_to_display = f"Age: {age}" if age else ""

                    # Reidentification received for the detection
                    reid_records_for_det = reid_records.get(detection_key, {})
                    if reid_records_for_det:
                        for reid_record in filter(lambda r: "reid_event" in r,
                                                  reid_records_for_det):
                            # We only use the first [0] identified face now
                            reid_key = reid_record["reid_event"]["match_list"][
                                0]["id"]["first_detection_key"]
                            registered = registrations.get(reid_key)
                            if registered:
                                age_to_display = ""
                                if age:
                                    registered.addAge(age)
                                if args.text == "age" or args.text == "both":
                                    age_to_display = f"; Age: {registered.age:d}" if age else ""
                                # Calculate the dwell time if required
                                dwell_time_display = ""
                                if args.text == "dwell_time" or args.text == "both":
                                    detection_time = reid_record["reid_event"][
                                        "match_list"][0]["id"][
                                            "first_detection_time"]
                                    dwell_time = time - int(detection_time)
                                    dwell_time_display = f"; Dwell time: {dwell_time}ms"
                                color = COLOR_ORANGE
                                name_to_display = registered.name if registered.name else f"ID: {registered.id}"
                                key_to_display = f"{name_to_display}{age_to_display}{dwell_time_display}"
                            else:
                                # First time this person is seen: register it
                                # and publish the technical naming entry.
                                inner_id += 1
                                registrations[reid_key] = Registration(
                                    id=inner_id)
                                if age:
                                    registrations[reid_key].addAge(age)
                                # Update the technical naming topic
                                # TODO (when names via person stream): remove
                                producer.produce(
                                    "detected.records.json",
                                    key=str(reid_key).encode("utf-8"),
                                    value=(str(inner_id) + ";").encode("utf-8"),
                                    timestamp=time)

                    # Read the technical naming topic
                    # TODO (when names via person stream): remove
                    reg_msg = reg_consumer.poll(0.01)
                    if reg_msg is not None:
                        try:
                            key = reg_msg.key().decode("utf-8")
                            name = reg_msg.value().decode("utf-8")
                            # Update the person name
                            reg_to_update = registrations.get(key)
                            if reg_to_update:
                                reg_to_update.addName(name)
                            else:
                                registrations[key] = Registration(name=name)
                        except Exception:  # was a bare except: keep best-effort, let KeyboardInterrupt through
                            print(
                                "Decoding entry of the named.records topic failed.",
                                flush=True)

                    # draw text above bounding box
                    img = draw_nice_text(
                        canvas=img,
                        text=key_to_display,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scale=scaling)

                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)

                # draw ultinous logo
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT,
                                   scale=scaling)

                # produce output topic
                if args.output:
                    out_topic = output_topics.get(topic_key)
                    producer.produce(out_topic,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 1000 == 0:
                        producer.flush()
                    i += 1

                # display
                # if args.display:
                cv2.imshow(f"DEMO Camera {topic_key}", img)
                k = cv2.waitKey(33)
                if k == 113:  # The 'q' key to stop
                    break
                elif k == -1:  # normally -1 returned, so don't print it
                    continue
                else:
                    print("Press 'q' key for EXIT!")
def main():
    """Head-pose demo.

    Visualizes head pose records as three differently colored axis lines
    (yaw, pitch, roll) drawn over each detected head.  Displays ('-d')
    and/or writes ('-o') the annotated frames to a kafka topic.

    Required topics:
      - <prefix>.cam.0.original.Image.jpg
      - <prefix>.cam.0.dets.ObjectDetectionRecord.json
      - <prefix>.cam.0.poses.HeadPose3DRecord.json
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays video from a jpeg topic, and visualize the head pose records with 3 diff color lines.
           [see: The yaw, pitch, and roll angles in the human head motion]
           Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.poses.HeadPose3DRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output',
                        help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )

    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    pose_topic = f"{args.prefix}.cam.0.poses.HeadPose3DRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.head_pose.Image.jpg"

    # handle full screen
    window_name = "DEMO: Head pose"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read messages, draw and display/store them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker, "detection",
        [
            TopicInfo(image_topic),
            TopicInfo(detection_topic),
            TopicInfo(pose_topic),
        ],
        100, None, True)

    i = 0
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            img = v[args.prefix]["0"]["image"]
            # Skip frames where no image arrived for this timestamp.
            if not isinstance(img, np.ndarray):
                continue

            # draw head pose axes for every detected head
            for head_detection in v[args.prefix]["0"]["head_detection"]:
                object_detection_record = v[args.prefix]["0"][
                    "head_detection"][head_detection]["bounding_box"]
                if object_detection_record["type"] == "PERSON_HEAD":
                    pose_record = v[args.prefix]["0"]["head_detection"][
                        head_detection]["head_pose"]
                    if "pose" in pose_record:
                        img = draw_head_pose(
                            img,
                            pose_record["pose"],
                            object_detection_record["bounding_box"],
                        )

            # draw ultinous logo
            img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)

            # produce output topic
            if args.output:
                producer.produce(output_topic_name,
                                 value=encode_image_to_message(img),
                                 timestamp=time)
                producer.poll(0)
                # Flush every 100 produced frames so the internal queue
                # cannot grow without bound.
                if i % 100 == 0:
                    producer.flush()
                i += 1

            # display
            if args.display:
                cv2.imshow(window_name, img)
                k = cv2.waitKey(33)
                if k == 113:  # The 'q' key to stop
                    break
                elif k == -1:  # normally -1 returned, so don't print it
                    continue
                else:
                    print("Press 'q' key for EXIT!")