def main():
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic; visualizes the head detections, tracks, and pass detections.
           Displays the result on screen ('-d') or stores the result in kafka ('-o').
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.tracks.TrackChangeRecord.json
           - <prefix>.cam.0.passdet.PassDetectionRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument("config", help="Path to service config.", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()

    passdet_config_json = parse_config_data(args=args, parser=parser)

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed")

    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    passlines: Dict[str, PassLine] = {
        pl["id"]: PassLine(next(pass_colors),
                           [(int(p["x"]), int(p["y"])) for p in pl["poly"]])
        for pl in passdet_config_json["passLines"]
    }

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    track_topic = f"{args.prefix}.cam.0.tracks.TrackChangeRecord.json"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"
    passdet_topic = f"{args.prefix}.cam.0.passdet.PassDetectionRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.passdet.Image.jpg"

    # Write notification if no message is received for this long
    notification_delay_sec = 10

    # handle full screen
    window_name = "DEMO: Pass detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker,
        "detection",
        [
            TopicInfo(image_topic),
            TopicInfo(track_topic, drop=False),
            TopicInfo(passdet_topic, drop=False),
            TopicInfo(detection_topic),
            TopicInfo(frameinfo_topic)
        ],
        100,
        None,
        True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)

    i = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None
    tracks: DefaultDict[Any, ColoredPolyLine] = defaultdict(
        lambda: ColoredPolyLine(next(track_colors)))

    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                for track_key, track_val in v[args.prefix]["0"]["track"].items():
                    if track_val["end_of_track"]:
                        if track_key in tracks:
                            del tracks[track_key]
                        continue
                    point = track_val["point"]["x"], track_val["point"]["y"]
                    tracks[track_key].add_point(point)

                for pass_det in v[args.prefix]["0"]["passdet"].values():
                    if pass_det["type"] == "HEARTBEAT":
                        continue
                    elif pass_det["type"] == "END_OF_TRACK":
                        continue
                    elif pass_det["type"] == "PASS_CANDIDATE":
                        pass_id = pass_det["pass_candidate"]["pass"]["pass_line_id"]
                        cross_dir = pass_det["pass_candidate"]["pass"]["cross_dir"]
                        if pass_id in passlines:
                            passlines[pass_id].add_event(cross_dir)
                    elif pass_det["type"] == "PASS_REALIZED":
                        continue

                img = v[args.prefix]["0"]["image"]
                if not isinstance(img, np.ndarray):
                    continue
                last_image_ts = int(time.time())

                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                shape_orig = v[args.prefix]["0"]["head_detection"].pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]

                # draw bounding boxes
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            canvas=img,
                            bounding_box=object_detection_record["bounding_box"],
                            color=(10, 95, 255),
                            scaling=scaling)

                # draw tracks and pass lines with their event history
                for t in tracks.values():
                    t.draw(img, scaling)
                for idx, l in enumerate(passlines.values()):
                    l.draw(img, scaling)
                    cv2.putText(img, "".join(l.events), (40, (idx + 1) * 50),
                                cv2.FONT_HERSHEY_COMPLEX, 2, l.color, 5,
                                bottomLeftOrigin=True)
                img = draw_overlay(canvas=img, overlay=overlay,
                                   position=Position.BOTTOM_RIGHT, scale=scaling)

                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1

                # display
                if args.display:
                    cv2.imshow(window_name, img)

        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (last_image_ts is None or
                               last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)

        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 is returned, so don't print it
            continue
        else:
            print("Press the 'q' key to exit!")
def main():
    parser = init_parser()
    args = parser.parse_args()
    config_data = parse_config_data(args=args, parser=parser)
    positive_areas = parse_areas(config_data, "positive_areas")
    negative_areas = parse_areas(config_data, "negative_areas")
    detection_types = parse_detection_types(config_data)

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed")

    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000

    output_topic_name = f"{args.prefix}.cam.0.filtered_dets.Image.jpg"

    # Write notification if no message is received for this long
    notification_delay_sec = 10

    # handle full screen
    window_name = "DEMO: Filtered detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        broker=args.broker,
        groupid="detection",
        topics_infos=[
            TopicInfo(f"{args.prefix}.cam.0.original.Image.jpg"),  # image_topic
            TopicInfo(f"{args.prefix}.cam.0.filtered_dets.ObjectDetectionRecord.json"),  # filtered_detection_topic
            TopicInfo(f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json")  # detection_topic
        ],
        latency_ms=100,
        group_by_time=True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)

    i = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None

    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                frame_info = v[args.prefix]["0"]
                img = frame_info["image"]
                if not isinstance(img, np.ndarray):
                    continue
                last_image_ts = int(time.time())

                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                shape_orig = frame_info["head_detection"].pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]

                # draw bounding boxes: all detections in grey, filtered ones in orange
                for head_detection in frame_info["head_detection"]:
                    img = draw_bounding_box(
                        object_detection_record=frame_info["head_detection"][head_detection]["bounding_box"],
                        detection_types=detection_types,
                        img=img,
                        scaling=scaling,
                        color=COLOR_GREY)
                for head_detection in frame_info["filtered_head_detection"]:
                    img = draw_bounding_box(
                        object_detection_record=frame_info["filtered_head_detection"][head_detection]["filtered_bounding_box"],
                        detection_types=detection_types,
                        img=img,
                        scaling=scaling,
                        color=COLOR_ORANGE)

                draw_areas(areas=positive_areas, img=img, color=COLOR_GREEN)
                draw_areas(areas=negative_areas, img=img, color=COLOR_RED)
                draw_ultinous_logo(canvas=img, scale=scaling)

                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1

                # display
                if args.display:
                    cv2.imshow(window_name, img)

        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (last_image_ts is None or
                               last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)

        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            else:
                break
        elif k == -1:  # normally -1 is returned, so don't print it
            continue
        else:
            print("Press the 'q' key to exit!")
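# For orientation, a sketch of the per-timestamp structure the loop above indexes
# into, inferred purely from the accesses made in these demos; the real structure
# is built by message_list_to_frame_structure and may carry more fields:
#
# v = {
#     "<prefix>": {
#         "0": {                                        # camera id
#             "image": <np.ndarray>,                    # decoded jpeg frame
#             "head_detection": {
#                 "image": {"frame_info": {"columns": 1920}},  # original width, popped for scaling
#                 "<detection_key>": {"bounding_box": {...}},
#             },
#             "filtered_head_detection": {
#                 "<detection_key>": {"filtered_bounding_box": {...}},
#             },
#         }
#     }
# }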
def main():
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Reidentification demo using any number of cameras: each camera can be used for
           registration only, for reidentification only, or for both.
           Plays a video from a jpeg topic, visualizes head detection with a gray bounding box around a head.
           When a detection is identified, changes the bounding box color to orange and writes
           the dwell time, age and ID (derived from the reid MS ID) above the heads.
           Displays ('-d') or stores ('-o') the result of this demo in kafka topics.
           Required topics (example):
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.frameinfo.FrameInfoRecord.json
           - <prefix>.cam.0.ages.AgeRecord.json
           - <prefix>.cam.1.original.Image.jpg
           - <prefix>.cam.1.dets.ObjectDetectionRecord.json
           - <prefix>.cam.1.frameinfo.FrameInfoRecord.json
           - <prefix>.cam.1.ages.AgeRecord.json
           ...
           - <prefix>.cam.1.reids.ReidRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic',
                        action='store_true')
    parser.add_argument('text', help='Text to display (age|dwell_time|both).', type=str)
    args = parser.parse_args()

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed")

    # The producer is also needed for the technical naming topic below,
    # not only for the '-o' image output, so it is created unconditionally.
    producer = Producer({'bootstrap.servers': args.broker})

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    # Prepare the topics to read
    input_topics = [
        f"{args.prefix}.cam.{id}.{topic_postfix}"
        for id in CAMERA_TOPIC_IDS
        for topic_postfix in TOPIC_POSTFIXES
    ]
    reid_topics = [
        f"{args.prefix}.cam.{REID_TOPIC_ID}.{topic_postfix}"
        for topic_postfix in REID_TOPIC_POSTFIXES
    ]
    consumable_topics = list(map(TopicInfo, input_topics)) \
        + list(map(lambda t: TopicInfo(t, drop=False), reid_topics))

    # TODO (when names arrive via the person stream): remove this consumer
    reg_consumer = Consumer({
        'bootstrap.servers': args.broker,
        'group.id': 'multicamreid_reg',
        'auto.offset.reset': 'earliest'
    })
    reg_consumer.assign(
        [TopicPartition(topic="named.records.json", partition=0, offset=0)])

    output_topics = dict((id, f"{args.prefix}.cam.{id}.{OUTPUT_TOPIC_POSTFIX}")
                         for id in CAMERA_TOPIC_IDS)

    # Write notification if no message is received for this long
    notification_delay_sec = 10

    begin_flag = None
    end_flag = EndFlag.NEVER
    heartbeat_interval_ms = 1000

    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        broker=args.broker,
        groupid="detection",
        topics_infos=consumable_topics,
        latency_ms=200,
        commit_interval_sec=None,
        group_by_time=True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)

    registrations: Dict[str, Registration] = {}
    i = 0
    inner_id = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None
    # We assume that Camera 0 is always configured
    cameras = {"DEMO Camera 0": (last_image_ts, img_dimensions)}

    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                message = v.get(args.prefix, {})

                # Collect Reid records
                reid_records = {}
                reid_message = message.get(REID_TOPIC_ID, {})
                reid_records.update(reid_message.get("reid", {}))

                # Process the image of each camera topic
                for topic_key, topic_message in filter(
                        lambda t: t[0] != REID_TOPIC_ID, message.items()):
                    img = topic_message.get("image", {})
                    if not isinstance(img, np.ndarray):
                        continue
                    head_detections = topic_message.get("head_detection", {})

                    # Update the camera properties for display
                    cameras[f"DEMO Camera {topic_key}"] = (
                        int(time.time()), (img.shape[0], img.shape[1]))

                    # Set the image scale
                    shape_orig = head_detections.pop("image", {})
                    if shape_orig:
                        scaling = img.shape[1] / shape_orig["frame_info"]["columns"]

                    # Process the detections of the image
                    for detection_key, detection_record in head_detections.items():
                        object_detection_record = detection_record.get("bounding_box", {})
                        if not object_detection_record:
                            continue
                        key_to_display = ""
                        color = COLOR_DARK_GREY

                        face_detection = detection_record.get("unknown", {})
                        if face_detection:
                            color = COLOR_LIGHT_GREY

                        age = None
                        age_detection_record = detection_record.get("age", {})
                        if age_detection_record:
                            age = age_detection_record["age"]
                        if args.text == "age" or args.text == "both":
                            key_to_display = f"Age: {age}" if age else ""

                        # Reidentification received for the detection
                        reid_records_for_det = reid_records.get(detection_key, {})
                        if reid_records_for_det:
                            for reid_record in filter(lambda r: "reid_event" in r,
                                                      reid_records_for_det):
                                # We only use the first [0] identified face now
                                reid_key = reid_record["reid_event"]["match_list"][0][
                                    "id"]["first_detection_key"]
                                registered = registrations.get(reid_key, None)
                                if registered:
                                    age_to_display = ""
                                    if age:
                                        registered.addAge(age)
                                    if args.text == "age" or args.text == "both":
                                        age_to_display = f"; Age: {registered.age:d}" if age else ""
                                    # Calculate the dwell time if required
                                    dwell_time_display = ""
                                    if args.text == "dwell_time" or args.text == "both":
                                        detection_time = reid_record["reid_event"][
                                            "match_list"][0]["id"]["first_detection_time"]
                                        dwell_time = ts - int(detection_time)
                                        dwell_time_display = f"; Dwell time: {dwell_time}ms"
                                    color = COLOR_ORANGE
                                    name_to_display = registered.name if registered.name \
                                        else f"ID: {registered.id}"
                                    key_to_display = f"{name_to_display}{age_to_display}{dwell_time_display}"
                                else:
                                    inner_id += 1
                                    registrations[reid_key] = Registration(id=inner_id)
                                    if age:
                                        registrations[reid_key].addAge(age)

                                    # Update the technical naming topic
                                    # TODO (when names arrive via the person stream): remove
                                    producer.produce(
                                        "detected.records.json",
                                        key=str(reid_key).encode("utf-8"),
                                        value=(str(inner_id) + ";").encode("utf-8"),
                                        timestamp=ts)

                        # Read the technical naming topic
                        # TODO (when names arrive via the person stream): remove
                        reg_msg = reg_consumer.poll(0.01)
                        if reg_msg is not None:
                            try:
                                key = reg_msg.key().decode("utf-8")
                                name = reg_msg.value().decode("utf-8")
                                # Update the person name
                                reg_to_update = registrations.get(key)
                                if reg_to_update:
                                    reg_to_update.addName(name)
                                else:
                                    registrations[key] = Registration(name=name)
                            except Exception:
                                print("Decoding an entry of the named.records topic failed.",
                                      flush=True)

                        # draw text above the bounding box
                        img = draw_nice_text(
                            canvas=img,
                            text=key_to_display,
                            bounding_box=object_detection_record["bounding_box"],
                            color=color,
                            scale=scaling)

                        # draw bounding box
                        img = draw_nice_bounding_box(
                            canvas=img,
                            bounding_box=object_detection_record["bounding_box"],
                            color=color,
                            scaling=scaling)

                    # draw ultinous logo
                    img = draw_overlay(canvas=img, overlay=overlay,
                                       position=Position.BOTTOM_RIGHT, scale=scaling)

                    # produce output topic
                    if args.output:
                        out_topic = output_topics.get(topic_key)
                        producer.produce(out_topic,
                                         value=encode_image_to_message(img),
                                         timestamp=ts)
                        producer.poll(0)
                        if i % 1000 == 0:
                            producer.flush()
                        i += 1

                    # display
                    if args.display:
                        cv2.imshow(f"DEMO Camera {topic_key}", img)

        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display:
            for camera_name, (last_image_ts, dimension) in cameras.items():
                if last_image_ts is None or \
                        last_image_ts + notification_delay_sec < int(time.time()):
                    # use the dimensions stored for this camera
                    img = np.zeros((*dimension, 3), np.uint8)
                    text = "Waiting for input Kafka topics to be populated. \n" \
                           "Please make sure that MGR and other necessary services are running."
                    img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
                    cv2.imshow(camera_name, img)

        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 is returned, so don't print it
            continue
        else:
            print("Press the 'q' key to exit!")
def main():
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes the head detection with a bounding box around a head.
           The bounding box is grey when mask detection did not run; it is green when a mask is detected;
           it is orange and 'NO MASK' is written above the head when no mask is detected.
           Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.masks.FaceMaskRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed")

    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    mask_topic = f"{args.prefix}.cam.0.masks.FaceMaskRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.face_mask.Image.jpg"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"

    # Write notification if no message is received for this long
    notification_delay_sec = 10

    # handle full screen
    window_name = "DEMO: Face Mask"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker,
        "detection",
        [
            TopicInfo(image_topic),
            TopicInfo(detection_topic),
            TopicInfo(mask_topic),
            TopicInfo(frameinfo_topic)
        ],
        100,
        None,
        True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)

    i = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None

    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                img = v[args.prefix]["0"]["image"]
                if not isinstance(img, np.ndarray):
                    continue
                last_image_ts = int(time.time())

                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                shape_orig = v[args.prefix]["0"]["head_detection"].pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]

                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] != "PERSON_HEAD":
                        continue
                    mask_record = v[args.prefix]["0"]["head_detection"][
                        head_detection]["face_mask"]
                    mask_text = ""
                    if not mask_record:
                        color = COLOR_DARK_GREY
                    elif mask_record["has_mask"]:
                        color = COLOR_GREEN
                    else:
                        mask_text = "NO MASK"
                        color = COLOR_ORANGE

                    # draw bounding box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)

                    # write the mask text above the head
                    img = draw_nice_text(
                        img,
                        mask_text,
                        object_detection_record["bounding_box"],
                        color,
                        scale=scaling)

                # draw ultinous logo
                img = draw_overlay(canvas=img, overlay=overlay,
                                   position=Position.BOTTOM_RIGHT, scale=scaling)

                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1

                # display
                if args.display:
                    cv2.imshow(window_name, img)

        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (last_image_ts is None or
                               last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)

        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 is returned, so don't print it
            continue
        else:
            print("Press the 'q' key to exit!")
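# The COLOR_* constants used across these demos come from the repo's drawing
# utilities; the values below are assumptions for illustration only. OpenCV
# expects BGR tuples, which is why the orange used inline above is written
# (10, 95, 255) rather than an RGB triple.
COLOR_GREY = (128, 128, 128)        # assumed value
COLOR_DARK_GREY = (64, 64, 64)      # assumed value
COLOR_LIGHT_GREY = (192, 192, 192)  # assumed value
COLOR_GREEN = (0, 255, 0)           # assumed value
COLOR_ORANGE = (10, 95, 255)        # matches the literal used inline above
COLOR_RED = (0, 0, 255)             # assumed value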
def main():
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes head detection with an orange bounding box
           around a head and writes the IDs given by reid MS above the heads.
           Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.99.reids.ReidRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic',
                        action='store_true')
    args = parser.parse_args()

    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed")

    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    reid_topic = f"{args.prefix}.cam.{REID_TOPIC_ID}.reids.ReidRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.reidentification.Image.jpg"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"

    # Write notification if no message is received for this long
    notification_delay_sec = 10

    # handle full screen
    window_name = TITLE
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read messages, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker,
        "detection",
        [
            TopicInfo(image_topic),
            TopicInfo(detection_topic),
            TopicInfo(reid_topic),
            TopicInfo(frameinfo_topic)
        ],
        500,
        None,
        True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)

    i = 0
    stored_ids = {}
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None

    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                message = v.get(args.prefix, {})
                img = message["0"].get("image", {})
                if not isinstance(img, np.ndarray):
                    continue
                last_image_ts = int(time.time())

                reid_records = message[REID_TOPIC_ID].get("reid", {})
                head_detections = message["0"].get("head_detection", {})

                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                shape_orig = head_detections.pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]

                # Process detections
                for detection_key, detection_record in head_detections.items():
                    object_detection_record = detection_record["bounding_box"]
                    color = COLOR_GREY
                    reid_records_for_det = reid_records.get(detection_key, ())
                    for reid_record in filter(lambda r: "reid_event" in r,
                                              reid_records_for_det):
                        color = COLOR_ORANGE
                        reid_key = reid_record["reid_event"]["match_list"][0][
                            "id"]["first_detection_key"]
                        key_to_display = stored_ids.get(reid_key, None)
                        if key_to_display is None:
                            key_to_display = len(stored_ids) + 1
                            stored_ids[reid_key] = key_to_display

                        # user id
                        img = draw_nice_text(
                            canvas=img,
                            text=str(key_to_display),
                            bounding_box=object_detection_record["bounding_box"],
                            color=color,
                            scale=scaling)

                    # draw bounding box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)

                # draw ultinous logo
                img = draw_overlay(canvas=img, overlay=overlay,
                                   position=Position.BOTTOM_RIGHT, scale=scaling)

                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1

                # display
                if args.display:
                    cv2.imshow(window_name, img)

        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (last_image_ts is None or
                               last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)

        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 is returned, so don't print it
            continue
        else:
            print("Press the 'q' key to exit!")
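# Example invocations, assuming hypothetical script file names; the argument
# layout matches the parsers above (broker and topic prefix are positional,
# -d displays on screen, -o writes the annotated jpeg stream back to kafka,
# -f is full screen, -v replays a video file from the beginning):
#
#   python3 pass_detection_demo.py localhost:9092 base passdet_config.json -d
#   python3 face_mask_demo.py localhost:9092 base -d -o
#   python3 reidentification_demo.py localhost:9092 base -f -d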