def draw_bounding_box(object_detection_record, detection_types: List[str], img: np.array, scaling, color) -> np.array:
    """Draw the detection's bounding box and its confidence value on *img*.

    Detections whose ``type`` is not listed in *detection_types* leave the
    canvas untouched; the (possibly annotated) image is always returned.
    """
    if object_detection_record["type"] not in detection_types:
        return img
    box = object_detection_record["bounding_box"]
    img = draw_nice_bounding_box(canvas=img, bounding_box=box, color=color, scaling=scaling)
    return draw_nice_text(
        canvas=img,
        text=str(object_detection_record["detection_confidence"]),
        bounding_box=box,
        color=color,
        scale=scaling)
def main():
    """Demography demo: draws head bounding boxes and writes gender & age above heads.

    Consumes jpeg images plus detection/gender/age JSON topics from Kafka,
    annotates each frame, then shows it on screen (-d) and/or republishes it
    to the ``<prefix>.cam.0.demography.Image.jpg`` topic (-o).
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic,
           visualizes the head detection with an orage bounding box around a head
           and writes demography data (gender & age) data above the heads.
           Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.

           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.genders.GenderRecord.json
           - <prefix>.cam.0.ages.AgeRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()
    # At least one sink (screen or Kafka) is mandatory.
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    gender_topic = f"{args.prefix}.cam.0.genders.GenderRecord.json"
    age_topic = f"{args.prefix}.cam.0.ages.AgeRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.demography.Image.jpg"

    # handle full screen
    window_name = "DEMO: Demography (gender & age)"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(detection_topic),
        TopicInfo(gender_topic),
        TopicInfo(age_topic),
    ], 100, None, True)
    i = 0
    for msgs in consumer.getMessages():
        # NOTE(review): `time` here shadows any module-level `time` import;
        # it is the frame timestamp key of the grouped message structure.
        for time, v in message_list_to_frame_structure(msgs).items():
            img = v[args.prefix]["0"]["image"]
            if type(img) == np.ndarray:
                # Annotate every head detection of this frame.
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    age_record = v[args.prefix]["0"]["head_detection"][
                        head_detection]["age"]
                    gender_record = v[args.prefix]["0"]["head_detection"][
                        head_detection]["gender"]
                    # Missing records arrive as empty dicts; render them as "".
                    age = "" if age_record['age'] == {} else age_record['age']
                    gender = "" if gender_record[
                        'gender'] == {} else gender_record['gender']
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        img, object_detection_record["bounding_box"],
                        (10, 95, 255))
                    # write age and gender
                    img = draw_nice_text(
                        img,
                        str(gender) + " " + str(age),
                        object_detection_record["bounding_box"], (10, 95, 255))
                # draw ultinous logo
                img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    # Flush periodically so the producer queue cannot grow unbounded.
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Pass-detection demo: draws head boxes, track polylines and pass-line events.

    Consumes image/detection/track/passdet/frameinfo topics (time-ordered, with
    heartbeats so the UI stays responsive when topics are empty), draws the
    configured pass lines and accumulated crossing events, and shows the frame
    (-d) and/or republishes it to Kafka (-o). With -v it replays a finite video
    file from the beginning of the partitions.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes the head detections and tracks, and pass detections.
           Displays the result on screen ('-d') or stores result in kafka ('-o').

           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.tracks.TrackChangeRecord.json
           - <prefix>.cam.0.passdet.PassDetectionRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument("config", help="Path to service config.", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()
    # Pass-line geometry comes from the service config file.
    passdet_config_json = parse_config_data(args=args, parser=parser)
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    # Replaying a finite video file: start at the beginning and stop at EOF.
    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    # One colored polyline per configured pass line, keyed by its id.
    passlines: Dict[str, PassLine] = {
        pl["id"]: PassLine(next(pass_colors),
                           [(int(p["x"]), int(p["y"])) for p in pl["poly"]])
        for pl in passdet_config_json["passLines"]
    }
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    track_topic = f"{args.prefix}.cam.0.tracks.TrackChangeRecord.json"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"
    passdet_topic = f"{args.prefix}.cam.0.passdet.PassDetectionRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.passdet.Image.jpg"

    # Write notification if no message is received for this long
    notification_delay_sec = 10

    # handle full screen
    window_name = "DEMO: Pass detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker,
        "detection", [
            TopicInfo(image_topic),
            TopicInfo(track_topic, drop=False),
            TopicInfo(passdet_topic, drop=False),
            TopicInfo(detection_topic),
            TopicInfo(frameinfo_topic)
        ],
        100,
        None,
        True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)
    i = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None
    # Per-track polyline, created lazily with the next color from the palette.
    tracks: DefaultDict[Any, ColoredPolyLine] = defaultdict(
        lambda: ColoredPolyLine(next(track_colors)))
    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                # Update the track polylines; drop finished tracks.
                for track_key, track_val in v[
                        args.prefix]["0"]["track"].items():
                    if track_val["end_of_track"]:
                        if track_key in tracks:
                            del tracks[track_key]
                        continue
                    point = track_val["point"]["x"], track_val["point"]["y"]
                    tracks[track_key].add_point(point)
                # Record pass-line crossing events (only PASS_CANDIDATE is drawn).
                for pass_det in v[args.prefix]["0"]["passdet"].values():
                    if pass_det["type"] == "HEARTBEAT":
                        continue
                    elif pass_det["type"] == "END_OF_TRACK":
                        continue
                    elif pass_det["type"] == "PASS_CANDIDATE":
                        pass_id = pass_det["pass_candidate"]["pass"][
                            "pass_line_id"]
                        cross_dir = pass_det["pass_candidate"]["pass"][
                            "cross_dir"]
                        if pass_id in passlines:
                            passlines[pass_id].add_event(cross_dir)
                    elif pass_det["type"] == "PASS_REALIZED":
                        continue
                img = v[args.prefix]["0"]["image"]
                if type(img) != np.ndarray:
                    continue
                last_image_ts = int(time.time())
                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                shape_orig = v[args.prefix]["0"]["head_detection"].pop(
                    "image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                # draw bounding_box
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            canvas=img,
                            bounding_box=object_detection_record[
                                "bounding_box"],
                            color=(10, 95, 255),
                            scaling=scaling)
                for t in tracks.values():
                    t.draw(img, scaling)
                # Draw each pass line plus its accumulated event string.
                for idx, l in enumerate(passlines.values()):
                    l.draw(img, scaling)
                    cv2.putText(img, "".join(l.events), (40, (idx + 1) * 50),
                                cv2.FONT_HERSHEY_COMPLEX, 2, l.color, 5,
                                bottomLeftOrigin=True)
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT,
                                   scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (
                last_image_ts is None or
                last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Two-camera reidentification demo.

    Camera REG_CAMERA_ID registers subjects, camera REID_CAMERA_ID
    re-identifies them. Reidentified heads get an orange box with the internal
    id and the dwell time (derived from the timestamp prefix of the
    registration key); unknown heads get a grey box. Output is shown on
    screen and/or produced to per-camera ``.reids.Image.jpg`` topics.

    Fix over the previous revision: the output-topic selection compared
    ``topic_key is REG_CAMERA_ID`` — an *identity* check that only worked by
    accident of string interning. It now uses ``==`` equality.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Basic Reidentification demo using two cameras: Camera0 for object registration and Camera1 for reidentification.
           Plays a video from a jpeg topic, visualizes head detection with an orange bounding box around a head
           and writes the dwell time and ID (derived from the reid MS ID) above the heads.
           Displays ('-d') or stores ('-o') the result of this demo in kafka topics.

           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.1.original.Image.jpg
           - <prefix>.cam.1.dets.ObjectDetectionRecord.json
           - <prefix>.cam.1.reids.ReidRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()
    # At least one sink (screen or Kafka) is mandatory.
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_reg_topic = f"{args.prefix}.cam.{REG_CAMERA_ID}.original.Image.jpg"
    image_reid_topic = f"{args.prefix}.cam.{REID_CAMERA_ID}.original.Image.jpg"
    detection_reg_topic = f"{args.prefix}.cam.{REG_CAMERA_ID}.dets.ObjectDetectionRecord.json"
    detection_reid_topic = f"{args.prefix}.cam.{REID_CAMERA_ID}.dets.ObjectDetectionRecord.json"
    reid_topic = f"{args.prefix}.cam.{REID_TOPIC_ID}.reids.ReidRecord.json"
    output_reg_topic_name = f"{args.prefix}.cam.{REG_CAMERA_ID}.reids.Image.jpg"
    output_reid_topic_name = f"{args.prefix}.cam.{REID_CAMERA_ID}.reids.Image.jpg"

    # handle full screen
    window_name = TITLE
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        broker=args.broker,
        groupid="detection",
        topics_infos=[
            TopicInfo(image_reg_topic),
            TopicInfo(image_reid_topic),
            TopicInfo(detection_reg_topic),
            TopicInfo(detection_reid_topic),
            TopicInfo(reid_topic, drop=False),
        ],
        latency_ms=500,
        commit_interval_sec=None,
        group_by_time=True)
    # registration key -> small sequential display id
    registrations: Dict[str, int] = {}
    i = 0
    inner_id = 0
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            message = v.get(args.prefix, {})
            # Register the recognized faces
            reid_records = message[REID_TOPIC_ID].get("reid", {})
            for reid_key, reid_record in reid_records.items():
                record_type = reid_record["type"]
                # Get the stored registration key
                registration_key = reid_record["reg_refs"][0]["subject"]["key"]
                if record_type == "REG" and registration_key not in registrations:
                    inner_id += 1
                    registrations[registration_key] = inner_id
            for topic_key, topic_message in message.items():
                img = topic_message.get("image", {})
                if not isinstance(img, np.ndarray):
                    continue
                # Process detections
                head_detections = topic_message.get("head_detection", {})
                for detection_key, detection_record in head_detections.items():
                    object_detection_record = detection_record["bounding_box"]
                    color = COLOR_GREY
                    key_to_display = ""
                    # Reidentification received
                    reid_record = reid_records.get(detection_key)
                    if reid_record and reid_record["type"] == "REID":
                        # We only use the first identified face now
                        reid_key = reid_record["reg_refs"][0]["subject"]["key"]
                        registered_id = registrations.get(reid_key)
                        if registered_id:
                            color = COLOR_ORANGE
                            # Dwell time: frame ts minus the registration key's
                            # leading timestamp component ("<ts>_...").
                            dwell_time = time - int(reid_key.split('_')[0])
                            key_to_display = f"id: {registered_id}; dwell time: {dwell_time}ms"
                    # draw text above bounding box
                    img = draw_nice_text(
                        canvas=img,
                        text=key_to_display,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color)
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color)
                # draw ultinous logo
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    # FIX: was `topic_key is REG_CAMERA_ID` (identity check);
                    # equality is the intended comparison.
                    out_topic = output_reg_topic_name if topic_key == REG_CAMERA_ID else output_reid_topic_name
                    producer.produce(out_topic,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                # NOTE(review): the `if args.display:` guard is commented out
                # in the original, so the window is shown unconditionally —
                # confirm whether that is intentional before gating it.
                # if args.display:
                cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Multi-camera reidentification demo.

    Any camera in CAMERA_TOPIC_IDS may register and/or re-identify. Known
    heads get an orange box annotated with name/ID plus optional age and
    dwell time (selected by the positional ``text`` argument:
    age|dwell_time|both); unknown heads stay grey. A side channel of
    technical naming topics (``detected.records.json`` /
    ``named.records.json``) maps reid keys to human names until names arrive
    via the person stream (see TODOs).

    Fix over the previous revision: the naming-topic decode used a bare
    ``except:``, which also swallows SystemExit/KeyboardInterrupt; it is now
    ``except Exception:``.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Reidentification demo using any number of cameras: Either camera can be used for registration or reidentification only, or for both.
           Plays a video from a jpeg topic, visualizes head detection with a gray bounding box around a head.
           When a detection is identified, changes the bounding box color to orange and writes the dwell time, age and ID
           (derived from the reid MS ID) above the heads.
           Displays ('-d') or stores ('-o') the result of this demo in kafka topics.

           Required topics (example):
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.frameinfo.FrameInfoRecord.json
           - <prefix>.cam.0.ages.AgeRecord.json
           - <prefix>.cam.1.original.Image.jpg
           - <prefix>.cam.1.dets.ObjectDetectionRecord.json
           - <prefix>.cam.1.frameinfo.FrameInfoRecord.json
           - <prefix>.cam.1.ages.AgeRecord.json
           ...
           - <prefix>.cam.1.reids.ReidRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    parser.add_argument('text', help='Text to display (age|dwell_time|both).', type=str)
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    # NOTE(review): `producer` is only created with -o, but it is also used
    # below for the technical naming topic regardless of -o — running with
    # -d alone would raise NameError on that path. Confirm and fix upstream.
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    # Prepare the topics to read
    input_topics = [
        f"{args.prefix}.cam.{id}.{topic_postfix}" for id in CAMERA_TOPIC_IDS
        for topic_postfix in TOPIC_POSTFIXES
    ]
    reid_topics = [
        f"{args.prefix}.cam.{id}.{topic_postfix}" for id in REID_TOPIC_IDS
        for topic_postfix in REID_TOPIC_POSTFIXES
    ]
    consumable_topics = list(map(TopicInfo, input_topics)) \
        + (list(map(lambda t: TopicInfo(t, drop=False), reid_topics)))

    # TODO (when names via person stream): Remove this consumer
    reg_consumer = Consumer({
        'bootstrap.servers': args.broker,
        'group.id': 'multicamreid_reg',
        'auto.offset.reset': 'earliest'
    })
    reg_consumer.assign(
        [TopicPartition(topic="named.records.json", partition=0, offset=0)])

    output_topics = dict((id, f"{args.prefix}.cam.{id}.{OUTPUT_TOPIC_POSTFIX}")
                         for id in CAMERA_TOPIC_IDS)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(broker=args.broker,
                                               groupid="detection",
                                               topics_infos=consumable_topics,
                                               latency_ms=200,
                                               commit_interval_sec=None,
                                               group_by_time=True)
    registrations: Dict[str, Registration] = {}
    i = 0
    inner_id = 0
    scaling = 1.0
    for msgs in consumer.getMessages():
        k = -1
        for time, v in message_list_to_frame_structure(msgs).items():
            message = v.get(args.prefix, {})

            # Collect Reid records
            reid_records = {}
            for reid_id in REID_TOPIC_IDS:
                reid_message = message.get(reid_id, {})
                reid_records.update(reid_message.get("reid", {}))

            # Process the image
            for topic_key, topic_message in filter(
                    lambda t: t[0] not in REID_TOPIC_IDS, message.items()):
                img = topic_message.get("image", {})
                if not isinstance(img, np.ndarray):
                    continue
                head_detections = topic_message.get("head_detection", {})
                # Set the image scale
                shape_orig = head_detections.pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                # Processing the detections of the image
                for detection_key, detection_record in head_detections.items():
                    object_detection_record = detection_record.get(
                        "bounding_box", {})
                    if not object_detection_record:
                        continue
                    key_to_display = ""
                    color = COLOR_DARK_GREY
                    face_detection = detection_record.get("unknown", {})
                    if face_detection:
                        color = COLOR_LIGHT_GREY
                    age = None
                    age_detection_record = detection_record.get("age", {})
                    if age_detection_record:
                        age = age_detection_record["age"]
                        if args.text == "age" or args.text == "both":
                            key_to_display = f"Age: {age}" if age else ""
                    # Reidentification received for the detection
                    reid_records_for_det = reid_records.get(detection_key, {})
                    if reid_records_for_det:
                        for reid_record in filter(lambda r: "reid_event" in r,
                                                  reid_records_for_det):
                            # We only use the first [0] identified face now
                            reid_key = reid_record["reid_event"]["match_list"][
                                0]["id"]["first_detection_key"]
                            registered = registrations.get(reid_key, None)
                            if registered:
                                age_to_display = ""
                                if age:
                                    registered.addAge(age)
                                if args.text == "age" or args.text == "both":
                                    age_to_display = f"; Age: {registered.age:d}" if age else ""
                                # Calculate the dwell time if required
                                dwell_time_display = ""
                                if args.text == "dwell_time" or args.text == "both":
                                    detection_time = reid_record["reid_event"][
                                        "match_list"][0]["id"][
                                            "first_detection_time"]
                                    dwell_time = time - int(detection_time)
                                    dwell_time_display = f"; Dwell time: {dwell_time}ms"
                                color = COLOR_ORANGE
                                name_to_display = registered.name if registered.name else f"ID: {registered.id}"
                                key_to_display = f"{name_to_display}{age_to_display}{dwell_time_display}"
                            else:
                                # First sighting: allocate a new internal id.
                                inner_id += 1
                                registrations[reid_key] = Registration(
                                    id=inner_id)
                                if age:
                                    registrations[reid_key].addAge(age)
                                # Update the technical naming topic
                                # TODO (when names via person stream): remove
                                producer.produce(
                                    "detected.records.json",
                                    key=str(reid_key).encode("utf-8"),
                                    value=(str(inner_id) + ";").encode("utf-8"),
                                    timestamp=time)
                    # Read the technical naming topic
                    # TODO (when names via person stream): remove
                    reg_msg = reg_consumer.poll(0.01)
                    if reg_msg is not None:
                        try:
                            key = reg_msg.key().decode("utf-8")
                            name = reg_msg.value().decode("utf-8")
                            # Update the person name
                            reg_to_update = registrations.get(key)
                            if reg_to_update:
                                reg_to_update.addName(name)
                            else:
                                registrations[key] = Registration(name=name)
                        # FIX: was a bare `except:` — narrowed so that
                        # SystemExit/KeyboardInterrupt still propagate.
                        except Exception:
                            print(
                                "Decoding entry of the named.records topic failed.",
                                flush=True)
                    # draw text above bounding box
                    img = draw_nice_text(
                        canvas=img,
                        text=key_to_display,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scale=scaling)
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)
                # draw ultinous logo
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT,
                                   scale=scaling)
                # produce output topic
                if args.output:
                    out_topic = output_topics.get(topic_key)
                    producer.produce(out_topic,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 1000 == 0:
                        producer.flush()
                    i += 1
                # display
                # NOTE(review): the `if args.display:` guard is commented out
                # in the original — the per-camera window is always shown.
                # if args.display:
                cv2.imshow(f"DEMO Camera {topic_key}", img)
                k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Face-mask demo: colors head boxes by mask state.

    Grey box when mask detection did not run for the head, green when a mask
    is detected, orange plus a 'NO MASK' label otherwise. Consumes image,
    detection, mask and frameinfo topics with heartbeats; shows the frame
    (-d) and/or republishes it (-o). With -v it replays a finite video file.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes the head detection with a bounding box around a head.
           The boundig box is grey when mask detection did not run; it is green when a mask is detected;
           it is orange and 'NO MASK' is written above the head when no mask is detected.
           Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.

           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.masks.FaceMaskRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    # Replaying a finite video file: start at the beginning and stop at EOF.
    begin_flag = None
    end_flag = EndFlag.NEVER
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    heartbeat_interval_ms = 1000
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    mask_topic = f"{args.prefix}.cam.0.masks.FaceMaskRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.face_mask.Image.jpg"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"

    # Write notification if no message is received for this long
    notification_delay_sec = 10

    # handle full screen
    window_name = "DEMO: Face Mask"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker,
        "detection", [
            TopicInfo(image_topic),
            TopicInfo(detection_topic),
            TopicInfo(mask_topic),
            TopicInfo(frameinfo_topic)
        ],
        100,
        None,
        True,
        begin_flag=begin_flag,
        end_flag=end_flag,
        heartbeat_interval_ms=heartbeat_interval_ms)
    i = 0
    scaling = 1.0
    img_dimensions = (768, 1024)
    last_image_ts = None
    for msgs in consumer.getMessages():
        if not isinstance(msgs, HeartBeat):
            for ts, v in message_list_to_frame_structure(msgs).items():
                img = v[args.prefix]["0"]["image"]
                if type(img) != np.ndarray:
                    continue
                last_image_ts = int(time.time())
                # Set the image scale
                img_dimensions = (img.shape[0], img.shape[1])
                shape_orig = v[args.prefix]["0"]["head_detection"].pop(
                    "image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] != "PERSON_HEAD":
                        continue
                    mask_record = v[args.prefix]["0"]["head_detection"][
                        head_detection]["face_mask"]
                    # Color encodes the mask state; text only for missing mask.
                    mask_text = ""
                    if not mask_record:
                        color = COLOR_DARK_GREY
                    elif mask_record["has_mask"]:
                        color = COLOR_GREEN
                    else:
                        mask_text = "NO MASK"
                        color = COLOR_ORANGE
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)
                    # write age and gender
                    img = draw_nice_text(
                        img,
                        mask_text,
                        object_detection_record["bounding_box"],
                        color,
                        scale=scaling)
                # draw ultinous logo
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT,
                                   scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=ts)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        # Write notification until the first message is received
        # (output topic is not updated to ensure kafka timestamp consistency)
        elif args.display and (
                last_image_ts is None or
                last_image_ts + notification_delay_sec < int(time.time())):
            img = np.zeros((*img_dimensions, 3), np.uint8)
            text = "Waiting for input Kafka topics to be populated. \n" \
                   "Please make sure that MGR and other necessary services are running."
            img = draw_simple_text(canvas=img, text=text, color=(10, 95, 255))
            cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Head-detection & track demo: draws head boxes and colored track polylines.

    Consumes image/detection/track topics, maintains one polyline per live
    track (dropped when end_of_track arrives), and shows the frame (-d)
    and/or republishes it to ``<prefix>.cam.0.tracks.Image.jpg`` (-o).
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes the head detections and tracks.
           Displays the result on screen ('-d') or stores result in kafka ('-o').

           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.tracks.TrackChangeRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    track_topic = f"{args.prefix}.cam.0.tracks.TrackChangeRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.tracks.Image.jpg"

    # handle full screen
    window_name = "DEMO: Head detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(track_topic, drop=False),
        TopicInfo(detection_topic)
    ], 100, None, True)
    i = 0
    # Per-track polyline, created lazily with the next color from the palette.
    tracks: DefaultDict[Any, Track] = defaultdict(lambda: Track(next(colors)))
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            # Update the track polylines; drop finished tracks.
            for track_key, track_val in v[args.prefix]["0"]["track"].items():
                if track_val["end_of_track"]:
                    if track_key in tracks:
                        del tracks[track_key]
                    continue
                point = track_val["point"]["x"], track_val["point"]["y"]
                tracks[track_key].add_point(point)
            img = v[args.prefix]["0"]["image"]
            if type(img) == np.ndarray:
                # draw bounding_box
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            img, object_detection_record["bounding_box"],
                            (10, 95, 255))
                for t_key, t in tracks.items():
                    cv2.polylines(img=img,
                                  pts=[np.array(t.points, np.int32)],
                                  isClosed=False,
                                  color=t.color,
                                  thickness=3)
                # draw ultinous logo
                img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:  # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Single-camera reidentification demo.

    Draws a grey box per head; when a reid event matches the detection the
    box turns orange and a small sequential id (stable per reid key) is
    written above it. Shows the frame (-d) and/or republishes it to
    ``<prefix>.cam.0.reidentification.Image.jpg`` (-o); -v replays a finite
    video file from the beginning.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes head detection with an orage bounding box
           around a head and writes the IDs given by reid MS above the heads.
           Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.

           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.99.reids.ReidRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})
    # Replaying a finite video file: start at the beginning and stop at EOF.
    begin_flag = None
    end_flag = None
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION
    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)
    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    reid_topic = f"{args.prefix}.cam.{REID_TOPIC_ID}.reids.ReidRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.reidentification.Image.jpg"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"

    # handle full screen
    window_name = TITLE
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(
        args.broker,
        "detection", [
            TopicInfo(image_topic),
            TopicInfo(detection_topic),
            TopicInfo(reid_topic),
            TopicInfo(frameinfo_topic)
        ],
        500,
        None,
        True,
        begin_flag=begin_flag,
        end_flag=end_flag,
    )
    i = 0
    # reid key -> small sequential display id
    stored_ids = {}
    scaling = 1.0
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            message = v.get(args.prefix, {})
            reid_records = message[REID_TOPIC_ID].get("reid", {})
            img = message["0"].get("image", {})
            if type(img) == np.ndarray:
                head_detections = message["0"].get("head_detection", {})
                # Set the image scale
                shape_orig = head_detections.pop("image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                # Processing detections
                for detection_key, detection_record in head_detections.items():
                    object_detection_record = detection_record["bounding_box"]
                    color = COLOR_GREY
                    reid_records_for_det = reid_records.get(detection_key, ())
                    for reid_record in filter(lambda r: "reid_event" in r,
                                              reid_records_for_det):
                        color = COLOR_ORANGE
                        reid_key = reid_record["reid_event"]["match_list"][0][
                            "id"]["first_detection_key"]
                        key_to_display = stored_ids.get(reid_key, None)
                        if key_to_display is None:
                            # First sighting: allocate the next display id.
                            key_to_display = len(stored_ids) + 1
                            stored_ids[reid_key] = key_to_display
                        # user id
                        img = draw_nice_text(
                            canvas=img,
                            text=str(key_to_display),
                            bounding_box=object_detection_record[
                                "bounding_box"],
                            color=color,
                            scale=scaling)
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color,
                        scaling=scaling)
                # draw ultinous logo
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT,
                                   scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            if args.video_file:
                exit(130)
            break
        elif k == -1:
            # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Pass detection demo.

    Consumes images, head detections, track changes and pass detection
    records from kafka, draws the tracks and configured pass lines onto each
    frame, and displays the result ('-d') and/or republishes the annotated
    image to a kafka topic ('-o').
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes the head detections and tracks, and pass detections.
           Displays the result on screen ('-d') or stores result in kafka ('-o').
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.0.tracks.TrackChangeRecord.json
           - <prefix>.cam.0.passdet.PassDetectionRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument("config", help="Path to service config.", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()

    config_file = Path(args.config)
    if not config_file.is_file():
        parser.error(f"{args.config} does not exist.")
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    # The pass line definitions are stored as a JSON value inside a
    # java-properties style service config file.
    with config_file.open() as f:
        try:
            passdet_config_json = json.loads(
                javaproperties.load(f)
                ["ultinous.service.kafka.passdet.config"])
        except KeyError:
            parser.error(
                "Missing property: ultinous.service.kafka.passdet.config")
        except JSONDecodeError as e:
            parser.error(f"Error parsing {e}")

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    # One PassLine (with its own colour) per configured pass line polygon.
    passlines: Dict[str, PassLine] = {
        pl["id"]: PassLine(next(pass_colors),
                           [(int(p["x"]), int(p["y"])) for p in pl["poly"]])
        for pl in passdet_config_json["passLines"]
    }

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    track_topic = f"{args.prefix}.cam.0.tracks.TrackChangeRecord.json"
    passdet_topic = f"{args.prefix}.cam.0.passdet.PassDetectionRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.passdet.Image.jpg"

    # handle full screen
    window_name = "DEMO: Pass detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    # track/passdet topics use drop=False so no track point or pass event
    # is lost when the consumer lags behind.
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(track_topic, drop=False),
        TopicInfo(passdet_topic, drop=False),
        TopicInfo(detection_topic)
    ], 100, None, True)
    i = 0  # produced-message counter; drives the periodic producer.flush()
    tracks: DefaultDict[Any, ColoredPolyLine] = defaultdict(
        lambda: ColoredPolyLine(next(track_colors)))
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            # Update track polylines; a track that ended is removed.
            for track_key, track_val in v[args.prefix]["0"]["track"].items():
                if track_val["end_of_track"]:
                    if track_key in tracks:
                        del tracks[track_key]
                    continue
                point = track_val["point"]["x"], track_val["point"]["y"]
                tracks[track_key].add_point(point)
            # Register crossing events on the matching pass line; heartbeat,
            # end-of-track and realized records carry nothing to draw here.
            for pass_det in v[args.prefix]["0"]["passdet"].values():
                if pass_det["type"] == "HEARTBEAT":
                    continue
                elif pass_det["type"] == "END_OF_TRACK":
                    continue
                elif pass_det["type"] == "PASS_CANDIDATE":
                    pass_id = pass_det["pass_candidate"]["pass"][
                        "pass_line_id"]
                    cross_dir = pass_det["pass_candidate"]["pass"]["cross_dir"]
                    if pass_id in passlines:
                        passlines[pass_id].add_event(cross_dir)
                elif pass_det["type"] == "PASS_REALIZED":
                    continue
            img = v[args.prefix]["0"]["image"]
            # isinstance instead of `type(img) == np.ndarray`: same intent,
            # idiomatic, and also accepts ndarray subclasses.
            if isinstance(img, np.ndarray):
                # draw bounding_box
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            img, object_detection_record["bounding_box"],
                            (10, 95, 255))
                for t in tracks.values():
                    t.draw(img)
                for idx, l in enumerate(passlines.values()):
                    l.draw(img)
                    # NOTE(review): bottomLeftOrigin=True flips the glyphs
                    # vertically in OpenCV — confirm the event text renders
                    # as intended.
                    cv2.putText(img, "".join(l.events), (40, (idx + 1) * 50),
                                cv2.FONT_HERSHEY_COMPLEX, 2, l.color, 5,
                                bottomLeftOrigin=True)
                img = draw_overlay(img, overlay, Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:
            # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Head detection demo.

    Consumes images, head detection records and frame info from kafka, draws
    an orange bounding box around each detected head, and displays ('-d')
    and/or republishes ('-o') the annotated frames. With '-v' the stream is
    treated as a finite video file (read from the beginning, stop at end of
    partition).
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic and visualizes the head detection with an orange bounding box around a head.
           Displays ('-d') or stores ('-o') the result of this demo in the kafka topic.
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-v', "--video_file", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    # Video-file mode replays from the beginning and stops at end of partition;
    # live mode (flags left as None) keeps the consumer's defaults.
    begin_flag = None
    end_flag = None
    if args.video_file:
        begin_flag = BeginFlag.BEGINNING
        end_flag = EndFlag.END_OF_PARTITION

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    frameinfo_topic = f"{args.prefix}.cam.0.frameinfo.FrameInfoRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.head_detection.Image.jpg"

    # handle full screen
    window_name = "DEMO: Head detection"
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(detection_topic),
        TopicInfo(frameinfo_topic)
    ], 100, None, True,
                                               begin_flag=begin_flag,
                                               end_flag=end_flag)
    i = 0  # produced-message counter; drives the periodic producer.flush()
    scaling = 1.0
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            img = v[args.prefix]["0"]["image"]
            # isinstance instead of `type(img) == np.ndarray`: same intent,
            # idiomatic, and also accepts ndarray subclasses.
            if isinstance(img, np.ndarray):
                # Set the image scale: the frame info record carries the
                # original column count; detections are scaled to the image.
                shape_orig = v[args.prefix]["0"]["head_detection"].pop(
                    "image", {})
                if shape_orig:
                    scaling = img.shape[1] / shape_orig["frame_info"]["columns"]
                # draw bounding_box
                for head_detection in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][head_detection]["bounding_box"]
                    if object_detection_record["type"] == "PERSON_HEAD":
                        img = draw_nice_bounding_box(
                            canvas=img,
                            bounding_box=object_detection_record[
                                "bounding_box"],
                            color=(10, 95, 255),
                            scaling=scaling)
                # draw ultinous logo
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT,
                                   scale=scaling)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                        i = 0
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            # NOTE(review): bare exit() relies on the site module;
            # sys.exit(130) would be the robust spelling.
            if args.video_file:
                exit(130)
            else:
                break
        elif k == -1:
            # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")
def main():
    """Re-identification demo.

    Consumes images, head detections and reid records from kafka, draws a
    bounding box around each head (grey when unmatched, orange when the reid
    microservice matched it) with a small sequential display id above matched
    heads, and displays ('-d') and/or republishes ('-o') the annotated frames.
    """
    parser = argparse.ArgumentParser(
        epilog="""Description:
           Plays a video from a jpeg topic, visualizes head detection with an orange bounding box around a head
           and writes the IDs given by reid MS above the heads. Displays ('-d') or stores ('-o') the result
           of this demo in the kafka topic.
           Required topics:
           - <prefix>.cam.0.original.Image.jpg
           - <prefix>.cam.0.dets.ObjectDetectionRecord.json
           - <prefix>.cam.99.reids.ReidRecord.json
           """,
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument("broker", help="The name of the kafka broker.", type=str)
    parser.add_argument("prefix", help="Prefix of topics (base|skeleton).", type=str)
    parser.add_argument('-f', "--full_screen", action='store_true')
    parser.add_argument('-d', "--display", action='store_true')
    parser.add_argument('-o', '--output', help='write output image into kafka topic', action='store_true')
    args = parser.parse_args()
    if not args.display and not args.output:
        parser.error(
            "Missing argument: -d (display output) or -o (write output to kafka) is needed"
        )
    if args.output:
        producer = Producer({'bootstrap.servers': args.broker})

    overlay = cv2.imread('resources/powered_by_white.png', cv2.IMREAD_UNCHANGED)

    image_topic = f"{args.prefix}.cam.0.original.Image.jpg"
    detection_topic = f"{args.prefix}.cam.0.dets.ObjectDetectionRecord.json"
    reid_topic = f"{args.prefix}.cam.{REID_TOPIC_ID}.reids.ReidRecord.json"
    output_topic_name = f"{args.prefix}.cam.0.reids.Image.jpg"

    # handle full screen
    window_name = TITLE
    if args.full_screen:
        cv2.namedWindow(window_name, cv2.WINDOW_NORMAL)
        cv2.setWindowProperty(window_name, cv2.WND_PROP_FULLSCREEN,
                              cv2.WINDOW_FULLSCREEN)

    # read message, draw and display them
    # reid topic uses drop=False so no reid match is lost on consumer lag.
    consumer = TimeOrderedGeneratorWithTimeout(args.broker, "detection", [
        TopicInfo(image_topic),
        TopicInfo(detection_topic),
        TopicInfo(reid_topic, drop=False),
    ], 500, None, True)
    i = 0  # produced-message counter; drives the periodic producer.flush()
    stored_ids = {}  # reid subject key -> small sequential id shown on screen
    for msgs in consumer.getMessages():
        for time, v in message_list_to_frame_structure(msgs).items():
            reid_records = v[args.prefix][REID_TOPIC_ID].get("reid", {})
            img = v[args.prefix]["0"]["image"]
            # isinstance instead of `type(img) == np.ndarray`: same intent,
            # idiomatic, and also accepts ndarray subclasses.
            if isinstance(img, np.ndarray):
                for key in v[args.prefix]["0"]["head_detection"]:
                    object_detection_record = v[args.prefix]["0"][
                        "head_detection"][key]["bounding_box"]
                    color = COLOR_GREY
                    reid_record = reid_records.get(key)
                    if reid_record:
                        color = COLOR_ORANGE
                        reid_key = reid_record["reg_refs"][0]["subject"]["key"]
                        # First time we see this subject, assign the next
                        # sequential display id.
                        key_to_display = stored_ids.get(reid_key, None)
                        if key_to_display is None:
                            key_to_display = len(stored_ids) + 1
                            stored_ids[reid_key] = key_to_display
                        # user id
                        img = draw_nice_text(
                            canvas=img,
                            text=str(key_to_display),
                            bounding_box=object_detection_record[
                                "bounding_box"],
                            color=color)
                    # draw bounding_box
                    img = draw_nice_bounding_box(
                        canvas=img,
                        bounding_box=object_detection_record["bounding_box"],
                        color=color)
                # draw ultinous logo
                img = draw_overlay(canvas=img,
                                   overlay=overlay,
                                   position=Position.BOTTOM_RIGHT)
                # produce output topic
                if args.output:
                    producer.produce(output_topic_name,
                                     value=encode_image_to_message(img),
                                     timestamp=time)
                    producer.poll(0)
                    if i % 100 == 0:
                        producer.flush()
                    i += 1
                # display
                if args.display:
                    cv2.imshow(window_name, img)
        k = cv2.waitKey(33)
        if k == 113:  # The 'q' key to stop
            break
        elif k == -1:
            # normally -1 returned,so don't print it
            continue
        else:
            print(f"Press 'q' key for EXIT!")