def process_frame(frame: VideoFrame, threshold: float = DETECT_THRESHOLD) -> bool:
    """Parse SSD-style detection tensors on *frame* and add a region for
    every sufficiently confident detection.

    Each row of a detection tensor is read as
    [image_id, label, confidence, x_min, y_min, x_max, y_max] with
    normalized box coordinates -- assumed from the indexing below; TODO
    confirm against the model's actual output layout.

    :param frame: frame whose attached inference tensors are scanned.
    :param threshold: minimum confidence for a detection to be kept.
    :return: True (the frame is always passed downstream).
    """
    width = frame.video_info().width
    height = frame.video_info().height
    for tensor in frame.tensors():
        dims = tensor.dims()
        data = tensor.data()
        object_size = dims[-1]
        for i in range(dims[-2]):
            row = i * object_size
            # A non-zero image_id marks the end of valid detections
            # in this tensor; stop scanning.
            if data[row + 0] != 0:
                break
            # Skip low-confidence detections before doing any box math.
            if data[row + 2] < threshold:
                continue
            # Scale normalized coordinates to pixels, rounding to nearest.
            x_min = int(data[row + 3] * width + 0.5)
            y_min = int(data[row + 4] * height + 0.5)
            x_max = int(data[row + 5] * width + 0.5)
            y_max = int(data[row + 6] * height + 0.5)
            # Label id 1 is hard-coded -- presumably a single-class
            # model; verify against the label map.
            frame.add_region(x_min, y_min, x_max - x_min, y_max - y_min,
                             1, region_tensor=REGION_TENSOR)
    return True
def process_frame(self, frame: VideoFrame, _: float = DETECT_THRESHOLD) -> bool:
    """Attach pre-recorded detection metadata to *frame* by matching the
    buffer timestamp against the queued JSON objects.

    The head of ``self.json_objects`` is compared against the frame's
    pts (plus ``self.offset_timestamp``) with a 1000 ns margin of
    error: outdated entries are discarded, future entries are kept for
    a later frame, and a matching entry has all of its detections added
    to the frame as regions before being consumed.

    :param frame: frame to annotate with the matching metadata entry.
    :param _: unused; kept for signature compatibility.
    :return: True (the frame is always passed downstream).
    """
    frame_pts = frame.video_meta().buffer.pts
    while self.json_objects:
        head_pts = self.json_objects[0]["timestamp"] + self.offset_timestamp
        delta = frame_pts - head_pts
        # Outside the 1000 ns margin of error the head entry does not
        # belong to this frame.
        if abs(delta) > 1000:
            if delta > 0:
                # Metadata is older than this frame: drop it and retry
                # with the next queued entry.
                self.json_objects.pop(0)
                continue
            # Metadata is still ahead of this frame: keep it queued.
            break
        # Head entry matches this frame: add every recorded detection.
        for obj in self.json_objects[0]["objects"]:
            box = obj["detection"]["bounding_box"]
            frame.add_region(box["x_min"],
                             box["y_min"],
                             box["x_max"] - box["x_min"],
                             box["y_max"] - box["y_min"],
                             obj["detection"]["label"],
                             obj["detection"]["confidence"],
                             True)
        self.json_objects.pop(0)
        break
    return True
def process_frame(self, frame: VideoFrame) -> bool:
    """Track per-label region counts and publish ENTER/DEPART events.

    Counts the frame's regions by label, emits one JSON event per unit
    of change versus the previous frame's counts (``self.item_count``),
    annotates the frame with the current bottle count, and — when
    ``self.redact_person`` is set and a person is present — blanks the
    frame data and removes its regions.

    :param frame: frame whose regions are counted and annotated.
    :return: True (the frame is always passed downstream).
    """
    timestamp = int(round(time.time() * 1000))
    new_counts = Counter()
    for detection in frame.regions():
        new_counts[detection.meta().get_roi_type()] += 1

    def make_events(action, key, n):
        # One event per unit of count change for this label.
        return [{'event_time': timestamp,
                 'roi_action': action,
                 'object_id': key} for _ in range(n)]

    events = []
    # Labels present now: emit the delta against the previous frame.
    # A label not seen before has a previous count of 0, so every
    # instance counts as an ENTER.
    for key, count in new_counts.items():
        previous = self.item_count[key] if key in self.item_count else 0
        if count > previous:
            events += make_events('ENTER', key, count - previous)
        elif count < previous:
            events += make_events('DEPART', key, previous - count)
    # Labels that disappeared entirely: every prior instance departed.
    for key, count in self.item_count.items():
        if key not in new_counts:
            events += make_events('DEPART', key, count)
    if events:
        frame.add_message(json.dumps(events))
    self.item_count = new_counts

    # Show a bottle-count label only when more than one bottle is
    # present; otherwise attach the default region tensor.
    if self.item_count['bottle'] <= 1:
        frame.add_region(0, 0, 0, 0, 0, region_tensor=REGION_TENSOR)
    else:
        frame.add_region(0, 0, 0, 0, 0,
                         region_tensor=VideoFrame.create_labels_structure(
                             ["Bottle Count: " + str(self.item_count['bottle'])]))

    # Privacy redaction: zero the frame data and drop its regions when
    # a person is detected and redaction is enabled.
    if (self.item_count['person'] > 0) and self.redact_person:
        with frame.data() as contents:
            contents[:] = 0
        self.remove_regions(frame)
        frame.add_region(0, 0, 0, 0, 1, region_tensor=REGION_TENSOR)
    return True