def frame_callback(frame: VideoFrame):
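    """Draw face-attribute classification results on each detected region.

    For every ROI the callback inspects the attached inference tensors:
    "align_fc3" landmark coordinates (relative to the ROI) are drawn as
    small circles, while the gender ("prob"), age ("age_conv3") and emotion
    ("prob_emotion") outputs are collected into a text label rendered just
    below the bounding box. Assumes cv2 and numpy are imported by the
    surrounding module.
    """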
    with frame.data() as mat:
        for roi in frame.regions():
            labels = []
            rect = roi.rect()
            for tensor in roi.tensors():
                data = tensor.data()
                if "align_fc3" == tensor.layer_name():
                    lm_color = (255, 0, 0)
                    for i in range(0, len(data), 2):
                        x = int(rect.x + rect.w * data[i])
                        y = int(rect.y + rect.h * data[i + 1])
                        cv2.circle(mat, (x, y), int(1 + 0.02 * rect.w),
                                   lm_color, -1)
                if "prob" == tensor.layer_name():
                    if data[1] > 0.5:
                        labels.append("M")
                    else:
                        labels.append("F")
                if "age_conv3" == tensor.layer_name():
                    labels.append(str(int(data[0] * 100)))
                if "prob_emotion" == tensor.layer_name():
                    emotions = ["neutral", "happy", "sad", "surprise", "anger"]
                    index = numpy.argmax(data)
                    labels.append(emotions[index])

            if labels:
                label = " ".join(labels)
                cv2.putText(mat, label, (rect.x, rect.y + rect.h + 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
Example #2
def frame_callback(frame: VideoFrame):
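    """Variant of the callback above that keys on model and layer names
    ("landmarks", "gender"/"prob", "age", "EmoNet") and reads the bounding
    box from roi.meta() instead of roi.rect(). Assumes cv2 and numpy are
    imported by the surrounding module.
    """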
    with frame.data() as mat:
        for roi in frame.regions():
            meta = roi.meta()
            labels = []
            for tensor in roi.tensors():
                data = tensor.data()
                if "landmarks" in tensor.model_name():
                    lm_color = (255, 0, 0)
                    for i in range(0, len(data), 2):
                        x = int(meta.x + meta.w * data[i])
                        y = int(meta.y + meta.h * data[i + 1])
                        cv2.circle(mat, (x, y), int(1 + 0.02 * meta.w),
                                   lm_color, -1)
                if "gender" in tensor.model_name() and "prob" in tensor.layer_name():
                    if data[1] > 0.5:
                        labels.append("M")
                    else:
                        labels.append("F")
                elif "age" in tensor.layer_name():
                    labels.append(str(int(data[0] * 100)))
                elif "EmoNet" in tensor.model_name():
                    emotions = ["neutral", "happy", "sad", "surprise", "anger"]
                    index = numpy.argmax(data)
                    labels.append(emotions[index])

            if labels:
                label = " ".join(labels)
                cv2.putText(mat, label, (meta.x, meta.y + meta.h + 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
Example #3
def draw_conf(frame: VideoFrame):
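    """Draw each region's detection confidence just below its bounding box."""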
    with frame.data() as img:
        for roi in frame.regions():
            rect = roi.rect()
            conf = roi.confidence()
            if rect:
                cv2.putText(img, f'{conf:.2f}', (rect.x, rect.y + rect.h + 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
Example #4
    def process_frame(self, frame: VideoFrame) -> bool:
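        """Count detected objects per ROI type and compare the counts with the
        previous frame, attaching a JSON list of ENTER/DEPART events to the
        frame whenever a per-type count changes. Also adds a label region with
        the current bottle count and, when a person is present and
        self.redact_person is set, blanks the frame and replaces its regions.
        Assumes Counter, json and time are imported and that REGION_TENSOR,
        self.item_count and self.remove_regions() are defined elsewhere.
        """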
        timestamp = int(round(time.time()*1000))
        events = []

        new_counts = Counter()

        for detection in frame.regions():
            new_counts[detection.meta().get_roi_type()] += 1

        for key, count in new_counts.items():
            if key in self.item_count:
                if count > self.item_count[key]:
                    for x in range(0, count-self.item_count[key]):
                        events.append({'event_time': timestamp,
                                       'roi_action': 'ENTER',
                                       'object_id': key})
                elif count < self.item_count[key]:
                    for x in range(0, self.item_count[key]-count):
                        events.append({'event_time': timestamp,
                                       'roi_action': 'DEPART',
                                       'object_id': key})
            else:
                for x in range(0, count):
                    events.append({'event_time': timestamp,
                                   'roi_action': 'ENTER',
                                   'object_id': key})
        for key, count in self.item_count.items():
            if key not in new_counts:
                for x in range(0, count):
                    events.append({'event_time': timestamp,
                                   'roi_action': 'DEPART',
                                   'object_id': key})

        if events:
            frame.add_message(json.dumps(events))

        self.item_count = new_counts

        if self.item_count['bottle'] <= 1:
            frame.add_region(0, 0, 0, 0, 0, region_tensor=REGION_TENSOR)
        else:
            frame.add_region(0, 0, 0, 0, 0, region_tensor=VideoFrame.create_labels_structure(
                ["Bottle Count: " + str(self.item_count['bottle'])]))

        if (self.item_count['person'] > 0) and self.redact_person:
            with frame.data() as contents:
                contents[:] = 0
            self.remove_regions(frame)
            frame.add_region(0, 0, 0, 0, 1, region_tensor=REGION_TENSOR)

        return True
Example #5
def draw_ts(frame: VideoFrame) -> bool:
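    """Rewrite every JSON message on the frame with a wall-clock "ts" field
    (module-level start_ts plus the message's nanosecond timestamp when
    start_ts is set, otherwise the current time) and draw the last computed
    timestamp in the top-left corner when at least one message was present.
    Assumes cv2, json and time are imported and start_ts is defined in the
    surrounding module.
    """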
    font = cv2.FONT_HERSHEY_SIMPLEX
    frame_ts = None
    msgs = frame.messages()
    for m in msgs:
        data = json.loads(m)
        if start_ts is not None:
            data["ts"] = int(start_ts) + int(data["timestamp"] / 1000000000)
        else:
            data["ts"] = int(time.time())
        frame_ts = data["ts"]
        frame.remove_message(m)
        frame.add_message(json.dumps(data))
    with frame.data() as mat:
        if frame_ts is not None:
            cv2.putText(mat, str(frame_ts), (20, 20), font, 1,
                        (255, 255, 255), 2)
    return True
Example #6
    def process_frame(self, frame: VideoFrame) -> bool:
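        """Class-method variant of the timestamp rewriting above: self.ts holds
        an optional ISO-8601 start time (parsed with ciso8601) and self.fs
        caches the last timestamp drawn onto the frame. Assumes cv2, json,
        time and ciso8601 are imported by the surrounding module.
        """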

        msgs = frame.messages()

        for m in msgs:
            data = json.loads(m)
            if self.ts is not None:
                ts = int(ciso8601.parse_datetime(self.ts).timestamp())
                data["ts"] = int(ts) + int(data["timestamp"] / 1000000000)
            else:
                data["ts"] = int(time.time())

            self.fs = data["ts"]
            frame.remove_message(m)
            frame.add_message(json.dumps(data))
            # print(data)
        # font is not defined in this snippet; HERSHEY_SIMPLEX is assumed,
        # matching the previous example
        font = cv2.FONT_HERSHEY_SIMPLEX
        with frame.data() as mat:
            cv2.putText(mat, str(self.fs), (20, 20), font, 0.5,
                        (255, 255, 255), 2)
        return True
Example #7
def frame_callback(frame: VideoFrame):
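    """Draw multi-person pose estimation results: one circle per keypoint and
    one line per limb. Assumes cv2 is imported and that process() (pose
    post-processing returning per-person x and y keypoint arrays) and
    mapping (the list of keypoint index pairs to connect) are defined
    elsewhere in the module.
    """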
    color = (255, 0, 0)
    with frame.data() as mat:
        poses = process(frame)
        x_values = poses[0]
        y_values = poses[1]
        for person in range(len(x_values)):
            for i in range(len(x_values[0])):
                cv2.circle(
                    mat, (int(x_values[person][i]), int(y_values[person][i])),
                    3, color, -1)
            for j in range(len(mapping)):
                start_idx = mapping[j][0]
                end_idx = mapping[j][1]
                cv2.line(mat, (int(x_values[person][start_idx]),
                               int(y_values[person][start_idx])),
                         (int(x_values[person][end_idx]),
                          int(y_values[person][end_idx])), color, 3)

    return True
Example #8
def process_frame(frame: VideoFrame,
                  threshold: float = DETECT_THRESHOLD) -> bool:
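    """Parse the raw YOLO output tensors attached to the frame, filter the
    resulting detections by confidence and IOU, and draw labelled,
    color-coded bounding boxes. Assumes cv2 is imported and that YoloParams,
    parse_yolo_region, filter_objects, labels_map, DETECT_THRESHOLD and the
    module-level size/threshold globals are defined elsewhere.
    """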
    global input_height, input_width, prob_threshold, iou_threshold
    width = frame.video_info().width
    height = frame.video_info().height
    objects = list()
    for tensor in frame.tensors():
        dims = tensor.dims()
        data = tensor.data()
        layer_params = YoloParams(dims[2])
        data = data.reshape(dims[0], dims[1], dims[2], dims[3])
        objects += parse_yolo_region(data, (input_height, input_width),
                                     (height, width),
                                     layer_params,
                                     prob_threshold,
                                     is_proportional=False)
    objects = filter_objects(objects, iou_threshold, prob_threshold)
    with frame.data() as mat:
        for obj in objects:
            # Clamp the detected bounding box to the frame boundaries
            obj['xmax'] = min(obj['xmax'], width)
            obj['ymax'] = min(obj['ymax'], height)
            obj['xmin'] = max(obj['xmin'], 0)
            obj['ymin'] = max(obj['ymin'], 0)
            color = (min(obj['class_id'] * 12.5, 255),
                     min(obj['class_id'] * 7, 255),
                     min(obj['class_id'] * 5, 255))
            det_label = (labels_map[obj['class_id']]
                         if labels_map and len(labels_map) > obj['class_id']
                         else str(obj['class_id']))
            cv2.rectangle(mat, (obj['xmin'], obj['ymin']),
                          (obj['xmax'], obj['ymax']), color, 2)
            cv2.putText(mat,
                        "#" + det_label + ' ' +
                        str(round(obj['confidence'] * 100, 1)) + ' %',
                        (obj['xmin'], obj['ymin'] - 7),
                        cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
    return True