Example No. 1
# Imports assumed by this and the following examples (gstgva ships with Intel DL Streamer)
import cv2
import numpy
from gstgva import VideoFrame

def frame_callback(frame: VideoFrame):
    with frame.data() as mat:
        for roi in frame.regions():
            labels = []
            for tensor in roi.tensors():
                data = tensor.data()
                if "landmarks" in tensor.model_name():
                    lm_color = (255, 0, 0)
                    for i in range(0, len(data), 2):
                        x = int(roi.meta().x + roi.meta().w * data[i])
                        y = int(roi.meta().y + roi.meta().h * data[i + 1])
                        cv2.circle(mat, (x, y), int(1 + 0.02 * roi.meta().w), lm_color, -1)
                if "gender" in tensor.model_name() and "prob" in tensor.layer_name():
                    if data[1] > 0.5:
                        labels.append("M")
                    else:
                        labels.append("F")
                elif "age" in tensor.layer_name():
                    labels.append(str(int(data[0] * 100)))
                elif "EmoNet" in tensor.model_name():
                    emotions = ["neutral", "happy", "sad", "surprise", "anger"]
                    index = numpy.argmax(data)
                    labels.append(emotions[index])

            if labels:
                label = " ".join(labels)
                cv2.putText(mat, label, (roi.meta().x, roi.meta().y + roi.meta().h + 30), cv2.FONT_HERSHEY_SIMPLEX,
                            1, (0, 0, 255), 2)
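A callback like the one above does not run on its own; in the gstgva samples it is attached as a buffer probe on a pad of a running GStreamer pipeline. The sketch below shows one way to wire it up. The pipeline string, model paths, and the VideoFrame(buffer, caps=caps) construction follow the public DL Streamer samples but are assumptions here, not part of the original example.

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst
from gstgva import VideoFrame

Gst.init(None)

# Placeholder pipeline: detection plus age/gender classification, with the
# probe attached to the sink pad of gvawatermark so all metadata is present.
PIPELINE_STR = ("filesrc location=input.mp4 ! decodebin ! videoconvert ! "
                "gvadetect model=face-detection-adas-0001.xml ! "
                "gvaclassify model=age-gender-recognition-retail-0013.xml ! "
                "gvawatermark name=watermark ! videoconvert ! autovideosink")

def pad_probe(pad, info):
    buffer = info.get_buffer()   # note: for in-place drawing the buffer must be writable
    caps = pad.get_current_caps()
    frame_callback(VideoFrame(buffer, caps=caps))  # callback from the example above
    return Gst.PadProbeReturn.OK

pipeline = Gst.parse_launch(PIPELINE_STR)
pipeline.get_by_name("watermark").get_static_pad("sink").add_probe(
    Gst.PadProbeType.BUFFER, pad_probe)
pipeline.set_state(Gst.State.PLAYING)
pipeline.get_bus().timed_pop_filtered(
    Gst.CLOCK_TIME_NONE, Gst.MessageType.EOS | Gst.MessageType.ERROR)
pipeline.set_state(Gst.State.NULL)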
Example No. 2
def frame_callback(frame: VideoFrame):
    with frame.data() as mat:
        for roi in frame.regions():
            labels = []
            rect = roi.rect()
            for tensor in roi.tensors():
                data = tensor.data()
                if "align_fc3" == tensor.layer_name():
                    lm_color = (255, 0, 0)
                    for i in range(0, len(data), 2):
                        x = int(rect.x + rect.w * data[i])
                        y = int(rect.y + rect.h * data[i + 1])
                        cv2.circle(mat, (x, y), int(1 + 0.02 * rect.w),
                                   lm_color, -1)
                if "prob" == tensor.layer_name():
                    if data[1] > 0.5:
                        labels.append("M")
                    else:
                        labels.append("F")
                if "age_conv3" == tensor.layer_name():
                    labels.append(str(int(data[0] * 100)))
                if "prob_emotion" == tensor.layer_name():
                    emotions = ["neutral", "happy", "sad", "surprise", "anger"]
                    index = numpy.argmax(data)
                    labels.append(emotions[index])

            if labels:
                label = " ".join(labels)
                cv2.putText(mat, label, (rect.x, rect.y + rect.h + 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
Example No. 3
def frame_callback(frame: VideoFrame):
    event_list = None  # populated elsewhere when there are events to publish

    for i, detection in enumerate(frame.regions()):
        # detection.label() and detection.meta().get_roi_type() describe the region
        for j, tensor in enumerate(detection.tensors()):
            if "detection" in tensor.name():
                # bounding box is available as tensor["x_min"], tensor["y_min"],
                # tensor["x_max"], tensor["y_max"], plus tensor["confidence"]
                pass
            elif "object_id" in tensor.name():
                # tracking id (e.g. from gvatrack), available as tensor["id"]
                pass
            elif "ocr" in tensor.name():
                # OCR result tensor; inspect its fields as needed
                pass
            else:
                print(tensor.name())

    if event_list is not None:
        # event publish logic goes here; for debugging, the frame can be dumped:
        # with frame.data() as mat:
        #     cv2.imwrite("dump.jpg", mat)
        pass
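The example above stubs out its event-publish step. Below is a minimal sketch of what that step could look like, assuming the events are plain dictionaries and that a downstream element such as gvametapublish (if present in the pipeline) forwards messages attached with add_message().

import json
import time

from gstgva import VideoFrame

def publish_events(frame: VideoFrame, event_list):
    # Serialize the events and attach them to the frame as a JSON message.
    if not event_list:
        return
    message = {
        "timestamp_ms": int(time.time() * 1000),  # epoch milliseconds
        "events": event_list,                     # hypothetical event dicts
    }
    frame.add_message(json.dumps(message))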
Example No. 4
def draw_conf(frame: VideoFrame):
    with frame.data() as img:
        for roi in frame.regions():
            rect = roi.rect()
            conf = roi.confidence()
            if rect:
                cv2.putText(img, f'{conf:.2f}', (rect.x, rect.y + rect.h + 30),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
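A slight extension of the same idea, drawing the bounding box and the ROI label together with the confidence; it assumes roi.label() is available in the gstgva bindings, and the colors and font scale are arbitrary.

import cv2

from gstgva import VideoFrame

def draw_box_and_conf(frame: VideoFrame):
    with frame.data() as img:
        for roi in frame.regions():
            rect = roi.rect()
            if not rect:
                continue
            # bounding box
            cv2.rectangle(img, (rect.x, rect.y),
                          (rect.x + rect.w, rect.y + rect.h), (0, 255, 0), 2)
            # label (if any) and confidence just above the box
            text = "{} {:.2f}".format(roi.label(), roi.confidence())
            cv2.putText(img, text, (rect.x, max(rect.y - 10, 0)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2)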
Example No. 5
    def log_age(self, frame: VideoFrame) -> bool:
        for roi in frame.regions():
            for tensor in roi.tensors():
                if tensor.name() == 'detection':
                    continue
                layer_name = tensor.layer_name()
                if 'age_conv3' == layer_name:
                    self.log_file.write(tensor.label() + "\n")
                    continue
        return True
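The method above reads self.log_file, so it presumably lives in a class that the gvapython element instantiates. A hypothetical skeleton showing where that attribute could come from (the class name and log path are assumptions):

from gstgva import VideoFrame

class AgeLogger:
    # Hypothetical wrapper for the log_age method shown above.
    def __init__(self, log_path="ages.log"):  # log path is an assumption
        self.log_file = open(log_path, "a")

    def log_age(self, frame: VideoFrame) -> bool:
        # body as in the example above
        return True

    def __del__(self):
        self.log_file.close()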
Example No. 6
    def process_frame(self, frame: VideoFrame) -> bool:
        timestamp = int(round(time.time()*1000))
        events = []

        new_counts = Counter()

        for detection in frame.regions():
            new_counts[detection.meta().get_roi_type()] += 1

        for key, count in new_counts.items():
            if key in self.item_count:
                if count > self.item_count[key]:
                    for x in range(0, count-self.item_count[key]):
                        events.append({'event_time': timestamp,
                                       'roi_action': 'ENTER',
                                       'object_id': key})
                elif count < self.item_count[key]:
                    for x in range(0, self.item_count[key]-count):
                        events.append({'event_time': timestamp,
                                       'roi_action': 'DEPART',
                                       'object_id': key})
            else:
                for x in range(0, count):
                    events.append({'event_time': timestamp,
                                   'roi_action': 'ENTER',
                                   'object_id': key})
        for key, count in self.item_count.items():
            if key not in new_counts:
                for x in range(0, count):
                    events.append({'event_time': timestamp,
                                   'roi_action': 'DEPART',
                                   'object_id': key})

        if events:
            frame.add_message(json.dumps(events))

        self.item_count = new_counts

        if self.item_count['bottle'] <= 1:
            frame.add_region(0, 0, 0, 0, 0, region_tensor=REGION_TENSOR)
        else:
            frame.add_region(0, 0, 0, 0, 0, region_tensor=VideoFrame.create_labels_structure(
                ["Bottle Count: " + str(self.item_count['bottle'])]))

        if (self.item_count['person'] > 0) and self.redact_person:
            with frame.data() as contents:
                contents[:] = 0
            self.remove_regions(frame)
            frame.add_region(0, 0, 0, 0, 1, region_tensor=REGION_TENSOR)

        return True
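For reference, an illustrative example (not taken from the source) of the list that add_message() serializes when one "bottle" enters and one "person" departs in the same frame; event_time is in epoch milliseconds.

import json

example_events = [
    {"event_time": 1712345678901, "roi_action": "ENTER", "object_id": "bottle"},
    {"event_time": 1712345678901, "roi_action": "DEPART", "object_id": "person"},
]
# equivalent to what process_frame attaches: frame.add_message(json.dumps(example_events))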
Example No. 7
def process_frame(frame: VideoFrame) -> bool:
    for roi in frame.regions():
        for tensor in roi.tensors():
            if tensor.name() == 'detection':
                continue
            layer_name = tensor.layer_name()
            data = tensor.data()
            if 'age' in layer_name:
                tensor.set_label(str(int(data[0] * 100)))
            if 'gender' in tensor.model_name() and 'prob' in layer_name:
                tensor.set_label(" M " if data[1] > 0.5 else " F ")
            if 'EmoNet' in tensor.model_name():
                emotions = ["neutral", "happy", "sad", "surprise", "anger"]
                tensor.set_label(emotions[int(numpy.argmax(data))])

    return True
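Callbacks like this one are typically loaded by the gvapython element, and the labels they set are rendered downstream by gvawatermark. The sketch below shows such a pipeline string; the element and property names follow the DL Streamer documentation, while the script name and model paths are placeholders.

PIPELINE = (
    "filesrc location=input.mp4 ! decodebin ! videoconvert ! "
    "gvadetect model=face-detection-adas-0001.xml ! "
    "gvaclassify model=age-gender-recognition-retail-0013.xml ! "
    "gvaclassify model=emotions-recognition-retail-0003.xml ! "
    "gvapython module=postprocess.py function=process_frame ! "  # placeholder script
    "gvawatermark ! videoconvert ! autovideosink"
)
# launched, for example, with Gst.parse_launch(PIPELINE)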
Example No. 8
def process_frame(frame: VideoFrame) -> bool:
    for roi in frame.regions():
        for tensor in roi.tensors():
            if tensor.name() == 'detection':
                continue
            layer_name = tensor.layer_name()
            data = tensor.data()
            if 'age_conv3' == layer_name:
                tensor.set_label(str(int(data[0] * 100)))
                continue
            if 'prob' == layer_name:
                tensor.set_label(" M " if data[1] > 0.5 else " F ")
                continue
            if 'prob_emotion' == layer_name:
                emotions = ["neutral", "happy", "sad", "surprise", "anger"]
                tensor.set_label(emotions[int(numpy.argmax(data))])
                continue

    return True
Example No. 9
    def remove_regions(self, frame: VideoFrame):
        for _ in range(len(frame.regions())):
            frame.pop_region()