示例#1
0
    def stream_yolov3_personal(self):
        """Continuously pull frames from ``self.url``, run YOLOv3 person
        detection on each one and display the annotated frames until the
        'q' key is pressed.
        """
        # BUG FIX: process_time() counts CPU time only and excludes the
        # blocking network fetch that dominates this loop, so the printed
        # per-frame latency was meaningless.  perf_counter() measures
        # wall-clock time.
        from time import perf_counter

        net = ObjectDetection(conf_thresh=0.8, nms_thresh=0.4)
        while True:
            begin = perf_counter()
            # Fetch a single frame over HTTP and decode it as RGB.
            frame = Image.open(requests.get(self.url,
                                            stream=True).raw).convert('RGB')
            # Preprocess to the network input size; keep scale/padding so
            # detections can be mapped back onto the original frame.
            image, scale, padding = SingleImage(frame)[0]
            image = torch.unsqueeze(image, 0)  # add batch dimension
            detections = net.detect(image, scale, padding)
            image_with_detections = net.draw_result(frame,
                                                    detections[0],
                                                    show=False)
            # PIL delivers RGB; OpenCV display expects BGR.
            opencvImage = cv2.cvtColor(np.array(image_with_detections),
                                       cv2.COLOR_RGB2BGR)
            end = perf_counter()
            print(end - begin)  # per-frame latency in seconds
            # show the frame to our screen
            cv2.imshow("Frame", opencvImage)
            key = cv2.waitKey(1) & 0xFF
            # if the 'q' key is pressed, stop the loop
            if key == ord("q"):
                break

        # close all windows
        cv2.destroyAllWindows()
示例#2
0
class PeopleDetection:
    """Detect people in a frame and identify them via face recognition."""

    def __init__(self):
        self.obj_detect = ObjectDetection()
        self.detect_face = FaceDetection()
        self.reconize_face = FaceRecognition()

    def detect(self, frame):
        """Detect people in *frame*, annotate it in place, return details.

        :param frame: numpy image (assumed H x W x 3 BGR uint8 as produced
                      by OpenCV — TODO confirm against caller)
        :return: list of dicts, one per recognized face, with keys
                 ``name``, ``x``/``y`` (person-box centre), ``score``,
                 ``area`` (fraction of frame covered by the person box)
                 and ``time`` (UTC timestamp).  When nothing is detected a
                 single "No_person" placeholder entry is returned, which
                 downstream path planning relies on.
        """
        details = []
        detections = self.obj_detect.detect(frame)

        # Walk every object proposed by the object detector.
        for i in range(detections.shape[0]):
            confidence = detections[i, 2]
            idx = int(detections[i, 1])

            # Only keep reasonably confident "person" detections.
            if confidence > 0.25 and CLASSES[idx] == "person":

                (h, w) = frame.shape[:2]
                # The detector emits relative coordinates; scale to pixels.
                box = detections[i, 3:7] * np.array([w, h, w, h])
                (startX, startY, endX, endY) = box.astype("int")

                # Detect faces inside the person crop (usually one face).
                sub_frame = frame[startY:endY, startX:endX, :]
                face_locations = self.detect_face.detect(sub_frame)
                # Fraction of the whole frame covered by this person box.
                area = float((endX - startX) * (endY - startY)) / (h * w)

                # For each face, recognize the person's name.
                for face_location in face_locations:
                    if face_location == ():
                        continue

                    (face_start_x, face_start_y, face_end_x,
                     face_end_y) = face_location
                    # Discard faces spilling outside the person box.
                    if face_end_x + startX > endX:
                        continue

                    # Draw the face rectangle in full-frame coordinates
                    # (face coords are relative to the person crop).
                    cv2.rectangle(
                        frame, (face_start_x + startX, face_start_y + startY),
                        (face_end_x + startX, face_end_y + startY),
                        (255, 0, 0), 2)

                    # Using face recognition get the name and its score.
                    name, score = self.reconize_face.face_recognize(
                        sub_frame, face_location)

                    # Estimate a body box below the face, clamped to the
                    # person box.
                    face_h = face_end_y - face_start_y
                    # BUG FIX: width was computed as face_end_x - face_end_y
                    # (an x coordinate minus a y coordinate).
                    face_w = face_end_x - face_start_x
                    start_x = np.floor(face_start_x - face_w / 4 + startX)
                    end_x = np.ceil(face_end_x + face_w / 4 + startX)
                    start_y = np.floor(face_end_y + 3 * face_h / 16 + startY)
                    # BUG FIX: vertical extent was based on face_end_x
                    # (an x coordinate); face_end_y is clearly intended.
                    end_y = np.ceil(face_end_y + 5 * face_h / 2 + startY)
                    t_start_x = int(start_x) if start_x > startX else startX
                    t_start_y = int(start_y) if start_y < endY else endY
                    t_end_x = int(end_x) if end_x < endX else endX
                    t_end_y = int(end_y) if end_y < endY else endY

                    cv2.rectangle(frame, (t_start_x, t_start_y),
                                  (t_end_x, t_end_y), (255, 0, 0), 2)
                    details.append({
                        'name': name,
                        'x': int((startX + endX) / 2),
                        'y': int((startY + endY) / 2),
                        'score': score,
                        'area': area,
                        'time': strftime("%Y-%m-%d %H:%M:%S", gmtime())
                    })

                # Tint the detected person red-channel.
                # NOTE(review): uint8 addition wraps on overflow — confirm
                # the wrap-around is acceptable for this visualization.
                sub_frame[:, :, 2] = sub_frame[:, :, 2] + 80
                # BUG FIX: cv2.circle requires integer centre coordinates;
                # the original passed floats ('/' is float division in Py3).
                cv2.circle(frame,
                           (int((startX + endX) / 2),
                            int((startY + endY) / 2)),
                           3, (0, 255, 0), -1)

        # For no person detected, we require this for path planning.
        if len(details) == 0:
            details.append({
                'name': "No_person",
                'x': int(480),
                'y': int(270),
                'score': 10.0,
                'area': 0.0,
                'time': strftime("%Y-%m-%d %H:%M:%S", gmtime())
            })

        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            sys.exit(0)
        return details
示例#3
0
    print('------------------------')
    print('NUM:', count)

    # Read one frame: 'success' flag plus the frame image.
    success, img = cap.read()

    # Stop the loop once no more frames can be read.
    if not success:
        break

    center_points_current = []  # centre points of all targets in this frame

    # Object detection:
    # feed the current frame to the detector, which returns
    # class_ids (predicted class per target), scores (class confidence)
    # and boxes (detection rectangles).
    class_ids, scores, boxes = od.detect(img)

    # Each box holds the top-left corner plus the width and height.
    for box in boxes:
        (x, y, w, h) = box

        # Centre of the box; pixel coordinates must be integers.
        cx, cy = int((x + x + w) / 2), int((y + y + h) / 2)

        # Collect the centre points of every box in this frame.
        center_points_current.append((cx, cy))

        # Draw the rectangle: frame, top-left and bottom-right corners,
        # colour, line thickness.
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

    # Display the centre point of every detection box ("pt" = centre coords).