Example n. 1
0
def run():
    """Compare angular gaze error of the legacy 3D detector against pye3d.

    Runs both pipelines over the same frames and returns the median
    angular error (degrees) of each, skipping the first 100 frames so
    the 3D models can warm up before errors are measured.
    """
    detector_2d = Detector2D(properties={
        "pupil_size_max": 180,
        "pupil_size_min": 10,
    })
    detector_3d = Detector3D(detector_2d.get_properties())
    pye3d = Pye3D()
    pye3d.settings["focal_length"] = 283.0

    errors_old3d = []
    errors_pye3d = []
    for frame_idx, (frame, ts, gaze_vec) in enumerate(iterate_frame_data()):
        old3d_result = detector_3d.detect(frame, ts)

        result_2d = detector_2d.detect(frame)
        result_2d["timestamp"] = ts
        pye3d_result = pye3d.update_and_detect(result_2d, frame)

        # Both detectors still consume the frame above; only the error
        # statistics exclude the warm-up period.
        if frame_idx < 100:
            continue

        if old3d_result["confidence"] > 0.9:
            error_deg = 180 / np.pi * angle_between(
                gaze_vec, old3d_result["circle_3d"]["normal"])
            errors_old3d.append(error_deg)
        if pye3d_result["confidence"] > 0.9:
            error_deg = 180 / np.pi * angle_between(
                gaze_vec, pye3d_result["circle_3d"]["normal"])
            errors_pye3d.append(error_deg)

    return {
        "median(angle_error(old3d))": np.median(errors_old3d),
        "median(angle_error(pye3d))": np.median(errors_pye3d),
    }
Example n. 2
0
def run():
    """Profile combined 2D + pye3d pupil detection time per frame.

    Times one 2D detection plus one pye3d update for every frame of every
    video returned by get_videos().

    Returns:
        dict with the 0.05 / 0.5 / 0.95 quantiles of per-frame processing
        time in seconds.

    Raises:
        RuntimeWarning: if no videos (and hence no frames) were found.
    """
    d2d = Detector2D()
    d3d = Pye3D()

    times = []

    for vid in get_videos():
        for frame in iterate_frames(vid):
            t1 = time.perf_counter()

            result_2d = d2d.detect(frame)
            result_2d["timestamp"] = t1
            # Result is discarded on purpose: only the elapsed time matters.
            d3d.update_and_detect(result_2d, frame)

            t2 = time.perf_counter()
            times.append(t2 - t1)

    if not times:
        # NOTE(review): raising a Warning subclass as an exception is unusual,
        # but kept so callers catching RuntimeWarning keep working.
        raise RuntimeWarning(
            "Could not find any eye videos in ./data/! "
            "Place videos there to profile them!"
        )

    times = np.array(times)
    return {
        "q0.05": np.quantile(times, 0.05),
        "q0.5": np.quantile(times, 0.5),
        "q0.95": np.quantile(times, 0.95),
    }
Example n. 3
0
 def __init__(
     self,
     g_pool=None,
     properties=None,
     detector_2d: Detector2D = None,
 ):
     """Adopt a given 2D detector, or construct one from *properties*."""
     super().__init__(g_pool=g_pool)
     if detector_2d:
         self.detector_2d = detector_2d
     else:
         # Fall back to a fresh detector; empty dict when no properties given.
         self.detector_2d = Detector2D(properties or {})
Example n. 4
0
 def __init__(self,
              g_pool=None,
              namespaced_properties=None,
              detector_2d: Detector2D = None):
     """Adopt a given 2D detector, or build one from namespaced properties."""
     super().__init__(g_pool=g_pool)
     if detector_2d:
         self.detector_2d = detector_2d
     else:
         # No detector supplied: construct one (empty config by default).
         self.detector_2d = Detector2D(namespaced_properties or {})
     self.proxy = PropertyProxy(self.detector_2d)
Example n. 5
0
def main(eye_video_path):
    """Run 2D + pye3d pupil detection on an eye video and visualize results.

    Args:
        eye_video_path: path to an eye video file readable by OpenCV.

    Displays each frame with the detected 3D pupil ellipse drawn in green;
    press ESC to exit early.
    """
    # create 2D detector
    detector_2d = Detector2D()
    # create pye3D detector
    camera = CameraModel(focal_length=561.5, resolution=[400, 400])
    detector_3d = Detector3D(camera=camera, long_term_mode=DetectorMode.blocking)
    # load eye video
    eye_video = cv2.VideoCapture(eye_video_path)
    # FPS is constant per video, so read it once instead of every frame.
    # Some backends report 0 for unknown FPS, which would divide by zero
    # below — fall back to a nominal 30 fps in that case.
    fps = eye_video.get(cv2.CAP_PROP_FPS) or 30.0
    # read each frame of video and run pupil detectors
    while eye_video.isOpened():
        frame_number = eye_video.get(cv2.CAP_PROP_POS_FRAMES)
        ret, eye_frame = eye_video.read()
        if not ret:
            # end of stream (or read failure)
            break
        # read video frame as numpy array
        grayscale_array = cv2.cvtColor(eye_frame, cv2.COLOR_BGR2GRAY)
        # run 2D detector on video frame
        result_2d = detector_2d.detect(grayscale_array)
        # pye3d expects a timestamp in seconds; derive it from the frame index
        result_2d["timestamp"] = frame_number / fps
        # pass 2D detection result to 3D detector
        result_3d = detector_3d.update_and_detect(result_2d, grayscale_array)
        ellipse_3d = result_3d["ellipse"]
        # draw 3D detection result on eye frame
        cv2.ellipse(
            eye_frame,
            tuple(int(v) for v in ellipse_3d["center"]),
            tuple(int(v / 2) for v in ellipse_3d["axes"]),  # axes are full lengths; cv2 wants half-axes
            ellipse_3d["angle"],
            0,
            360,  # start/end angle for drawing
            (0, 255, 0),  # color (BGR): green
        )
        # show frame
        cv2.imshow("eye_frame", eye_frame)
        # press esc to exit
        if cv2.waitKey(1) & 0xFF == 27:
            break
    eye_video.release()
    cv2.destroyAllWindows()
Example n. 6
0
        while not self._stop:
            self._data = receiver.recv_jpg()
            self._data_ready.set()
        receiver.close()

    def close(self):
        """Request the background receive loop to stop.

        The loop checks ``self._stop`` between receives, so shutdown takes
        effect after the current blocking recv completes.
        """
        self._stop = True


if __name__ == "__main__":
    # Receive from broadcast
    #hostname = "127.0.0.1"  # Use to receive from localhost
    hostname = "192.168.0.137"  # Use to receive from other computer
    port = 5555
    receiver = VideoStreamSubscriber(hostname, port)
    detector = Detector2D()
    try:
        start_time = time.time()
        x = 1
        counter = 0
        font = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (0, 30)
        fontScale = 1
        fontColor = (255, 255, 255)
        lineType = 1
        fps = 0
        while True:
            # Due to the IO thread constantly fetching images, we can do any amount
            # of processing here and the next call to receive() will still give us
            # the most recent frame (more or less realtime behaviour)