Example 1
    def visualize(self, frame, alpha, scale, show_ellipses, pupil_positions):
        if not self.initialized:
            return

        requested_eye_frame_idx = self.eye_world_frame_map[frame.index]
        # 1. do we need a new frame?
        if requested_eye_frame_idx != self.current_eye_frame.index:
            if requested_eye_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame, it's faster to read one frame and throw it away.
                self.source.get_frame()
            if requested_eye_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(requested_eye_frame_idx)

            try:
                self.current_eye_frame = self.source.get_frame()
            except EndofVideoError:
                logger.info("Reached the end of the eye video for eye {}.".format(self.eyeid))

        # 2. dragging image
        if self.drag_offset is not None:
            x, y = glfwGetCursorPos(glfwGetCurrentContext())
            pos = x * self.hdpi_fac, y * self.hdpi_fac
            pos = normalize(pos, self.g_pool.camera_render_size)
            # Position in img pixels
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))
            self.pos = int(pos[0] + self.drag_offset[0]), int(pos[1] + self.drag_offset[1])

        # 3. keep the overlay inside the image bounds; do this even when not dragging because the video size could change.
        video_size = round(self.current_eye_frame.width * scale), round(self.current_eye_frame.height * scale)

        # frame.img.shape[0] is the height, frame.img.shape[1] the width of the world frame
        self.pos = (min(frame.img.shape[1] - video_size[0], max(self.pos[0], 0)),
                    min(frame.img.shape[0] - video_size[1], max(self.pos[1], 0)))

        # 4. fetch the eye frame in grayscale and convert back to BGR for color drawing
        eyeimage = self.current_eye_frame.gray
        eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)

        if show_ellipses:
            try:
                pp = next(pp for pp in pupil_positions
                          if pp['id'] == self.eyeid and pp['timestamp'] == self.current_eye_frame.timestamp)
            except StopIteration:
                pass
            else:
                el = pp['ellipse']
                conf = int(pp.get('model_confidence', pp.get('confidence', 0.1)) * 255)
                el_points = getEllipsePts((el['center'], el['axes'], el['angle']))
                cv2.polylines(eyeimage, [np.asarray(el_points, dtype='i')], True, (0, 0, 255, conf), thickness=1)
                cv2.circle(eyeimage, (int(el['center'][0]), int(el['center'][1])), 5, (0, 0, 255, conf), thickness=-1)

        # flip and scale
        eyeimage = cv2.resize(eyeimage, (0, 0), fx=scale, fy=scale)
        if self.hflip:
            eyeimage = np.fliplr(eyeimage)
        if self.vflip:
            eyeimage = np.flipud(eyeimage)

        transparent_image_overlay(self.pos, eyeimage, frame.img, alpha)
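All of these examples delegate the final compositing to transparent_image_overlay, which is imported from elsewhere and never shown. A minimal sketch of what such a helper can look like, assuming pos is the (x, y) top-left corner in target pixels and both images are uint8 BGR arrays (the real helper in the Pupil codebase may differ in detail):

import cv2


def transparent_image_overlay(pos, overlay_img, img, alpha):
    """Blend overlay_img onto img in place at pos with opacity alpha (a sketch)."""
    x, y = int(pos[0]), int(pos[1])
    h, w = overlay_img.shape[:2]
    roi = img[y:y + h, x:x + w]  # view into the target frame
    # alpha * overlay + (1 - alpha) * background, written back in place
    img[y:y + h, x:x + w] = cv2.addWeighted(overlay_img, alpha, roi, 1.0 - alpha, 0.0)

The callers above clamp pos so the overlay fits inside the frame; without that clamp the ROI and the overlay can have different shapes and cv2.addWeighted raises an error.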
Example 2
    def update(self, frame, events):
        requested_eye_frame_idx = self.eye0_world_frame_map[frame.index]

        # do we need a new frame?
        if requested_eye_frame_idx != self._frame.index:
            # do we need to seek?
            if requested_eye_frame_idx == self.cap.get_frame_index() + 1:
                # if we just need to seek by one frame, it's faster to read one frame and throw it away.
                _ = self.cap.get_frame()
            if requested_eye_frame_idx != self.cap.get_frame_index():
                # only now do I need to seek
                self.cap.seek_to_frame(requested_eye_frame_idx)
            # read the new eye frame
            try:
                self._frame = self.cap.get_frame()
            except EndofVideoFileError:
                logger.warning("Reached the end of the eye video.")
        else:
            # our old frame is still valid because we are upsampling
            pass

        # drawing the eye overlay
        pad = 10
        pos = frame.width - self.width - pad, pad
        if self.mirror:
            transparent_image_overlay(pos, np.fliplr(self._frame.img),
                                      frame.img, self.alpha)
        else:
            transparent_image_overlay(pos, self._frame.img, frame.img,
                                      self.alpha)
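The "do we need a new frame?" branch is the same optimization in every example: when the requested index is only one decode ahead, reading and discarding a frame is cheaper than a random-access seek into the container. The pattern in isolation, as a sketch against the capture interface these snippets assume (get_frame_index, seek_to_frame, get_frame); end-of-video handling is omitted:

def fetch_eye_frame(cap, requested_idx, current_frame):
    """Return the eye frame for requested_idx, decoding forward when cheaper."""
    if requested_idx == current_frame.index:
        # old frame is still valid: the world video is upsampled vs. the eye video
        return current_frame
    if requested_idx == cap.get_frame_index() + 1:
        # one frame ahead: decoding and discarding a frame beats seeking
        _ = cap.get_frame()
    if requested_idx != cap.get_frame_index():
        # only now is a real seek necessary
        cap.seek_to_frame(requested_idx)
    return cap.get_frame()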
Example 3
    def update(self,frame,events):
        requested_eye_frame_idx = self.eye0_world_frame_map[frame.index]

        # do we need a new frame?
        if requested_eye_frame_idx != self._frame.index:
            # do we need to seek?
            if requested_eye_frame_idx == self.cap.get_frame_index()+1:
                # if we just need to seek by one frame, it's faster to read one frame and throw it away.
                _ = self.cap.get_frame()
            if requested_eye_frame_idx != self.cap.get_frame_index():
                # only now do I need to seek
                self.cap.seek_to_frame(requested_eye_frame_idx)
            # read the new eye frame
            try:
                self._frame = self.cap.get_frame()
            except EndofVideoFileError:
                logger.warning("Reached the end of the eye video.")
        else:
            # our old frame is still valid because we are upsampling
            pass

        # drawing the eye overlay
        pad = 10
        pos = frame.width-self.width-pad, pad
        if self.mirror:
            transparent_image_overlay(pos,np.fliplr(self._frame.img),frame.img,self.alpha)
        else:
            transparent_image_overlay(pos,self._frame.img,frame.img,self.alpha)
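None of the snippets show how eye0_world_frame_map (or eye_world_frame_map) is built; it is indexed by world frame number and yields the matching eye frame number. A hypothetical reconstruction from per-video timestamp arrays, mapping each world frame to the eye frame closest in time:

import numpy as np


def make_eye_world_frame_map(world_timestamps, eye_timestamps):
    """For each world frame, return the index of the closest eye frame.

    Hypothetical sketch: the examples only show this map being indexed,
    not created, so the construction here is an assumption.
    """
    world_ts = np.asarray(world_timestamps)
    eye_ts = np.asarray(eye_timestamps)
    # index of the first eye timestamp >= each world timestamp
    right = np.searchsorted(eye_ts, world_ts).clip(0, len(eye_ts) - 1)
    left = (right - 1).clip(0, len(eye_ts) - 1)
    # pick whichever neighbour is closer in time
    pick_left = np.abs(eye_ts[left] - world_ts) < np.abs(eye_ts[right] - world_ts)
    return np.where(pick_left, left, right)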
Example 4
    def update(self, frame, events):
        for eye_index in self.showeyes:
            requested_eye_frame_idx = self.eye_world_frame_map[eye_index][frame.index]

            # 1. do we need a new frame?
            if requested_eye_frame_idx != self.eye_frames[eye_index].index:
                # do we need to seek?
                if requested_eye_frame_idx == self.eye_cap[eye_index].get_frame_index() + 1:
                    # if we just need to seek by one frame, it's faster to read one frame and throw it away.
                    _ = self.eye_cap[eye_index].get_frame()
                if requested_eye_frame_idx != self.eye_cap[eye_index].get_frame_index():
                    # only now do I need to seek
                    self.eye_cap[eye_index].seek_to_frame(requested_eye_frame_idx)
                # read the new eye frame
                try:
                    self.eye_frames[eye_index] = self.eye_cap[eye_index].get_frame()
                except EndofVideoFileError:
                    logger.warning("Reached the end of the eye video for eye %s." % eye_index)
            else:
                # our old frame is still valid because we are upsampling
                pass

            # 2. dragging image
            if self.drag_offset[eye_index] is not None:
                pos = glfwGetCursorPos(glfwGetCurrentContext())
                pos = normalize(pos, glfwGetWindowSize(glfwGetCurrentContext()))
                pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # position in img pixels
                self.pos[eye_index][0] = pos[0] + self.drag_offset[eye_index][0]
                self.pos[eye_index][1] = pos[1] + self.drag_offset[eye_index][1]
            else:
                self.video_size = [round(self.eye_frames[eye_index].width * self.eye_scale_factor),
                                   round(self.eye_frames[eye_index].height * self.eye_scale_factor)]

            # 3. keep in image bounds; do this even when not dragging because the video size could change.
            # frame.img.shape[0] is the height, frame.img.shape[1] the width of the world frame
            self.pos[eye_index][1] = min(frame.img.shape[0] - self.video_size[1], max(self.pos[eye_index][1], 0))
            self.pos[eye_index][0] = min(frame.img.shape[1] - self.video_size[0], max(self.pos[eye_index][0], 0))

            # 4. convert to grayscale, scale, and flip
            eye_gray = cv2.cvtColor(self.eye_frames[eye_index].img, cv2.COLOR_BGR2GRAY)
            eyeimage = cv2.resize(eye_gray, (0, 0), fx=self.eye_scale_factor, fy=self.eye_scale_factor)
            if self.mirror[str(eye_index)]:
                eyeimage = np.fliplr(eyeimage)
            if self.flip[str(eye_index)]:
                eyeimage = np.flipud(eyeimage)

            # 5. finally overlay the image
            x, y = int(self.pos[eye_index][0]), int(self.pos[eye_index][1])
            transparent_image_overlay((x, y), cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR), frame.img, self.alpha)
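The drag handling above round-trips the cursor through normalize/denormalize to convert window coordinates into world-image pixels. A minimal sketch of those two helpers under the simplest assumption, plain rescaling (the actual Pupil helpers also support an optional y-flip, ignored here):

def normalize(pos, size):
    """Map pixel coordinates to [0, 1] relative coordinates."""
    return pos[0] / size[0], pos[1] / size[1]


def denormalize(pos, size):
    """Map [0, 1] relative coordinates back to pixel coordinates."""
    return pos[0] * size[0], pos[1] * size[1]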
Example 5
    def recent_events(self, events):
        frame = events.get('frame')
        if not frame:
            return
        for eye_index in self.showeyes:
            requested_eye_frame_idx = self.eye_world_frame_map[eye_index][frame.index]

            # 1. do we need a new frame?
            if requested_eye_frame_idx != self.eye_frames[eye_index].index:
                # do we need to seek?
                if requested_eye_frame_idx == self.eye_cap[eye_index].get_frame_index() + 1:
                    # if we just need to seek by one frame, it's faster to read one frame and throw it away.
                    _ = self.eye_cap[eye_index].get_frame()
                if requested_eye_frame_idx != self.eye_cap[eye_index].get_frame_index():
                    # only now do I need to seek
                    self.eye_cap[eye_index].seek_to_frame(requested_eye_frame_idx)
                # read the new eye frame
                try:
                    self.eye_frames[eye_index] = self.eye_cap[eye_index].get_frame()
                except EndofVideoFileError:
                    logger.warning("Reached the end of the eye video for eye {}.".format(eye_index))
            else:
                # our old frame is still valid because we are upsampling
                pass

            # 2. dragging image
            if self.drag_offset[eye_index] is not None:
                pos = glfwGetCursorPos(glfwGetCurrentContext())
                pos = normalize(pos, glfwGetWindowSize(glfwGetCurrentContext()))
                pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # position in img pixels
                self.pos[eye_index][0] = pos[0] + self.drag_offset[eye_index][0]
                self.pos[eye_index][1] = pos[1] + self.drag_offset[eye_index][1]
            else:
                self.video_size = [round(self.eye_frames[eye_index].width * self.eye_scale_factor),
                                   round(self.eye_frames[eye_index].height * self.eye_scale_factor)]

            # 3. keep in image bounds; do this even when not dragging because the video size could change.
            # frame.img.shape[0] is the height, frame.img.shape[1] the width of the world frame
            self.pos[eye_index][1] = min(frame.img.shape[0] - self.video_size[1], max(self.pos[eye_index][1], 0))
            self.pos[eye_index][0] = min(frame.img.shape[1] - self.video_size[0], max(self.pos[eye_index][0], 0))

            # 4. convert to grayscale, scale, and flip
            eye_gray = cv2.cvtColor(self.eye_frames[eye_index].img, cv2.COLOR_BGR2GRAY)
            eyeimage = cv2.resize(eye_gray, (0, 0), fx=self.eye_scale_factor, fy=self.eye_scale_factor)
            if self.mirror[str(eye_index)]:
                eyeimage = np.fliplr(eyeimage)
            if self.flip[str(eye_index)]:
                eyeimage = np.flipud(eyeimage)

            # back to BGR so the ellipse can be drawn in color
            eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)

            if self.show_ellipses and events['pupil_positions']:
                for pd in events['pupil_positions']:
                    if pd['id'] == eye_index and pd['timestamp'] == self.eye_frames[eye_index].timestamp:
                        el = pd['ellipse']
                        conf = int(pd.get('model_confidence', pd.get('confidence', 0.1)) * 255)
                        center = [int(self.eye_scale_factor * v) for v in el['center']]
                        el['axes'] = tuple(int(self.eye_scale_factor * v / 2) for v in el['axes'])
                        el['angle'] = int(el['angle'])
                        el_points = cv2.ellipse2Poly(tuple(center), el['axes'], el['angle'], 0, 360, 1)

                        if self.mirror[str(eye_index)]:
                            el_points = [(self.video_size[0] - x, y) for x, y in el_points]
                            center[0] = self.video_size[0] - center[0]
                        if self.flip[str(eye_index)]:
                            el_points = [(x, self.video_size[1] - y) for x, y in el_points]
                            center[1] = self.video_size[1] - center[1]

                        cv2.polylines(eyeimage, [np.asarray(el_points)], True, (0, 0, 255, conf),
                                      thickness=math.ceil(2 * self.eye_scale_factor))
                        cv2.circle(eyeimage, tuple(center), int(5 * self.eye_scale_factor), (0, 0, 255, conf),
                                   thickness=-1)

            # 5. finally overlay the image
            x, y = int(self.pos[eye_index][0]), int(self.pos[eye_index][1])
            transparent_image_overlay((x, y), eyeimage, frame.img, self.alpha)
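The ellipse block above scales the detected pupil ellipse, halves the axes, and rasterizes it with cv2.ellipse2Poly before flipping the points to match the overlay orientation. The drawing step on its own, as a runnable sketch assuming a pupil datum shaped like the ones in these examples ('ellipse' with 'center', 'axes', 'angle' in eye-image pixels, plus a confidence in [0, 1]):

import cv2


def draw_pupil_ellipse(img, pupil_datum, scale=1.0):
    """Outline the pupil ellipse and mark its center on a BGR image (a sketch)."""
    el = pupil_datum['ellipse']
    center = (int(scale * el['center'][0]), int(scale * el['center'][1]))
    # cv2.ellipse2Poly expects half-axes, hence the division by 2
    axes = (int(scale * el['axes'][0] / 2), int(scale * el['axes'][1] / 2))
    pts = cv2.ellipse2Poly(center, axes, int(el['angle']), 0, 360, 1)
    conf = int(pupil_datum.get('confidence', 0.1) * 255)
    # note: OpenCV ignores the 4th color component on 3-channel images, so the
    # confidence-derived alpha only matters if img actually has an alpha channel
    cv2.polylines(img, [pts], True, (0, 0, 255, conf), thickness=1)
    cv2.circle(img, center, max(1, int(5 * scale)), (0, 0, 255, conf), thickness=-1)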
Example 6
    def visualize(self, frame, alpha, scale, show_ellipses, pupil_positions):
        if not self.initialized:
            return

        requested_eye_frame_idx = self.eye_world_frame_map[frame.index]
        # 1. do we need a new frame?
        if requested_eye_frame_idx != self.current_eye_frame.index:
            if requested_eye_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame, it's faster to read one frame and throw it away.
                self.source.get_frame()
            if requested_eye_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(int(requested_eye_frame_idx))

            try:
                self.current_eye_frame = self.source.get_frame()
            except EndofVideoError:
                logger.info("Reached the end of the eye video for eye {}.".format(self.eyeid))

        # 2. dragging image
        if self.drag_offset is not None:
            x, y = glfwGetCursorPos(glfwGetCurrentContext())
            pos = x * self.hdpi_fac, y * self.hdpi_fac
            pos = normalize(pos, self.g_pool.camera_render_size)
            # Position in img pixels
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))
            self.pos = (
                int(pos[0] + self.drag_offset[0]),
                int(pos[1] + self.drag_offset[1]),
            )

        # 3. keep the overlay inside the image bounds; do this even when not dragging because the video size could change.
        video_size = (
            round(self.current_eye_frame.width * scale),
            round(self.current_eye_frame.height * scale),
        )

        # frame.img.shape[0] is the height, frame.img.shape[1] the width of the world frame
        self.pos = (
            min(frame.img.shape[1] - video_size[0], max(self.pos[0], 0)),
            min(frame.img.shape[0] - video_size[1], max(self.pos[1], 0)),
        )

        # 4. fetch the eye frame in grayscale and convert back to BGR for color drawing
        eyeimage = self.current_eye_frame.gray
        eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)

        if show_ellipses:
            try:
                pp = next(
                    (pp for pp in pupil_positions if pp["id"] == self.eyeid
                     and pp["timestamp"] == self.current_eye_frame.timestamp))
            except StopIteration:
                pass
            else:
                draw_pupil_on_image(eyeimage, pp)

        # flip and scale
        eyeimage = cv2.resize(eyeimage, (0, 0), fx=scale, fy=scale)
        if self.hflip:
            eyeimage = np.fliplr(eyeimage)
        if self.vflip:
            eyeimage = np.flipud(eyeimage)

        transparent_image_overlay(self.pos, eyeimage, frame.img, alpha)
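draw_pupil_on_image is not shown in this snippet; judging from Examples 1 and 7, which inline the equivalent drawing, it presumably wraps the same polylines-plus-center-dot rendering sketched after Example 5.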
Example 7
    def visualize(self, frame, alpha, scale, show_ellipses, pupil_positions):
        if not self.initialized:
            return

        requested_eye_frame_idx = self.eye_world_frame_map[frame.index]
        # 1. do we need a new frame?
        if requested_eye_frame_idx != self.current_eye_frame.index:
            if requested_eye_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame, it's faster to read one frame and throw it away.
                self.source.get_frame()
            if requested_eye_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(requested_eye_frame_idx)

            try:
                self.current_eye_frame = self.source.get_frame()
            except EndofVideoFileError:
                logger.info("Reached the end of the eye video for eye {}.".format(self.eyeid))

        # 2. dragging image
        if self.drag_offset is not None:
            pos = glfwGetCursorPos(glfwGetCurrentContext())
            pos = normalize(pos, glfwGetWindowSize(glfwGetCurrentContext()))
            # Position in img pixels
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))
            self.pos = int(pos[0] + self.drag_offset[0]), int(pos[1] + self.drag_offset[1])

        # 3. keep the overlay inside the image bounds; do this even when not dragging because the video size could change.
        video_size = round(self.current_eye_frame.width * scale), round(
            self.current_eye_frame.height * scale)

        # frame.img.shape[0] is the height, frame.img.shape[1] the width of the world frame
        self.pos = (min(frame.img.shape[1] - video_size[0],
                        max(self.pos[0], 0)),
                    min(frame.img.shape[0] - video_size[1],
                        max(self.pos[1], 0)))

        # 4. scale and flip the grayscale eye image
        eye_gray = self.current_eye_frame.gray
        eyeimage = cv2.resize(eye_gray, (0, 0), fx=scale, fy=scale)
        if self.hflip:
            eyeimage = np.fliplr(eyeimage)
        if self.vflip:
            eyeimage = np.flipud(eyeimage)

        # back to BGR so the ellipse can be drawn in color
        eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)
        if show_ellipses:
            try:
                pp = next(
                    (pp for pp in pupil_positions if pp['id'] == self.eyeid
                     and pp['timestamp'] == self.current_eye_frame.timestamp))
            except StopIteration:
                pass
            else:
                el = pp['ellipse']
                conf = int(pp.get('model_confidence', pp.get('confidence', 0.1)) * 255)
                center = [int(scale * v) for v in el['center']]
                el['axes'] = tuple(int(scale * v / 2) for v in el['axes'])
                el['angle'] = int(el['angle'])
                el_points = cv2.ellipse2Poly(tuple(center), el['axes'], el['angle'], 0, 360, 1)
                if self.hflip:
                    el_points = [(video_size[0] - x, y) for x, y in el_points]
                    center[0] = video_size[0] - center[0]
                if self.vflip:
                    el_points = [(x, video_size[1] - y) for x, y in el_points]
                    center[1] = video_size[1] - center[1]

                cv2.polylines(eyeimage, [np.asarray(el_points)], True, (0, 0, 255, conf),
                              thickness=int(np.ceil(2 * scale)))
                cv2.circle(eyeimage, tuple(center), int(5 * scale), (0, 0, 255, conf), thickness=-1)

        transparent_image_overlay(self.pos, eyeimage, frame.img, alpha)
Example 8
    def _render_overlay(self, target_image, overlay_image):
        overlay_origin = (self.config.origin.x.value, self.config.origin.y.value)
        pm.transparent_image_overlay(
            overlay_origin, overlay_image, target_image, self.config.alpha.value
        )
Example 9
    def update(self, frame, events):
        for eye_index in self.showeyes:
            requested_eye_frame_idx = self.eye_world_frame_map[eye_index][frame.index]

            # 1. do we need a new frame?
            if (requested_eye_frame_idx != self.eye_frames[eye_index].index or self.urActive) and self.recalculating == 0:
                # do we need to seek?
                if requested_eye_frame_idx == self.eye_cap[eye_index].get_frame_index() + 1:
                    # if we just need to seek by one frame, it's faster to read one frame and throw it away.
                    _ = self.eye_cap[eye_index].get_frame()
                if requested_eye_frame_idx != self.eye_cap[eye_index].get_frame_index():
                    # only now do I need to seek
                    self.eye_cap[eye_index].seek_to_frame(requested_eye_frame_idx)
                # read the new eye frame
                try:
                    self.eye_frames[eye_index] = self.eye_cap[eye_index].get_frame()
                except EndofVideoFileError:
                    logger.warning("Reached the end of the eye video for eye {}.".format(eye_index))
            else:
                # our old frame is still valid because we are upsampling
                pass

            # 2. dragging image
            if self.drag_offset[eye_index] is not None:
                pos = glfwGetCursorPos(glfwGetCurrentContext())
                pos = normalize(pos, glfwGetWindowSize(glfwGetCurrentContext()))
                pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))  # position in img pixels
                self.pos[eye_index][0] = pos[0] + self.drag_offset[eye_index][0]
                self.pos[eye_index][1] = pos[1] + self.drag_offset[eye_index][1]
            else:
                self.video_size = [round(self.eye_frames[eye_index].width * self.eye_scale_factor),
                                   round(self.eye_frames[eye_index].height * self.eye_scale_factor)]

            # cursor position relative to this eye overlay, in overlay pixels
            pos = glfwGetCursorPos(glfwGetCurrentContext())
            pos = normalize(pos, glfwGetWindowSize(glfwGetCurrentContext()))
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))
            pos = (int(pos[0] - self.pos[eye_index][0]), int(pos[1] - self.pos[eye_index][1]))
            self.mouse_img[eye_index] = pos

            if self.urActive == eye_index + 1:
                self.u_r[eye_index].move_vertex(self.u_r[eye_index].active_pt_idx, pos)

            if self.recalculating == 0:
                self.setPupilDetectors()
                pupil_detector = self.pupil_detectors[eye_index]
                glint_detector = self.glint_detectors[eye_index]

                settings = pupil_detector.get_settings()

                glint_settings = glint_detector.settings()
                self.setSettings(eye_index, settings, glint_settings)
                glint_detector.update()

                new_frame = self.eye_frames[eye_index]
                if self.algorithm == 1:
                    view = "algorithm"
                else:
                    view = False
                result, roi = pupil_detector.detect(new_frame, self.u_r[eye_index], view)
                glints = [[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 1]]  # glint_detector.glint(new_frame, eye_index, u_roi=self.u_r, pupil=result, roi=roi)

                if eye_index == 0:
                    self.gPool0.pupil_queue.put(result)
                else:
                    self.gPool1.pupil_queue.put(result)

                #3. keep in image bounds, do this even when not dragging because the image video_sizes could change.
                #self.pos[eye_index][1] = min(frame.img.shape[0]-self.video_size[1],max(self.pos[eye_index][1],0)) #frame.img.shape[0] is height, frame.img.shape[1] is width of screen
                #self.pos[eye_index][0] = min(frame.img.shape[1]-self.video_size[0],max(self.pos[eye_index][0],0))

                # draw the freshly detected pupil ellipse
                pts = cv2.ellipse2Poly((int(result['ellipse']['center'][0]), int(result['ellipse']['center'][1])),
                                       (int(result['ellipse']['axes'][0] / 2), int(result['ellipse']['axes'][1] / 2)),
                                       int(result['ellipse']['angle']), 0, 360, 15)
                cv2.polylines(self.eye_frames[eye_index].img, [pts], 1, (0, 255, 0))
                center = [int(x) for x in result['ellipse']['center']]
                cv2.circle(self.eye_frames[eye_index].img, tuple(center), 1, (0, 255, 0), thickness=15)

                #img = 255 - np.mean(new_frame.img, axis=2)
                #img = (img*255).astype(np.uint8)
                #ret = pyelse.run(img, 10)
                #x = ret.center.x
                #y = ret.center.y
                #cv2.circle(self.eye_frames[eye_index].img, (int(x), int(y)), True, (255,0,0), thickness=10)


                glints = np.array(glints)
                #if len(glints)>0 and glints[0][3]:
                #    for g in glints:
                #        cv2.circle(self.eye_frames[eye_index].img, (int(g[1]),int(g[2])), True,(255,0,0),thickness=5)

                if result['method'] == '3d c++':
                    eye_ball = result['projected_sphere']
                    try:
                        pts = cv2.ellipse2Poly((int(eye_ball['center'][0]), int(eye_ball['center'][1])),
                                               (int(eye_ball['axes'][0] / 2), int(eye_ball['axes'][1] / 2)),
                                               int(eye_ball['angle']), 0, 360, 8)
                    except ValueError:
                        pass
                    else:
                        cv2.polylines(self.eye_frames[eye_index].img, [pts], 1, (255, 0, 0))

            if self.show_ellipses and events['pupil_positions'] and self.recalculating == 0:
                # draw only if a matching pupil datum exists for this eye frame
                pd = next((pd for pd in events['pupil_positions']
                           if pd['id'] == eye_index and pd['timestamp'] == self.eye_frames[eye_index].timestamp),
                          None)
                if pd is not None:
                    if pd['method'] == '3d c++':
                        eye_ball = pd['projected_sphere']
                        try:
                            pts = cv2.ellipse2Poly((int(eye_ball['center'][0]), int(eye_ball['center'][1])),
                                                   (int(eye_ball['axes'][0] / 2), int(eye_ball['axes'][1] / 2)),
                                                   int(eye_ball['angle']), 0, 360, 8)
                        except ValueError:
                            pass
                        else:
                            cv2.polylines(self.eye_frames[eye_index].img, [pts], 1, (0, 255, 0))

                    el = pd['ellipse']
                    conf = int(pd.get('model_confidence', pd.get('confidence', 0.1)) * 255)
                    center = [int(v) for v in el['center']]
                    el['axes'] = tuple(int(v / 2) for v in el['axes'])
                    el['angle'] = int(el['angle'])
                    el_points = cv2.ellipse2Poly(tuple(center), el['axes'], el['angle'], 0, 360, 1)

                    cv2.polylines(self.eye_frames[eye_index].img, [np.asarray(el_points)], True,
                                  (0, 0, 255, conf), thickness=2)
                    cv2.circle(self.eye_frames[eye_index].img, tuple(center), int(5 * self.eye_scale_factor),
                               (0, 0, 255, conf), thickness=-1)

            rect = np.asarray(self.u_r[eye_index].rect)
            # rect[:, [0, 1]] = rect[:, [1, 0]]
            cv2.polylines(self.eye_frames[eye_index].img, [rect], True, (0, 255, 255), thickness=2)
            for corner in rect:
                cv2.circle(self.eye_frames[eye_index].img, (int(corner[0]), int(corner[1])),
                           int(5 * self.eye_scale_factor), (0, 255, 255), thickness=-1)

            # 3. keep in image bounds; do this even when not dragging because the video size could change.
            # frame.img.shape[0] is the height, frame.img.shape[1] the width of the world frame
            self.pos[eye_index][1] = min(frame.img.shape[0] - self.video_size[1], max(self.pos[eye_index][1], 0))
            self.pos[eye_index][0] = min(frame.img.shape[1] - self.video_size[0], max(self.pos[eye_index][0], 0))

            # 4. scale and flip the eye image (grayscale conversion is disabled here)
            # eye_gray = cv2.cvtColor(self.eye_frames[eye_index].img, cv2.COLOR_BGR2GRAY)
            eyeimage = cv2.resize(self.eye_frames[eye_index].img, (0, 0), fx=self.eye_scale_factor, fy=self.eye_scale_factor)
            if self.mirror[str(eye_index)]:
                eyeimage = np.fliplr(eyeimage)
            if self.flip[str(eye_index)]:
                eyeimage = np.flipud(eyeimage)

            # eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)

            # 5. finally overlay the image
            x, y = int(self.pos[eye_index][0]), int(self.pos[eye_index][1])
            transparent_image_overlay((x, y), eyeimage, frame.img, self.alpha)