Example #1
    def update(self,frame,events):
        # TODO: leave this to a dependency plugin loader
        if any(isinstance(p,Scan_Path) for p in self.g_pool.plugins):
            if self.sp_active:
                pass
            else:
                self.set_bar_ok(True)
                self.sp_active = True
        else:
            if self.sp_active:
                self.set_bar_ok(False)
                self.sp_active = False
            else:
                pass

        img = frame.img
        img_shape = img.shape[:-1][::-1] # width,height

        filtered_gaze = []

        for gp1, gp2 in zip(events['pupil_positions'][:-1], events['pupil_positions'][1:]):
            gp1_norm = denormalize(gp1['norm_gaze'], img_shape,flip_y=True)
            gp2_norm = denormalize(gp2['norm_gaze'], img_shape,flip_y=True)
            x_dist =  abs(gp1_norm[0] - gp2_norm[0])
            y_dist = abs(gp1_norm[1] - gp2_norm[1])
            man = x_dist + y_dist
            # print "man: %s\tdist: %s" %(man,self.distance)
            if man < self.distance:
                filtered_gaze.append(gp1)
            else:
                # print "filtered"
                pass

        events['pupil_positions'][:] = filtered_gaze[:]
        events['pupil_positions'].sort(key=lambda x: x['timestamp']) #this may be redundant...
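Nearly every example on this page calls the same pair of coordinate helpers from Pupil's methods module. As a minimal sketch (signatures and behaviour inferred from the call sites on this page, not copied from the library), they presumably look like this:

def normalize(pos, size, flip_y=False):
    """Map pixel coordinates to the [0, 1] range, given size = (width, height)."""
    width, height = size
    x = pos[0] / float(width)
    y = pos[1] / float(height)
    if flip_y:
        y = 1.0 - y
    return x, y

def denormalize(pos, size, flip_y=False):
    """Map normalized [0, 1] coordinates back to pixels, given size = (width, height)."""
    width, height = size
    x = pos[0] * width
    y = pos[1]
    if flip_y:
        y = 1.0 - y
    y = y * height
    return x, y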
Example #2
 def on_button(window,button, action, mods):
     g_pool.gui.update_button(button,action,mods)
     pos = glfwGetCursorPos(window)
     pos = normalize(pos,glfwGetWindowSize(window))
     pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
     for p in g_pool.plugins:
         p.on_click(pos,button,action)
Example #3
    def update(self,frame,recent_pupil_positions,events):

        falloff = self.falloff.value

        img = frame.img
        img_shape = img.shape[:-1][::-1]#width,height
        norm_gaze = [ng['norm_gaze'] for ng in recent_pupil_positions if ng['norm_gaze'] is not None]
        screen_gaze = [denormalize(ng,img_shape,flip_y=True) for ng in norm_gaze]


        overlay = np.ones(img.shape[:-1],dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in screen_gaze:
            try:
                overlay[int(gaze_point[1]),int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay,cv2.cv.CV_DIST_L2, 5)

        # fix for OpenCV binding inconsistency
        if type(out)==tuple:
            out = out[0]

        overlay =  1/(out/falloff+1)

        img *= cv2.cvtColor(overlay,cv2.COLOR_GRAY2RGB)
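A quick numeric check of the falloff weighting used above: after the distance transform, each overlay pixel holds its distance d to the nearest gaze point, and the weight 1 / (d / falloff + 1) is 1.0 at the gaze point, 0.5 at d == falloff, and approaches 0 far away, so multiplying the image by it produces a soft spotlight around recent gaze. The values below are only illustrative:

falloff = 20.0
for d in (0.0, 20.0, 100.0):
    print(d, 1.0 / (d / falloff + 1.0))  # prints 1.0, 0.5, ~0.167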
Example #4
 def on_button(button, pressed):
     if not atb.TwEventMouseButtonGLFW(button,pressed):
         if pressed:
             pos = glfwGetMousePos()
             pos = normalize(pos,glfwGetWindowSize())
             pos = denormalize(pos,(img.shape[1],img.shape[0]) ) #pos in img pixels
             ref.detector.new_ref(pos)
Example #5
 def recent_events(self, events):
     frame = events.get("frame")
     if not frame:
         return
     pts = [
         denormalize(pt["norm_pos"], frame.img.shape[:-1][::-1], flip_y=True)
         for pt in events.get("gaze", [])
         if pt["confidence"] >= self.g_pool.min_data_confidence
     ]
     bgra = (self.b * 255, self.g * 255, self.r * 255, self.a * 255)
     for pt in pts:
         lines = np.array(
             [
                 ((pt[0] - self.inner, pt[1]), (pt[0] - self.outer, pt[1])),
                 ((pt[0] + self.inner, pt[1]), (pt[0] + self.outer, pt[1])),
                 ((pt[0], pt[1] - self.inner), (pt[0], pt[1] - self.outer)),
                 ((pt[0], pt[1] + self.inner), (pt[0], pt[1] + self.outer)),
             ],
             dtype=np.int32,
         )
         cv2.polylines(
             frame.img,
             lines,
             isClosed=False,
             color=bgra,
             thickness=self.thickness,
             lineType=cv2.LINE_AA,
         )
Example #6
    def recent_events(self, events):
        frame = events.get("frame")
        if not frame:
            return
        falloff = self.falloff

        img = frame.img
        pts = [
            denormalize(pt["norm_pos"], frame.img.shape[:-1][::-1], flip_y=True)
            for pt in events.get("gaze", [])
            if pt["confidence"] >= self.g_pool.min_data_confidence
        ]

        overlay = np.ones(img.shape[:-1], dtype=img.dtype)

        # draw recent gaze positions as black dots on an overlay image.
        for gaze_point in pts:
            try:
                overlay[int(gaze_point[1]), int(gaze_point[0])] = 0
            except:
                pass

        out = cv2.distanceTransform(overlay, cv2.DIST_L2, 5)

        # fix for OpenCV binding inconsistency
        if type(out) == tuple:
            out = out[0]

        overlay = 1 / (out / falloff + 1)

        img[:] = np.multiply(
            img, cv2.cvtColor(overlay, cv2.COLOR_GRAY2RGB), casting="unsafe"
        )
Example #7
    def update(self,frame,events):
        if self.fill:
            thickness = -1
        else:
            thickness = self.thickness

        fixation_pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('fixations',[])]
        not_fixation_pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[])]


        if fixation_pts:
            for pt in fixation_pts:
                transparent_circle(frame.img, pt, radius=self.radius, color=(self.b, self.g, self.r, self.a), thickness=thickness)
        else:
            for pt in not_fixation_pts:
                transparent_circle(frame.img, pt, radius=7.0, color=(0.2, 0.0, 0.7, 0.5), thickness=thickness)
Example #8
 def uroi_on_mouse_button(button, action, mods):
     if g_pool.display_mode == "roi":
         if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
             g_pool.u_r.active_edit_pt = False
             # if the ROI interacts we don't want
             # the GUI to interact as well
             return
         elif action == glfw.GLFW_PRESS:
             x, y = glfw.glfwGetCursorPos(main_window)
             # pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
             x *= hdpi_factor
             y *= hdpi_factor
             pos = normalize((x, y), camera_render_size)
             if g_pool.flip:
                 pos = 1 - pos[0], 1 - pos[1]
             # Position in img pixels
             pos = denormalize(
                 pos, g_pool.capture.frame_size
             )  # Position in img pixels
             if g_pool.u_r.mouse_over_edit_pt(
                 pos, g_pool.u_r.handle_size, g_pool.u_r.handle_size
             ):
                 # if the ROI interacts we don't want
                 # the GUI to interact as well
                 return
Example #9
 def on_resize(window,w, h):
     active_window = glfwGetCurrentContext()
     glfwMakeContextCurrent(window)
     norm_size = normalize((w,h),glfwGetWindowSize(window))
     fb_size = denormalize(norm_size,glfwGetFramebufferSize(window))
     atb.TwWindowSize(*map(int,fb_size))
     glfwMakeContextCurrent(active_window)
Example #10
 def on_button(window,button, action, mods):
     if not atb.TwEventMouseButtonGLFW(button,action):
         pos = glfwGetCursorPos(window)
         pos = normalize(pos,glfwGetWindowSize(world_window))
         pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
         for p in g_pool.plugins:
             p.on_click(pos,button,action)
Example #11
    def recent_events(self, events):
        frame = events.get("frame")
        if not frame:
            return
        if self.drag_offset is not None:
            pos = glfwGetCursorPos(glfwGetCurrentContext())
            pos = normalize(pos, glfwGetWindowSize(glfwGetCurrentContext()))
            pos = denormalize(
                pos, (frame.img.shape[1], frame.img.shape[0])
            )  # Position in img pixels
            self.pos[0] = pos[0] + self.drag_offset[0]
            self.pos[1] = pos[1] + self.drag_offset[1]

        if self.watermark is not None:
            # keep in image bounds, do this even when not dragging because the image sizes could change.
            self.pos[1] = max(
                0,
                min(frame.img.shape[0] - self.watermark.shape[0], max(self.pos[1], 0)),
            )
            self.pos[0] = max(
                0,
                min(frame.img.shape[1] - self.watermark.shape[1], max(self.pos[0], 0)),
            )
            pos = int(self.pos[0]), int(self.pos[1])
            img = frame.img
            roi = (
                slice(pos[1], pos[1] + self.watermark.shape[0]),
                slice(pos[0], pos[0] + self.watermark.shape[1]),
            )
            w_roi = slice(0, img.shape[0] - pos[1]), slice(0, img.shape[1] - pos[0])
            img[roi] = self.watermark[w_roi] * self.alpha_mask[w_roi] + img[roi] * (
                1 - self.alpha_mask[w_roi]
            )
Example #12
    def update(self, frame, events):
        img = frame.img
        img_shape = img.shape[:-1][::-1]  # width,height

        succeeding_frame = frame.index - self.prev_frame_idx == 1
        same_frame = frame.index == self.prev_frame_idx
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # vars for calcOpticalFlowPyrLK
        lk_params = dict(
            winSize=(90, 90), maxLevel=3, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.03)
        )

        updated_past_gaze = []

        # let's update past gaze using optical flow: this is like sticking the gaze points onto the pixels of the img.
        if self.past_gaze_positions and succeeding_frame:
            past_screen_gaze = np.array(
                [denormalize(ng["norm_pos"], img_shape, flip_y=True) for ng in self.past_gaze_positions],
                dtype=np.float32,
            )
            new_pts, status, err = cv2.calcOpticalFlowPyrLK(
                self.prev_gray, gray_img, past_screen_gaze, None, minEigThreshold=0.005, **lk_params
            )
            for gaze, new_gaze_pt, s, e in zip(self.past_gaze_positions, new_pts, status, err):
                if s:
                    # print "norm,updated",gaze['norm_gaze'], normalize(new_gaze_pt,img_shape[:-1],flip_y=True)
                    gaze["norm_pos"] = normalize(new_gaze_pt, img_shape, flip_y=True)
                    updated_past_gaze.append(gaze)
                    # logger.debug("updated gaze")

                else:
                    # logger.debug("dropping gaze")
                    # Since we will replace self.past_gaze_positions later,
                    # not appending to updated_past_gaze is like deleting this data point.
                    pass
        else:
            # we must be seeking (do not try optical flow) or pausing: see below.
            pass

        if same_frame:
            # paused
            # re-use last result
            events["gaze_positions"][:] = self.past_gaze_positions[:]
        else:
            # trim gaze that is too old
            if events["gaze_positions"]:
                now = events["gaze_positions"][0]["timestamp"]
                cutoff = now - self.timeframe
                updated_past_gaze = [g for g in updated_past_gaze if g["timestamp"] > cutoff]

            # inject the scan path gaze points into recent_gaze_positions
            events["gaze_positions"][:] = updated_past_gaze + events["gaze_positions"]
            events["gaze_positions"].sort(key=lambda x: x["timestamp"])  # this may be redundant...

        # update info for next frame.
        self.prev_gray = gray_img
        self.prev_frame_idx = frame.index
        self.past_gaze_positions = events["gaze_positions"]
Example #13
 def on_button(button, pressed):
     if not atb.TwEventMouseButtonGLFW(button,pressed):
         if pressed:
             pos = glfwGetMousePos()
             pos = normalize(pos,glfwGetWindowSize())
             pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
             for p in g.plugins:
                 p.on_click(pos)
Example #14
 def on_button(window, button, action, mods):
     g_pool.gui.update_button(button, action, mods)
     pos = glfw.glfwGetCursorPos(window)
     pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
     # Position in img pixels
     pos = denormalize(pos, g_pool.capture.frame_size)
     for p in g_pool.plugins:
         p.on_click(pos, button, action)
Example #15
 def save_new_format(self, row, shape, flip):
     pos = [row['norm_pos_x'], row['norm_pos_y']]
     x, y = denormalize(pos, shape, flip_y=flip)
     #y = denormalize(row['norm_pos_y'], shape, flip_y=flip)
     #x = row['norm_pos_x']
     #y = row['norm_pos_y']
     self.ret.append((int(row['index']), row['confidence'], x, y))
     return True
Example #16
 def gl_display(self):
     if self.recent_fixation:
         fs = self.g_pool.capture.frame_size  # frame size (width, height)
         pt = denormalize(self.recent_fixation["norm_pos"], fs, flip_y=True)
         draw_circle(
             pt, radius=48.0, stroke_width=10.0, color=RGBA(1.0, 1.0, 0.0, 1.0)
         )
         self.glfont.draw_text(pt[0] + 48.0, pt[1], str(self.recent_fixation["id"]))
Example #17
def current_mouse_pos(window, camera_render_size, frame_size):
    hdpi_fac = getHDPIFactor(window)
    x, y = glfwGetCursorPos(glfwGetCurrentContext())
    pos = x * hdpi_fac, y * hdpi_fac
    pos = normalize(pos, camera_render_size)
    # Position in img pixels
    pos = denormalize(pos, frame_size)
    return (int(pos[0]), int(pos[1]))
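The mouse and click handlers on this page all perform the same round trip as current_mouse_pos above: scale the cursor position by the HiDPI factor, normalize it against the rendered camera view, then denormalize against the camera frame size to obtain image pixels. A small worked example with made-up sizes, reusing the helper sketch after Example #1:

camera_render_size = (1280, 720)  # rendered camera view in framebuffer pixels (hypothetical)
frame_size = (1920, 1080)         # camera frame in image pixels (hypothetical)
hdpi_factor = 2.0                 # framebuffer px per window px on a HiDPI screen

cursor = (320.0, 180.0)                                   # cursor in window coordinates
pos = (cursor[0] * hdpi_factor, cursor[1] * hdpi_factor)  # -> (640.0, 360.0) framebuffer px
pos = normalize(pos, camera_render_size)                  # -> (0.5, 0.5)
pos = denormalize(pos, frame_size)                        # -> (960.0, 540.0) image pixels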
Example #18
    def update(self,frame,recent_pupil_positions,events):
        img = frame.img
        self.img_shape = frame.img.shape

        if self.robust_detection.value:
            self.markers = detect_markers_robust(img,
                                                grid_size = 5,
                                                prev_markers=self.markers,
                                                min_marker_perimeter=self.min_marker_perimeter,
                                                aperture=self.aperture.value,
                                                visualize=0,
                                                true_detect_every_frame=3)
        else:
            self.markers = detect_markers_simple(img,
                                                grid_size = 5,
                                                min_marker_perimeter=self.min_marker_perimeter,
                                                aperture=self.aperture.value,
                                                visualize=0)

        # locate surfaces
        for s in self.surfaces:
            s.locate(self.markers)
            if s.detected:
                events.append({'type':'marker_ref_surface','name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp})

        if self.draw_markers.value:
            draw_markers(img,self.markers)

        # edit surfaces by user
        if self.surface_edit_mode:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window))
            pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels

            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    pos = normalize(pos,(self.img_shape[1],self.img_shape[0]),flip_y=True)
                    new_pos =  s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)

        #map recent gaze onto detected surfaces used for pupil server
        for s in self.surfaces:
            if s.detected:
                s.gaze_on_srf = []
                for p in recent_pupil_positions:
                    if p['norm_pupil'] is not None:
                        gp_on_s = tuple(s.img_to_ref_surface(np.array(p['norm_gaze'])))
                        p['realtime gaze on '+s.name] = gp_on_s
                        s.gaze_on_srf.append(gp_on_s)


        #allow surfaces to open/close windows
        for s in self.surfaces:
            if s.window_should_close:
                s.close_window()
            if s.window_should_open:
                s.open_window()
Example #19
    def update(self,frame,events):
        if self.fill:
            thickness = -1
        else:
            thickness = self.thickness

        pts = [denormalize(pt['norm_gaze'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events['pupil_positions'] if pt['norm_gaze'] is not None]
        for pt in pts:
            transparent_circle(frame.img, pt, radius=self.radius, color=(self.b, self.g, self.r, self.a), thickness=thickness)
Example #20
 def update(self, frame, events):
     pts = [
         denormalize(pt["norm_pos"], frame.img.shape[:-1][::-1], flip_y=True)
         for pt in events.get("gaze_positions", [])
     ]
     bgra = (self.b * 255, self.g * 255, self.r * 255, self.a * 255)
     if pts:
         pts = np.array([pts], dtype=np.int32)
         cv2.polylines(frame.img, pts, isClosed=False, color=bgra, thickness=self.thickness, lineType=cv2.cv.CV_AA)
Example #21
 def recent_events(self, events):
     frame = events.get('frame')
     if not frame:
         return
     pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]
     bgra = (self.b*255,self.g*255,self.r*255,self.a*255)
     if pts:
         pts = np.array([pts],dtype=np.int32)
         cv2.polylines(frame.img, pts, isClosed=False, color=bgra, thickness=self.thickness, lineType=cv2.LINE_AA)
Example #22
 def recent_events(self, events):
     frame = events.get('frame')
     if not frame:
         return
     pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]
     bgra = (self.b*255,self.g*255,self.r*255,self.a*255)
     if pts:
         pts = np.array([pts],dtype=np.int32)
         cv2.polylines(frame.img, pts, isClosed=False, color=bgra, thickness=self.thickness, lineType=cv2.LINE_AA)
Example #23
    def update(self,frame,events):
        if self.fill:
            thickness = -1
        else:
            thickness = self.thickness

        pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[]) if pt['confidence']>=self.g_pool.min_data_confidence]
        for pt in pts:
            transparent_circle(frame.img, pt, radius=self.radius, color=(self.b, self.g, self.r, self.a), thickness=thickness)
Example #24
 def on_button(window, button, action, mods):
     if not atb.TwEventMouseButtonGLFW(button, action):
         pos = glfwGetCursorPos(window)
         pos = normalize(pos, glfwGetWindowSize(main_window))
         pos = denormalize(pos,
                           (frame.img.shape[1],
                            frame.img.shape[0]))  # Position in img pixels
         for p in g.plugins:
             p.on_click(pos, button, action)
Example #25
def _csv_exported_gaze_data(gaze_positions, destination_folder, export_range,
                            timestamps, capture):

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(timestamps,
                                    (export_start, export_stop - 1))
    gaze_section = gaze_positions.init_dict_for_window(export_window)

    # find closest world idx for each gaze datum
    gaze_world_idc = pm.find_closest(timestamps, gaze_section["data_ts"])

    csv_header = (
        "GazeTimeStamp",
        "MediaTimeStamp",
        "MediaFrameIndex",
        "Gaze3dX",
        "Gaze3dY",
        "Gaze3dZ",
        "Gaze2dX",
        "Gaze2dY",
        "PupilDiaLeft",
        "PupilDiaRight",
        "Confidence",
    )

    csv_rows = []

    for gaze_pos, media_idx in zip(gaze_section["data"], gaze_world_idc):
        media_timestamp = timestamps[media_idx]
        try:
            pupil_dia = {}
            for p in gaze_pos["base_data"]:
                pupil_dia[p["id"]] = p["diameter_3d"]

            pixel_pos = denormalize(gaze_pos["norm_pos"],
                                    capture.frame_size,
                                    flip_y=True)
            undistorted3d = capture.intrinsics.unprojectPoints(pixel_pos)
            undistorted2d = capture.intrinsics.projectPoints(
                undistorted3d, use_distortion=False)

            data = (
                gaze_pos["timestamp"],
                media_timestamp,
                media_idx - export_range[0],
                *gaze_pos["gaze_point_3d"],  # Gaze3dX/Y/Z
                *undistorted2d.flat,  # Gaze2dX/Y
                pupil_dia.get(1, 0.0),  # PupilDiaLeft
                pupil_dia.get(0, 0.0),  # PupilDiaRight
                gaze_pos["confidence"],  # Confidence
            )
        except KeyError:
            raise _iMotionsExporterNo3DGazeDataError()

        csv_rows.append(data)

    return csv_header, csv_rows
Example #26
    def recent_events(self, events):
        frame = events.get('frame')
        if not frame:
            return
        if self.fill:
            thickness = -1
        else:
            thickness = self.thickness

        fixation_pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('fixations',[])]
        not_fixation_pts = [denormalize(pt['norm_pos'],frame.img.shape[:-1][::-1],flip_y=True) for pt in events.get('gaze_positions',[])]

        if fixation_pts:
            for pt in fixation_pts:
                transparent_circle(frame.img, pt, radius=self.radius, color=(self.b, self.g, self.r, self.a), thickness=thickness)
        else:
            for pt in not_fixation_pts:
                transparent_circle(frame.img, pt, radius=7.0, color=(0.2, 0.0, 0.7, 0.5), thickness=thickness)
Example #27
 def on_button(window, button, action, mods):
     g_pool.gui.update_button(button, action, mods)
     pos = glfw.glfwGetCursorPos(window)
     pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
     pos = denormalize(
         pos,
         (frame.img.shape[1], frame.img.shape[0]))  # Position in img pixels
     for p in g_pool.plugins:
         p.on_click(pos, button, action)
Example #28
    def recent_events(self, events):
        frame = events.get('frame')
        if not frame:
            return
        img = frame.img
        img_shape = img.shape[:-1][::-1] # width,height

        succeeding_frame = frame.index-self.prev_frame_idx == 1
        same_frame = frame.index == self.prev_frame_idx
        gray_img = frame.gray

        #vars for calcOpticalFlowPyrLK
        lk_params = dict( winSize  = (90, 90),
                  maxLevel = 3,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.03))

        updated_past_gaze = []

        # let's update past gaze using optical flow: this is like sticking the gaze points onto the pixels of the img.
        if self.past_gaze_positions and succeeding_frame:
            past_screen_gaze = np.array([denormalize(ng['norm_pos'] ,img_shape,flip_y=True) for ng in self.past_gaze_positions],dtype=np.float32)
            new_pts, status, err = cv2.calcOpticalFlowPyrLK(self.prev_gray,gray_img,past_screen_gaze,None,minEigThreshold=0.005,**lk_params)
            for gaze,new_gaze_pt,s,e in zip(self.past_gaze_positions,new_pts,status,err):
                if s:
                    # print "norm,updated",gaze['norm_gaze'], normalize(new_gaze_pt,img_shape[:-1],flip_y=True)
                    gaze['norm_pos'] = normalize(new_gaze_pt,img_shape,flip_y=True)
                    updated_past_gaze.append(gaze)
                    # logger.debug("updated gaze")

                else:
                    # logger.debug("dropping gaze")
                    # Since we will replace self.past_gaze_positions later,
                    # not appending to updated_past_gaze is like deleting this data point.
                    pass
        else:
            # we must be seeking (do not try optical flow) or pausing: see below.
            pass

        if same_frame:
            # paused
            # re-use last result
            events['gaze_positions'][:] = self.past_gaze_positions[:]
        else:
            # trim gaze that is too old
            if events['gaze_positions']:
                now = events['gaze_positions'][0]['timestamp']
                cutoff = now-self.timeframe
                updated_past_gaze = [g for g in updated_past_gaze if g['timestamp']>cutoff]

            #inject the scan path gaze points into recent_gaze_positions
            events['gaze_positions'][:] = updated_past_gaze + events['gaze_positions']
            events['gaze_positions'].sort(key=lambda x: x['timestamp']) #this may be redundant...

        #update info for next frame.
        self.prev_gray = gray_img
        self.prev_frame_idx = frame.index
        self.past_gaze_positions = events['gaze_positions']
Example #29
 def on_pos(window, x, y):
     x, y = x * hdpi_factor, y * hdpi_factor
     g_pool.gui.update_mouse(x, y)
     pos = x, y
     pos = normalize(pos, g_pool.camera_render_size)
     # Position in img pixels
     pos = denormalize(pos, g_pool.capture.frame_size)
     for p in g_pool.plugins:
         p.on_pos(pos)
Example #30
 def gl_display(self):
     if self.fixation is not None:
         abs_fixation = denormalize(self.fixation,
                                    self.g_pool.capture.frame_size,
                                    flip_y=True)
         ellipse = cv2.ellipse2Poly(
             (int(abs_fixation[0]), int(abs_fixation[1])), (25, 25), 0, 0,
             360, 15)
         draw_gl_polyline(ellipse, (0., 0., .5, .75), 'Polygon')
Example #31
 def on_pos(window, x, y):
     x, y = x * hdpi_factor, y * hdpi_factor
     g_pool.gui.update_mouse(x, y)
     pos = x, y
     pos = normalize(pos, g_pool.camera_render_size)
     # Position in img pixels
     pos = denormalize(pos, g_pool.capture.frame_size)
     for p in g_pool.plugins:
         p.on_pos(pos)
Example #32
 def on_pos(window, x, y):
     x, y = gl_utils.window_coordinate_to_framebuffer_coordinate(
         window, x, y, cached_scale=None)
     g_pool.gui.update_mouse(x, y)
     pos = x, y
     pos = normalize(pos, g_pool.camera_render_size)
     # Position in img pixels
     pos = denormalize(pos, g_pool.capture.frame_size)
     for p in g_pool.plugins:
         p.on_pos(pos)
Example #33
    def update(self,frame,recent_pupil_positions,events):
        color = map(lambda x:int(x*255),self.color)
        color = color[::-1]

        thickness = self.thickness.value

        pts = [denormalize(pt['norm_gaze'],frame.img.shape[:-1][::-1],flip_y=True) for pt in recent_pupil_positions if pt['norm_gaze'] is not None]
        if pts:
            pts = np.array([pts],dtype=np.int32)
            cv2.polylines(frame.img, pts, isClosed=False, color=color, thickness=thickness, lineType=cv2.cv.CV_AA)
Example #34
        def on_pos(window,x, y):
            hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0]/glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.update_mouse(x*hdpi_factor,y*hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x,y),glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,(frame.width,frame.height) )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)
Example #35
 def on_resize(window, w, h):
     active_window = glfwGetCurrentContext()
     glfwMakeContextCurrent(window)
     norm_size = normalize((w, h), glfwGetWindowSize(window))
     fb_size = denormalize(norm_size, glfwGetFramebufferSize(window))
     atb.TwWindowSize(*map(int, fb_size))
     adjust_gl_view(w, h, window)
     glfwMakeContextCurrent(active_window)
     for p in g_pool.plugins:
         p.on_window_resize(window, w, h)
Example #36
 def on_resize(window,w, h):
     active_window = glfwGetCurrentContext()
     glfwMakeContextCurrent(window)
     norm_size = normalize((w,h),glfwGetWindowSize(window))
     fb_size = denormalize(norm_size,glfwGetFramebufferSize(window))
     atb.TwWindowSize(*map(int,fb_size))
     adjust_gl_view(w,h,window)
     glfwMakeContextCurrent(active_window)
     for p in g_pool.plugins:
         p.on_window_resize(window,w,h)
Example #37
    def update(self,frame,recent_pupil_positions,events):
        color = map(lambda x:int(x*255),self.color)
        color = color[::-1]

        thickness = self.thickness.value

        pts = [denormalize(pt['norm_gaze'],frame.img.shape[:-1][::-1],flip_y=True) for pt in recent_pupil_positions if pt['norm_gaze'] is not None]
        if pts:
            pts = np.array([pts],dtype=np.int32)
            cv2.polylines(frame.img, pts, isClosed=False, color=color, thickness=thickness, lineType=cv2.cv.CV_AA)
Example #38
 def gl_display(self):
     if self.recent_fixation:
         fs = self.g_pool.capture.frame_size  # frame size (width, height)
         pt = denormalize(self.recent_fixation["norm_pos"], fs, flip_y=True)
         draw_circle(pt,
                     radius=48.0,
                     stroke_width=10.0,
                     color=RGBA(1.0, 1.0, 0.0, 1.0))
         self.glfont.draw_text(pt[0] + 48.0, pt[1],
                               str(self.recent_fixation["id"]))
Example #39
        def on_pos(window,x, y):
            hdpi_factor = float(glfw.glfwGetFramebufferSize(window)[0]/glfw.glfwGetWindowSize(window)[0])
            g_pool.gui.update_mouse(x*hdpi_factor,y*hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x,y),glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,(frame.width,frame.height) )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)
Example #40
        def on_pos(window, x, y):
            x *= hdpi_factor
            y *= hdpi_factor
            g_pool.gui.update_mouse(x, y)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)
Example #41
    def update(self,frame,recent_pupil_positions,events):
        color = map(lambda x:int(x*255),self.color)
        color = color[:3][::-1]+color[-1:]
        thickness = self.thickness.value
        inner = self.inner.value
        outer = self.outer.value

        pts = [denormalize(pt['norm_gaze'],frame.img.shape[:-1][::-1],flip_y=True) for pt in recent_pupil_positions if pt['norm_gaze'] is not None]
        for pt in pts:
            lines =  np.array( [((pt[0]-inner,pt[1]),(pt[0]-outer,pt[1])),((pt[0]+inner,pt[1]),(pt[0]+outer,pt[1])) , ((pt[0],pt[1]-inner),(pt[0],pt[1]-outer)) , ((pt[0],pt[1]+inner),(pt[0],pt[1]+outer))],dtype=np.int32 )
            cv2.polylines(frame.img, lines, isClosed=False, color=color, thickness=thickness, lineType=cv2.cv.CV_AA)
Example #42
        def on_pos(window, x, y):
            hdpi_factor = glfw.glfwGetFramebufferSize(
                window)[0] / glfw.glfwGetWindowSize(window)[0]
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,g_pool.capture.frame_size )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)
Example #43
File: eye.py Project: sleip87/pupil
        def on_pos(window, x, y):
            hdpi_factor = glfw.glfwGetFramebufferSize(
                window)[0] / glfw.glfwGetWindowSize(window)[0]
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1-pos[0],1-pos[1]
                pos = denormalize(pos,g_pool.capture.frame_size )
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx,pos)
Example #44
        def on_pos(window, x, y):
            x *= hdpi_factor
            y *= hdpi_factor
            g_pool.gui.update_mouse(x, y)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)
Example #45
 def gl_display(self):
     feature = self.__previously_detected_feature
     if feature:
         recent_frame_size = self.g_pool.capture.frame_size
         point = denormalize(feature["norm_pos"],
                             recent_frame_size,
                             flip_y=True)
         self._draw_circle_filled(
             tuple(point),
             size=self._RADIUS_OF_CIRCLE_DISPLAYED / 2,
             color=RGBA(0.0, 1.0, 0.0, 0.5),
         )
Example #46
File: eye.py Project: N-M-T/pupil
        def consume_events_and_render_buffer():
            glfw.make_context_current(main_window)
            clear_gl_screen()

            if all(c > 0 for c in g_pool.camera_render_size):
                glViewport(0, 0, *g_pool.camera_render_size)
                for p in g_pool.plugins:
                    p.gl_display()

            glViewport(0, 0, *window_size)
            # render graphs
            fps_graph.draw()
            cpu_graph.draw()

            # render GUI
            try:
                clipboard = glfw.get_clipboard_string(main_window).decode()
            except (AttributeError, glfw.GLFWError):
                # clipboard is None, might happen on startup
                clipboard = ""
            g_pool.gui.update_clipboard(clipboard)
            user_input = g_pool.gui.update()
            if user_input.clipboard != clipboard:
                # only write to clipboard if content changed
                glfw.set_clipboard_string(main_window, user_input.clipboard)

            for button, action, mods in user_input.buttons:
                x, y = glfw.get_cursor_pos(main_window)
                pos = gl_utils.window_coordinate_to_framebuffer_coordinate(
                    main_window, x, y, cached_scale=None
                )
                pos = normalize(pos, g_pool.camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                # Position in img pixels
                pos = denormalize(pos, g_pool.capture.frame_size)

                for plugin in g_pool.plugins:
                    if plugin.on_click(pos, button, action):
                        break

            for key, scancode, action, mods in user_input.keys:
                for plugin in g_pool.plugins:
                    if plugin.on_key(key, scancode, action, mods):
                        break

            for char_ in user_input.chars:
                for plugin in g_pool.plugins:
                    if plugin.on_char(char_):
                        break

            # update screen
            glfw.swap_buffers(main_window)
Example #47
 def on_pos(window, x, y):
     hdpi_factor = float(
         glfw.glfwGetFramebufferSize(window)[0] /
         glfw.glfwGetWindowSize(window)[0])
     x, y = x * hdpi_factor, y * hdpi_factor
     g_pool.gui.update_mouse(x, y)
     pos = x, y
     pos = normalize(pos, g_pool.camera_render_size)
     # Position in img pixels
     pos = denormalize(pos, g_pool.capture.frame_size)
     for p in g_pool.plugins:
         p.on_pos(pos)
Example #48
    def update(self,frame,recent_pupil_positions,events):
        color = map(lambda x:int(x*255),self.color)
        color = color[:3][::-1]+color[-1:]
        if self.fill.value:
            thickness= -1
        else:
            thickness = self.thickness.value

        radius = self.radius.value
        pts = [denormalize(pt['norm_gaze'],frame.img.shape[:-1][::-1],flip_y=True) for pt in recent_pupil_positions if pt['norm_gaze'] is not None]
        for pt in pts:
            transparent_circle(frame.img, pt, radius=radius, color=color, thickness=thickness)
Example #49
    def update(self,frame,recent_pupil_positions,events):
        img = frame.img
        self.img_shape = frame.img.shape
        if self.robust_detection.value:
            self.markers = detect_markers_robust(img,grid_size = 5,
                                                    prev_markers=self.markers,
                                                    min_marker_perimeter=self.min_marker_perimeter,
                                                    aperture=self.aperture.value,
                                                    visualize=0,
                                                    true_detect_every_frame=3)
        else:
            self.markers = detect_markers_simple(img,grid_size = 5,min_marker_perimeter=self.min_marker_perimeter,aperture=self.aperture.value,visualize=0)

        if self.draw_markers.value:
            draw_markers(img,self.markers)

        # print self.markers

        for s in self.surfaces:
            s.locate(self.markers)
            if s.detected:
                events.append({'type':'marker_ref_surface','name':s.name,'m_to_screen':s.m_to_screen,'m_from_screen':s.m_from_screen, 'timestamp':frame.timestamp})

        if self.surface_edit_mode:
            window = glfwGetCurrentContext()
            pos = glfwGetCursorPos(window)
            pos = normalize(pos,glfwGetWindowSize(window))
            pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels

            for s,v_idx in self.edit_surfaces:
                if s.detected:
                    pos = normalize(pos,(self.img_shape[1],self.img_shape[0]),flip_y=True)
                    new_pos =  s.img_to_ref_surface(np.array(pos))
                    s.move_vertex(v_idx,new_pos)

        #map recent gaze onto detected surfaces used for pupil server
        for p in recent_pupil_positions:
            if p['norm_pupil'] is not None:
                for s in self.surfaces:
                    if s.detected:
                        p['realtime gaze on '+s.name] = tuple(s.img_to_ref_surface(np.array(p['norm_gaze'])))


        if self._window:
            # save a local copy for when we display gaze for debugging on ref surface
            self.recent_pupil_positions = recent_pupil_positions


        if self.window_should_close:
            self.close_window()

        if self.window_should_open:
            self.open_window()
Example #50
    def update(self, frame, recent_pupil_positions, events):

        if any(isinstance(p, Scan_Path) for p in self.g_pool.plugins):
            if self.sp_active:
                pass
            else:
                self.set_bar_ok(True)
                self.sp_active = True
        else:
            if self.sp_active:
                self.set_bar_ok(False)
                self.sp_active = False

            else:
                pass

        img = frame.img
        img_shape = img.shape[:-1][::-1]  # width,height

        filtered_gaze = []

        for gp1, gp2 in zip(recent_pupil_positions[:-1],
                            recent_pupil_positions[1:]):
            gp1_norm = denormalize(gp1['norm_gaze'], img_shape, flip_y=True)
            gp2_norm = denormalize(gp2['norm_gaze'], img_shape, flip_y=True)
            x_dist = abs(gp1_norm[0] - gp2_norm[0])
            y_dist = abs(gp1_norm[1] - gp2_norm[1])
            man = x_dist + y_dist
            # print "man: %s\tdist: %s" %(man,self.distance.value)
            if man < self.distance.value:
                filtered_gaze.append(gp1)
            else:
                # print "filtered"
                pass

        recent_pupil_positions[:] = filtered_gaze[:]
        recent_pupil_positions.sort(
            key=lambda x: x['timestamp'])  #this may be redundant...
Example #51
    def on_click(self, pos, button, action):
        if self.mode == 'Show Markers and Surfaces':
            if action == GLFW_PRESS:
                for s in self.surfaces:
                    toggle = s.get_mode_toggle(pos, self.img_shape)
                    if toggle == 'surface_mode':
                        if s in self.edit_surfaces:
                            self.edit_surfaces.remove(s)
                        else:
                            self.edit_surfaces.append(s)
                    elif toggle == 'marker_mode':
                        if self.marker_edit_surface == s:
                            self.marker_edit_surface = None
                        else:
                            self.marker_edit_surface = s

            if action == GLFW_RELEASE:
                self.edit_surf_verts = []

            elif action == GLFW_PRESS:
                surf_verts = ((0., 0.), (1., 0.), (1., 1.), (0., 1.))
                x, y = pos
                for s in self.edit_surfaces:
                    if s.detected and s.defined:
                        for (vx, vy), i in zip(
                                s.ref_surface_to_img(np.array(surf_verts)),
                                range(4)):
                            vx, vy = denormalize(
                                (vx, vy),
                                (self.img_shape[1], self.img_shape[0]),
                                flip_y=True)
                            if sqrt((x - vx)**2 +
                                    (y - vy)**2) < 15:  #img pixels
                                self.edit_surf_verts.append((s, i))
                                return

                if self.marker_edit_surface:
                    for m in self.markers:
                        if m['perimeter'] >= self.min_marker_perimeter:
                            vx, vy = m['centroid']
                            if sqrt((x - vx)**2 + (y - vy)**2) < 15:
                                if m['id'] in self.marker_edit_surface.markers:
                                    self.marker_edit_surface.remove_marker(m)
                                else:
                                    self.marker_edit_surface.add_marker(
                                        m, self.markers,
                                        self.camera_calibration,
                                        self.min_marker_perimeter,
                                        self.min_id_confidence)
Example #52
    def map_gaze_and_fixation_events(self,
                                     events,
                                     camera_model,
                                     trans_matrix=None):
        """
        Map a list of gaze or fixation events onto the surface and return the
        corresponding list of gaze/fixation on surface events.

        Args:
            events: List of gaze or fixation events.
            camera_model: Camera Model object.
            trans_matrix: The transformation matrix defining the location of
            the surface. If `None`, the current transformation matrix saved in the
            Surface object will be used.

        Returns:
            List of gaze or fixation on surface events.

        """
        results = []
        for event in events:
            gaze_norm_pos = event["norm_pos"]
            gaze_img_point = methods.denormalize(gaze_norm_pos,
                                                 camera_model.resolution,
                                                 flip_y=True)
            gaze_img_point = np.array(gaze_img_point)
            surf_norm_pos = self.map_to_surf(
                gaze_img_point,
                camera_model,
                compensate_distortion=True,
                trans_matrix=trans_matrix,
            )
            on_srf = bool((0 <= surf_norm_pos[0] <= 1)
                          and (0 <= surf_norm_pos[1] <= 1))

            mapped_datum = {
                "topic": f"{event['topic']}_on_surface",
                "norm_pos": surf_norm_pos.tolist(),
                "confidence": event["confidence"],
                "on_surf": on_srf,
                "base_data": (event["topic"], event["timestamp"]),
                "timestamp": event["timestamp"],
            }
            if event["topic"] == "fixations":
                mapped_datum["id"] = event["id"]
                mapped_datum["duration"] = event["duration"]
                mapped_datum["dispersion"] = event["dispersion"]
            results.append(mapped_datum)
        return results
Example #53
    def gl_display_in_window(self):
        active_window = glfwGetCurrentContext()
        glfwMakeContextCurrent(self._window)

        clear_gl_screen()

        # Set Matrix using gluOrtho2D to include padding for the marker of radius r
        #
        ############################
        #            r             #
        # 0,0##################w,h #
        # #                      # #
        # #                      # #
        #r#                      #r#
        # #                      # #
        # #                      # #
        # 0,h##################w,h #
        #            r             #
        ############################


        hdpi_factor = glfwGetFramebufferSize(self._window)[0]/glfwGetWindowSize(self._window)[0]
        r = 110*self.marker_scale * hdpi_factor
        gl.glMatrixMode(gl.GL_PROJECTION)
        gl.glLoadIdentity()
        p_window_size = glfwGetWindowSize(self._window)
        # compensate for radius of marker
        gl.glOrtho(-r*.6,p_window_size[0]+r*.6,p_window_size[1]+r*.7,-r*.7 ,-1,1)
        # Switch back to Model View Matrix
        gl.glMatrixMode(gl.GL_MODELVIEW)
        gl.glLoadIdentity()

        screen_pos = denormalize(self.display_pos,p_window_size,flip_y=True)
        alpha = interp_fn(self.screen_marker_state,0.,1.,float(self.sample_duration+self.lead_in+self.lead_out),float(self.lead_in),float(self.sample_duration+self.lead_in))

        draw_concentric_circles(screen_pos,r,6,alpha)
        #some feedback on the detection state

        if self.detected and self.on_position:
            draw_points([screen_pos],size=5,color=RGBA(0.,1.,0.,alpha),sharpness=0.95)
        else:
            draw_points([screen_pos],size=5,color=RGBA(1.,0.,0.,alpha),sharpness=0.95)

        if self.clicks_to_close <5:
            self.glfont.set_size(int(p_window_size[0]/30.))
            self.glfont.draw_text(p_window_size[0]/2.,p_window_size[1]/4.,'Touch %s more times to cancel calibration.'%self.clicks_to_close)

        glfwSwapBuffers(self._window)
        glfwMakeContextCurrent(active_window)
Example #54
        def on_button(window,button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    return # if the ROI interacts we don't want the GUI to interact as well
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos,glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1-pos[0],1-pos[1]
                    pos = denormalize(pos,(frame.width,frame.height)) # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos,g_pool.u_r.handle_size+40,g_pool.u_r.handle_size+40):
                        return # if the ROI interacts we don't want the GUI to interact as well

            g_pool.gui.update_button(button,action,mods)
Example #55
 def on_click(self,pos,button,action):
     if self.mode == "Surface edit mode":
         if self.edit_surfaces:
             if action == GLFW_RELEASE:
                 self.edit_surfaces = []
         # no surface verts in edit mode, let's see if the cursor is close to one:
         else:
             if action == GLFW_PRESS:
                 surf_verts = ((0.,0.),(1.,0.),(1.,1.),(0.,1.))
                 x,y = pos
                 for s in self.surfaces:
                     if s.detected and s.defined:
                         for (vx,vy),i in zip(s.ref_surface_to_img(np.array(surf_verts)),range(4)):
                             vx,vy = denormalize((vx,vy),(self.img_shape[1],self.img_shape[0]),flip_y=True)
                             if sqrt((x-vx)**2 + (y-vy)**2) <15: #img pixels
                                 self.edit_surfaces.append((s,i))
Example #56
 def update(self, frame, events):
     pts = [
         denormalize(pt['norm_pos'],
                     frame.img.shape[:-1][::-1],
                     flip_y=True)
         for pt in events.get('gaze_positions', [])
     ]
     bgra = (self.b * 255, self.g * 255, self.r * 255, self.a * 255)
     if pts:
         pts = np.array([pts], dtype=np.int32)
         cv2.polylines(frame.img,
                       pts,
                       isClosed=False,
                       color=bgra,
                       thickness=self.thickness,
                       lineType=cv2.cv.CV_AA)
Example #57
    def update(self,frame,events):
        for eye_index in self.showeyes:
            requested_eye_frame_idx = self.eye_world_frame_map[eye_index][frame.index]

            #1. do we need a new frame?
            if requested_eye_frame_idx != self.eye_frames[eye_index].index:
                # do we need to seek?
                if requested_eye_frame_idx == self.eye_cap[eye_index].get_frame_index()+1:
                    # if we just need to seek by one frame, it's faster to just read one and throw it away.
                    _ = self.eye_cap[eye_index].get_frame()
                if requested_eye_frame_idx != self.eye_cap[eye_index].get_frame_index():
                    # only now do I need to seek
                    self.eye_cap[eye_index].seek_to_frame(requested_eye_frame_idx)
                # reading the new eye frame
                try:
                    self.eye_frames[eye_index] = self.eye_cap[eye_index].get_frame()
                except EndofVideoFileError:
                    logger.warning("Reached the end of the eye video for eye video %s."%eye_index)
            else:
                #our old frame is still valid because we are doing upsampling
                pass

            #2. dragging image
            if self.drag_offset[eye_index] is not None:
                pos = glfwGetCursorPos(glfwGetCurrentContext())
                pos = normalize(pos,glfwGetWindowSize(glfwGetCurrentContext()))
                pos = denormalize(pos,(frame.img.shape[1],frame.img.shape[0]) ) # Position in img pixels
                self.pos[eye_index][0] = pos[0]+self.drag_offset[eye_index][0]
                self.pos[eye_index][1] = pos[1]+self.drag_offset[eye_index][1]
            else:
                self.video_size = [round(self.eye_frames[eye_index].width*self.eye_scale_factor), round(self.eye_frames[eye_index].height*self.eye_scale_factor)]

            #3. keep in image bounds, do this even when not dragging because the video size could change.
            self.pos[eye_index][1] = min(frame.img.shape[0]-self.video_size[1],max(self.pos[eye_index][1],0)) #frame.img.shape[0] is height, frame.img.shape[1] is width of screen
            self.pos[eye_index][0] = min(frame.img.shape[1]-self.video_size[0],max(self.pos[eye_index][0],0))

            #4. flipping images, converting to greyscale
            eye_gray = cv2.cvtColor(self.eye_frames[eye_index].img,cv2.COLOR_BGR2GRAY) #auto gray scaling
            eyeimage = cv2.resize(eye_gray,(0,0),fx=self.eye_scale_factor, fy=self.eye_scale_factor) 
            if self.mirror[str(eye_index)]:
                eyeimage = np.fliplr(eyeimage)
            if self.flip[str(eye_index)]:
                eyeimage = np.flipud(eyeimage)

            #5. finally overlay the image
            x,y = int(self.pos[eye_index][0]),int(self.pos[eye_index][1])
            transparent_image_overlay((x,y),cv2.cvtColor(eyeimage,cv2.COLOR_GRAY2BGR),frame.img,self.alpha)
Example #58
 def _polyline_points(self, image_size, base_gaze_data,
                      scan_path_gaze_data):
     if scan_path_gaze_data is not None:
         points_fields = ["norm_x", "norm_y"]
         gaze_points = scan_path_gaze_data[points_fields]
         gaze_points = np.array(
             gaze_points.tolist(),
             dtype=gaze_points.dtype[0])  # FIXME: This is a workaround
         gaze_points = gaze_points.reshape((-1, len(points_fields)))
         gaze_points = np_denormalize(gaze_points, image_size, flip_y=True)
         return gaze_points.tolist()
     else:
         return [
             denormalize(datum["norm_pos"], image_size, flip_y=True)
             for datum in base_gaze_data
             if datum["confidence"] >= self.g_pool.min_data_confidence
         ]
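np_denormalize in the last example is presumably a vectorized counterpart of denormalize that maps a whole (N, 2) array of normalized points at once. A hedged sketch (assumed behaviour, not taken from the library):

import numpy as np

def np_denormalize(points, size, flip_y=False):
    """Sketch: map an (N, 2) array of normalized points to pixel coordinates."""
    points = np.array(points, dtype=np.float64)
    if flip_y:
        points[:, 1] = 1.0 - points[:, 1]
    points[:, 0] *= size[0]  # width
    points[:, 1] *= size[1]  # height
    return points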