Example #1
0
    def _detect_markers(self, frame):
        """Detect square markers in *frame* and refresh the tracker's marker state.

        Updates self.previous_markers (raw detections), self.markers_unfiltered
        and self.markers (filtered Square_Marker_Detection instances).
        """
        gray_img = frame.gray

        if not self.robust_detection:
            raw_markers = marker_det.detect_markers(
                gray_img,
                grid_size=5,
                aperture=11,
                min_marker_perimeter=self.marker_min_perimeter,
            )
        else:
            raw_markers = marker_det.detect_markers_robust(
                gray_img,
                grid_size=5,
                aperture=11,
                prev_markers=self.previous_markers,
                true_detect_every_frame=3,
                min_marker_perimeter=self.marker_min_perimeter,
                invert_image=self.inverted_markers,
            )

        # Cache the detections in raw form: the robust detector expects its
        # prev_markers input in this format, not the wrapped one built below.
        self.previous_markers = raw_markers

        wrapped = [
            Square_Marker_Detection(
                raw["id"], raw["id_confidence"], raw["verts"], raw["perimeter"]
            )
            for raw in raw_markers
        ]
        deduped = self._remove_duplicate_markers(wrapped)
        self.markers_unfiltered = deduped
        self.markers = self._filter_markers(deduped)
Example #2
0
    def update(self,frame,events):
        """Detect square markers (while tracking runs), locate registered
        surfaces and map recent gaze positions onto them.

        Side effects: updates self.img_shape and self.markers, appends one
        datum per detected surface to events['surface'], refreshes the menu
        button status text, applies in-progress vertex edits, and annotates
        each datum in events['gaze_positions'] in place.
        """
        self.img_shape = frame.height,frame.width,3

        if self.running:
            gray = frame.gray

            if self.robust_detection:
                # Robust mode feeds last frame's markers back in and only runs
                # a full detection every 3rd frame.
                self.markers = detect_markers_robust(gray,
                                                    grid_size = 5,
                                                    prev_markers=self.markers,
                                                    min_marker_perimeter=self.min_marker_perimeter,
                                                    aperture=self.aperture,
                                                    visualize=0,
                                                    true_detect_every_frame=3,
                                                    invert_image=self.invert_image)
            else:
                self.markers = detect_markers(gray,
                                                grid_size = 5,
                                                min_marker_perimeter=self.min_marker_perimeter,
                                                aperture=self.aperture,
                                                visualize=0,
                                                invert_image=self.invert_image)


            if self.mode == "Show marker IDs":
                # NOTE(review): draws the ID overlay onto the gray image; other
                # variants of this plugin draw onto frame.img -- confirm intended.
                draw_markers(frame.gray,self.markers)

        events['surface'] = []

        # locate surfaces and publish a datum for each one that was found
        for s in self.surfaces:
            s.locate(self.markers,self.camera_calibration,self.min_marker_perimeter, self.locate_3d)
            if s.detected:
                events['surface'].append({'name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen.tolist(),'m_from_screen':s.m_from_screen.tolist(), 'timestamp':frame.timestamp,'camera_pose_3d':s.camera_pose_3d.tolist()})

        # Show "detected/total" while running, otherwise a paused notice.
        if self.running:
            self.button.status_text = '%s/%s'%(len([s for s in self.surfaces if s.detected]),len(self.surfaces))
        else:
            self.button.status_text = 'tracking paused'

        if self.mode == 'Show Markers and Surfaces':
            # edit surfaces by user: drag each grabbed vertex to the cursor
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)
                for s,v_idx in self.edit_surf_verts:
                    if s.detected:
                        new_pos = s.img_to_ref_surface(np.array(pos))
                        s.move_vertex(v_idx,new_pos)

        #map recent gaze onto detected surfaces used for pupil server;
        #also annotates each gaze datum in place with the mapped position
        for s in self.surfaces:
            if s.detected:
                s.gaze_on_srf = []
                for p in events.get('gaze_positions',[]):
                    gp_on_s = tuple(s.img_to_ref_surface(np.array(p['norm_pos'])))
                    p['realtime gaze on ' + s.name] = gp_on_s
                    s.gaze_on_srf.append(gp_on_s)
Example #3
0
    def update(self, frame, events):
        """Run marker detection (while tracking), locate surfaces, handle
        vertex editing and map recent gaze onto each detected surface."""
        self.img_shape = frame.height, frame.width, 3

        if self.running:
            gray_img = frame.gray

            if not self.robust_detection:
                self.markers = detect_markers(
                    gray_img,
                    grid_size=5,
                    min_marker_perimeter=self.min_marker_perimeter,
                    aperture=self.aperture,
                    visualize=0,
                )
            else:
                self.markers = detect_markers_robust(
                    gray_img,
                    grid_size=5,
                    prev_markers=self.markers,
                    min_marker_perimeter=self.min_marker_perimeter,
                    aperture=self.aperture,
                    visualize=0,
                    true_detect_every_frame=3,
                )

            if self.mode == "Show marker IDs":
                draw_markers(frame.img, self.markers)

        # Locate every registered surface against the current marker set.
        for surface in self.surfaces:
            surface.locate(self.markers, self.locate_3d, self.camera_intrinsics)

        if not self.running:
            self.button.status_text = "tracking paused"
        else:
            detected_count = len([s for s in self.surfaces if s.detected])
            self.button.status_text = "%s/%s" % (detected_count, len(self.surfaces))

        # Let the user drag grabbed surface vertices while in edit mode.
        if self.mode == "Surface edit mode":
            window = glfwGetCurrentContext()
            cursor = glfwGetCursorPos(window)
            cursor = normalize(cursor, glfwGetWindowSize(window), flip_y=True)
            for surface, vertex_idx in self.edit_surfaces:
                if surface.detected:
                    surface.move_vertex(
                        vertex_idx, surface.img_to_ref_surface(np.array(cursor))
                    )

        # Map recent gaze onto each detected surface (consumed by pupil server);
        # annotates each gaze datum in place as well.
        for surface in self.surfaces:
            if surface.detected:
                surface.gaze_on_srf = []
                for gaze_point in events.get("gaze_positions", []):
                    mapped = tuple(
                        surface.img_to_ref_surface(np.array(gaze_point["norm_pos"]))
                    )
                    gaze_point["realtime gaze on " + surface.name] = mapped
                    surface.gaze_on_srf.append(mapped)
Example #4
0
    def recent_events(self, events):
        """Detect markers in the current frame, locate surfaces and publish a
        per-surface datum (including mapped gaze) into events['surfaces']."""
        frame = events.get('frame')
        if not frame:
            return
        self.img_shape = frame.height, frame.width, 3

        if self.running:
            gray_img = frame.gray
            if self.invert_image:
                gray_img = 255 - gray_img

            if not self.robust_detection:
                self.markers = detect_markers(
                    gray_img,
                    grid_size=5,
                    aperture=self.aperture,
                    min_marker_perimeter=self.min_marker_perimeter,
                )
            else:
                self.markers = detect_markers_robust(
                    gray_img,
                    grid_size=5,
                    aperture=self.aperture,
                    prev_markers=self.markers,
                    true_detect_every_frame=3,
                    min_marker_perimeter=self.min_marker_perimeter,
                )
            if self.mode == "Show marker IDs":
                draw_markers(frame.gray, self.markers)

        # Locate each surface, then map the most recent gaze data onto it.
        for surface in self.surfaces:
            surface.locate(self.markers, self.min_marker_perimeter,
                           self.min_id_confidence, self.locate_3d)
            if not surface.detected:
                surface.gaze_on_srf = []
            else:
                surface.gaze_on_srf = surface.map_data_to_surface(
                    events.get('gaze_positions', []), surface.m_from_screen)
                surface.update_gaze_history()

        events['surfaces'] = []
        for surface in self.surfaces:
            if not surface.detected:
                continue
            pose = surface.camera_pose_3d
            events['surfaces'].append({
                'name': surface.name,
                'uid': surface.uid,
                'm_to_screen': surface.m_to_screen.tolist(),
                'm_from_screen': surface.m_from_screen.tolist(),
                'gaze_on_srf': surface.gaze_on_srf,
                'timestamp': frame.timestamp,
                'camera_pose_3d': pose.tolist() if pose is not None else None,
            })

        if not self.running:
            self.button.status_text = 'tracking paused'
        else:
            self.button.status_text = '{}/{}'.format(
                len([s for s in self.surfaces if s.detected]), len(self.surfaces))

        if self.mode == 'Show Markers and Surfaces':
            # Apply in-progress vertex edits while the user drags them.
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                cursor = glfwGetCursorPos(window)
                cursor = normalize(cursor, glfwGetWindowSize(window), flip_y=True)
                for surface, vertex_idx in self.edit_surf_verts:
                    if surface.detected:
                        surface.move_vertex(
                            vertex_idx,
                            surface.img_to_ref_surface(np.array(cursor)))
Example #5
0
    def recent_events(self, events):
        """Detect square markers, locate surfaces (with camera calibration)
        and publish per-surface data, including mapped gaze, into
        events['surfaces'].

        Side effects: updates self.img_shape and self.markers, refreshes the
        menu button status text and applies in-progress vertex edits.
        """
        frame = events.get('frame')
        if not frame:
            return
        self.img_shape = frame.height,frame.width,3

        if self.running:
            gray = frame.gray
            if self.invert_image:
                # Invert so markers printed with swapped black/white still match.
                gray = 255-gray

            if self.robust_detection:
                # Robust mode reuses last frame's markers and only runs a full
                # detection every 3rd frame.
                self.markers = detect_markers_robust(
                    gray, grid_size = 5,aperture=self.aperture,
                    prev_markers=self.markers,
                    true_detect_every_frame=3,
                    min_marker_perimeter=self.min_marker_perimeter)
            else:
                self.markers = detect_markers(
                    gray, grid_size = 5,aperture=self.aperture,
                    min_marker_perimeter=self.min_marker_perimeter)
            if self.mode == "Show marker IDs":
                draw_markers(frame.gray,self.markers)


        # locate surfaces, map gaze onto each detected one
        for s in self.surfaces:
            s.locate(self.markers,self.camera_calibration,self.min_marker_perimeter,self.min_id_confidence, self.locate_3d)
            if s.detected:
                s.gaze_on_srf = s.map_data_to_surface(events.get('gaze_positions',[]),s.m_from_screen)
            else:
                s.gaze_on_srf =[]

        # Publish one datum per detected surface for downstream consumers.
        events['surfaces'] = []
        for s in self.surfaces:
            if s.detected:
                events['surfaces'].append({'name':s.name,'uid':s.uid,'m_to_screen':s.m_to_screen.tolist(),'m_from_screen':s.m_from_screen.tolist(),'gaze_on_srf': s.gaze_on_srf, 'timestamp':frame.timestamp,'camera_pose_3d':s.camera_pose_3d.tolist() if s.camera_pose_3d is not None else None})


        # Show "detected/total" while running, otherwise a paused notice.
        if self.running:
            self.button.status_text = '{}/{}'.format(len([s for s in self.surfaces if s.detected]), len(self.surfaces))
        else:
            self.button.status_text = 'tracking paused'

        if self.mode == 'Show Markers and Surfaces':
            # edit surfaces by user: drag each grabbed vertex to the cursor
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                pos = glfwGetCursorPos(window)
                pos = normalize(pos,glfwGetWindowSize(window),flip_y=True)
                for s,v_idx in self.edit_surf_verts:
                    if s.detected:
                        new_pos = s.img_to_ref_surface(np.array(pos))
                        s.move_vertex(v_idx,new_pos)
Example #6
0
    def detect_markers_iter(
            self, gray_img,
            frame_index: int) -> typing.Iterable[Surface_Marker]:
        """Detect square markers in *gray_img* for frame *frame_index* and
        return them as filtered Surface_Marker instances.

        Returns an empty iterable when square-marker detection is disabled.
        """
        if Surface_Marker_Detector_Mode.SQUARE_MARKER not in self.marker_detector_modes:
            return []

        # A gap in the frame sequence invalidates previously detected markers.
        if self.__previous_frame_index + 1 != frame_index:
            self.__previous_raw_markers = []

        if self.__robust_detection:
            raw_markers = square_marker_detect.detect_markers_robust(
                gray_img=gray_img,
                grid_size=5,
                min_marker_perimeter=self.marker_min_perimeter,
                aperture=9,
                prev_markers=self.__previous_raw_markers,
                true_detect_every_frame=3,
                invert_image=self.__inverted_markers,
            )
        else:
            raw_markers = square_marker_detect.detect_markers(
                gray_img=gray_img,
                grid_size=5,
                min_marker_perimeter=self.marker_min_perimeter,
                aperture=9,
            )

        # The robust detector consumes its previous markers in raw form, so
        # cache them before converting to Surface_Marker instances.
        self.__previous_raw_markers = raw_markers
        self.__previous_frame_index = frame_index

        return filter(
            self._surface_marker_filter,
            map(Surface_Marker.from_square_tag_detection, raw_markers),
        )
Example #7
0
    def recent_events(self, events):
        """Detect markers, locate surfaces and publish gaze and fixation
        mappings for every detected surface into events["surfaces"]."""
        frame = events.get("frame")
        if not frame:
            return
        self.img_shape = frame.height, frame.width, 3

        if self.running:
            gray_img = frame.gray
            if self.invert_image:
                gray_img = 255 - gray_img

            if not self.robust_detection:
                self.markers = detect_markers(
                    gray_img,
                    grid_size=5,
                    aperture=self.aperture,
                    min_marker_perimeter=self.min_marker_perimeter,
                )
            else:
                self.markers = detect_markers_robust(
                    gray_img,
                    grid_size=5,
                    aperture=self.aperture,
                    prev_markers=self.markers,
                    true_detect_every_frame=3,
                    min_marker_perimeter=self.min_marker_perimeter,
                )
            if self.mode == "Show marker IDs":
                draw_markers(frame.gray, self.markers)

        # Locate each surface, then map gaze and fixations onto it.
        for surface in self.surfaces:
            surface.locate(
                self.markers,
                self.min_marker_perimeter,
                self.min_id_confidence,
                self.locate_3d,
            )
            if not surface.detected:
                surface.gaze_on_srf = []
                surface.fixations_on_srf = []
            else:
                surface.gaze_on_srf = surface.map_data_to_surface(
                    events.get("gaze", []), surface.m_from_screen
                )
                surface.fixations_on_srf = surface.map_data_to_surface(
                    events.get("fixations", []), surface.m_from_screen
                )
                surface.update_gaze_history()

        events["surfaces"] = []
        for surface in self.surfaces:
            if not surface.detected:
                continue
            pose = surface.camera_pose_3d
            events["surfaces"].append({
                "topic": "surfaces.{}".format(surface.name),
                "name": surface.name,
                "uid": surface.uid,
                "m_to_screen": surface.m_to_screen.tolist(),
                "m_from_screen": surface.m_from_screen.tolist(),
                "gaze_on_srf": surface.gaze_on_srf,
                "fixations_on_srf": surface.fixations_on_srf,
                "timestamp": frame.timestamp,
                "camera_pose_3d": pose.tolist() if pose is not None else None,
            })

        if not self.running:
            self.button.status_text = "tracking paused"
        else:
            self.button.status_text = "{}/{}".format(
                len([s for s in self.surfaces if s.detected]), len(self.surfaces)
            )

        if self.mode == "Show Markers and Surfaces":
            # Apply in-progress vertex edits using the last known mouse position.
            if self.edit_surf_verts:
                cursor = self._last_mouse_pos
                for surface, vertex_idx in self.edit_surf_verts:
                    if surface.detected:
                        surface.move_vertex(
                            vertex_idx, surface.img_to_ref_surface(np.array(cursor))
                        )
Example #8
0
    def update(self, frame, events):
        """Detect visible markers, register newly seen ones, and update pose
        and draw state for the marker currently fixated by the gaze.

        Side effects: updates self.img_shape, self.current_gaze_pos,
        self.frame_img, the per-marker dicts in self.markers, and the menu
        button status text when paused.
        """
        self.img_shape = frame.height, frame.width, 3

        if self.running:
            self.current_gaze_pos = self.get_gaze_pos(events)

            # Drawing only happens on the undistorted image.
            if self.show_undistord:
                self.frame_img = undistord_with_roi(
                    img=frame.img,
                    cm=self.camera_intrinsics[0],
                    dist_coef=self.camera_intrinsics[1],
                    roi=self.roi,
                    new_cm=self.camera_intrinsics[4],
                )
                gray = cv2.cvtColor(self.frame_img, cv2.COLOR_BGR2GRAY)
                cv2.imshow("test", self.frame_img)
            else:
                self.frame_img = frame.img
                gray = frame.gray

            if self.robust_detection:
                visible_markers = detect_markers_robust(
                    gray,
                    grid_size=5,
                    prev_markers=self.markers,
                    min_marker_perimeter=self.min_marker_perimeter,
                    aperture=11,
                    visualize=0,
                    true_detect_every_frame=3,
                )
            else:
                visible_markers = detect_markers(
                    gray,
                    grid_size=5,
                    min_marker_perimeter=self.min_marker_perimeter,
                    aperture=11,
                    visualize=0,
                )

            # Reset visibility; it is re-established below for markers that
            # were seen in the current frame.
            for m in self.markers:
                m['visible'] = False

            self.find_main_marker(visible_markers)

            for vm in visible_markers:
                # Find the registered marker with the same id, if any.
                index = next(
                    (i for i, m in enumerate(self.markers) if m['id'] == vm['id']),
                    -1,
                )

                if index == -1:  # marker is not registered yet
                    index = len(self.markers)
                    self.markers.append({
                        'id': vm['id'],
                        'verts': vm['verts'],
                        'verts_norm': vm['verts_norm'],
                        'centroid': vm['centroid'],
                        'frames_since_true_detection': 0,
                        # Default physical marker height; presumably millimeters
                        # -- TODO confirm unit.
                        'height': 76,
                        'frames_looked_up_count': 0,
                        'obj_name': "None",
                        'obj': None,
                        'mult': 1,
                    })
                marker = self.markers[index]

                marker['verts'] = vm['verts']
                marker['verts_norm'] = vm['verts_norm']
                marker['centroid'] = vm['centroid']
                marker['frames_since_true_detection'] = vm['frames_since_true_detection']
                marker['visible'] = True
                objp = gen_square_pattern_grid(marker['height'])

                # Find the rotation and translation vectors. The image is
                # already undistorted, so no distortion coefficients are given.
                _, rvecs, tvecs = cv2.solvePnP(
                    objp, marker['verts'], self.camera_intrinsics[0], None)

                # If the marker is currently fixated by the gaze, prepare it
                # for 3D object rendering.
                if self.is_looked_up(marker):
                    if self.mode == "Draw obj":
                        # Lazily load the OBJ model the first time it is needed.
                        if marker['obj'] is None and marker['obj_name'] != "None":
                            marker['obj'] = OBJ(
                                "../ressources/" + marker['obj_name'],
                                marker['mult'],
                                swapyz=True,
                            )

                        marker['rot'] = rvecs
                        marker['trans'] = tvecs
                        marker['to_draw'] = True
                else:  # not fixated
                    if self.mode == "Draw obj":
                        marker['to_draw'] = False

        if not self.running:
            self.button.status_text = 'tracking paused'
Example #9
0
def marker_positions(camera_spec,
                     videofile,
                     outfile,
                     new_camera=None,
                     start_time=0.0,
                     end_time=float("inf"),
                     visualize=False,
                     output_camera=None,
                     write=False):
    """Detect square markers in every rectified frame of *videofile*.

    Parameters:
        camera_spec: path to a pickled camera description (bytes-keyed dict
            with b'resolution' and either b'rect_map'/b'rect_camera_matrix'
            or b'camera_matrix'/b'dist_coefs').
        videofile: video to process.
        outfile: destination for np.save of the per-frame records
            ({'ts': seconds, 'markers': [...]}).
        new_camera: if given, the rectified camera spec is saved here.
        start_time / end_time: processing window in seconds.
        visualize: show each frame with detected markers drawn.
        output_camera: unused; kept for interface compatibility.
        write: unused; the video-writer code path is disabled (see below).

    Returns:
        List of per-frame marker lists (one entry per processed frame).
    """
    with open(camera_spec, 'rb') as f:
        camera = pickle.load(f, encoding='bytes')
    image_resolution = camera[b'resolution']

    # Build (or reuse) the rectification map for undistorting frames.
    if b'rect_map' not in camera:
        camera_matrix = camera[b'camera_matrix']
        camera_distortion = camera[b'dist_coefs']
        rect_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
            camera_matrix, camera_distortion, image_resolution, 0.0)
        rmap = cv2.initUndistortRectifyMap(camera_matrix, camera_distortion,
                                           None, rect_camera_matrix,
                                           image_resolution, cv2.CV_32FC1)
    else:
        rmap = camera[b'rect_map']
        rect_camera_matrix = camera[b'rect_camera_matrix']

    # Rectified frames have no remaining distortion.
    camera = {
        'camera_matrix': rect_camera_matrix,
        'dist_coefs': None,
        'resolution': image_resolution,
    }
    if new_camera is not None:
        save_object(camera, new_camera)

    video = cv2.VideoCapture(videofile)
    video.set(cv2.CAP_PROP_POS_MSEC, start_time * 1000)
    frames = []
    marker_cache = []

    try:
        while True:
            ret, oframe = video.read()
            if not ret:
                break

            frame = cv2.remap(oframe, rmap[0], rmap[1], cv2.INTER_LINEAR)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            time = video.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
            if time > end_time:
                break

            markers = markerdetect.detect_markers(frame,
                                                  5,
                                                  min_marker_perimeter=15)
            marker_cache.append(markers)
            frames.append({'ts': time, 'markers': markers})

            if visualize:
                markerdetect.draw_markers(frame, markers)
                cv2.imshow('frameBG', frame)
                cv2.waitKey(1)
    finally:
        # BUGFIX: the capture was never released (resource leak).
        video.release()

    np.save(outfile, frames)

    # BUGFIX: the original called out.release() when write was truthy, but the
    # cv2.VideoWriter creation was commented out, so that path raised
    # NameError. The writer code stays disabled; nothing to release here.

    return marker_cache
Example #10
0
    def update(self, frame, events):
        """Detect square markers (while tracking runs), locate surfaces with
        camera calibration, publish per-surface data including mapped gaze
        into events["surface"], and apply in-progress vertex edits.

        Side effects: updates self.img_shape and self.markers and refreshes
        the menu button status text.
        """
        self.img_shape = frame.height, frame.width, 3

        if self.running:
            gray = frame.gray
            if self.invert_image:
                # Invert so markers printed with swapped black/white still match.
                gray = 255 - gray

            if self.robust_detection:
                # Robust mode reuses last frame's markers and only runs a full
                # detection every 3rd frame.
                self.markers = detect_markers_robust(
                    gray,
                    grid_size=5,
                    aperture=self.aperture,
                    prev_markers=self.markers,
                    true_detect_every_frame=3,
                    min_marker_perimeter=self.min_marker_perimeter,
                )
            else:
                self.markers = detect_markers(
                    gray, grid_size=5, aperture=self.aperture, min_marker_perimeter=self.min_marker_perimeter
                )
            if self.mode == "Show marker IDs":
                # NOTE(review): draws the ID overlay onto the gray image; other
                # variants of this plugin draw onto frame.img -- confirm intended.
                draw_markers(frame.gray, self.markers)

        # locate surfaces, map gaze onto each detected one
        for s in self.surfaces:
            s.locate(
                self.markers, self.camera_calibration, self.min_marker_perimeter, self.min_id_confidence, self.locate_3d
            )
            if s.detected:
                s.gaze_on_srf = s.map_data_to_surface(events.get("gaze_positions", []), s.m_from_screen)
            else:
                s.gaze_on_srf = []

        # Publish one datum per detected surface for downstream consumers.
        events["surface"] = []
        for s in self.surfaces:
            if s.detected:
                events["surface"].append(
                    {
                        "name": s.name,
                        "uid": s.uid,
                        "m_to_screen": s.m_to_screen.tolist(),
                        "m_from_screen": s.m_from_screen.tolist(),
                        "gaze_on_srf": s.gaze_on_srf,
                        "timestamp": frame.timestamp,
                        "camera_pose_3d": s.camera_pose_3d.tolist() if s.camera_pose_3d is not None else None,
                    }
                )

        # Show "detected/total" while running, otherwise a paused notice.
        if self.running:
            self.button.status_text = "%s/%s" % (len([s for s in self.surfaces if s.detected]), len(self.surfaces))
        else:
            self.button.status_text = "tracking paused"

        if self.mode == "Show Markers and Surfaces":
            # edit surfaces by user: drag each grabbed vertex to the cursor
            if self.edit_surf_verts:
                window = glfwGetCurrentContext()
                pos = glfwGetCursorPos(window)
                pos = normalize(pos, glfwGetWindowSize(window), flip_y=True)
                for s, v_idx in self.edit_surf_verts:
                    if s.detected:
                        new_pos = s.img_to_ref_surface(np.array(pos))
                        s.move_vertex(v_idx, new_pos)