Example #1
File: recorder.py  Project: lloves/pupil
    def start(self,network_propagate=True):
        self.timestamps = []
        self.data = {'pupil_positions':[],'gaze_positions':[]}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir %s"%session)

        except:
            logger.debug("Recordings session dir %s already exists, using it." %session)

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "%03d/" % counter)
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir %s"%self.rec_path)
                break
            except:
                logger.debug("We dont want to overwrite data, incrementing counter & trying to make new data folder")
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w') as f:
            f.write("Recording Name\t"+self.session_name+ "\n")
            f.write("Start Date\t"+ strftime("%d.%m.%Y", localtime(self.start_time))+ "\n")
            f.write("Start Time\t"+ strftime("%H:%M:%S", localtime(self.start_time))+ "\n")


        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(audio_path,self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path,self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path,fps=self.g_pool.capture.frame_rate)
        # pass the recording path to the eye process
        if self.record_eye:
            for tx in self.g_pool.eye_tx:
                tx.send((self.rec_path,self.raw_jpeg))

        if self.show_info_menu:
            self.open_info_menu()

        self.notify_all( {'subject':'rec_started','rec_path':self.rec_path,'session_name':self.session_name,'network_propagate':network_propagate} )
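
Note: the info.csv written above is a plain tab-separated key/value file, so it can be read back with a few lines of Python. A minimal sketch, assuming a hypothetical recording path:

# Minimal sketch: parse the tab-separated info.csv written by start() above.
# The path is hypothetical and only used for illustration.
meta_info_path = "recordings/session/000/info.csv"
with open(meta_info_path) as f:
    # each line is "<key>\t<value>", mirroring the f.write() calls above
    meta_info = dict(line.rstrip("\n").split("\t", 1) for line in f if line.strip())
print(meta_info.get("Recording Name"))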
Example #2
File: recorder.py  Project: prinkkala/pupil
    def start(self):
        self.timestamps = []
        self.timestampsUnix = []
        self.glint_pos_list = []
        self.pupil_pos_list = []
        self.gaze_pos_list = []
        self.data = {'pupil_positions':[],'gaze_positions':[],'notifications':[]}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir %s"%session)

        except:
            logger.debug("Recordings session dir %s already exists, using it." %session)

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "%03d/" % counter)
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir %s"%self.rec_path)
                break
            except:
                logger.debug("We dont want to overwrite data, incrementing counter & trying to make new data folder")
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w') as csvfile:
            csv_utils.write_key_value_file(csvfile,{
                'Recording Name': self.session_name,
                'Start Date': strftime("%d.%m.%Y", localtime(self.start_time)),
                'Start Time': strftime("%H:%M:%S", localtime(self.start_time)),
                'Start Time (seconds since epoch)':  str(self.start_time)
            })

        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(audio_path,self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path,self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path,fps=self.g_pool.capture.frame_rate)

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all( {'subject':'recording.started','rec_path':self.rec_path,'session_name':self.session_name,'record_eye':self.record_eye,'compression':self.raw_jpeg} )
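
The 'recording.started' notification emitted here can be picked up by any other plugin via its on_notify hook (the pattern shown in Examples #5 and #6). A minimal sketch, with a hypothetical listener class:

class Recording_Listener(Plugin):
    # Hypothetical plugin that reacts to the notification sent by start() above.
    def on_notify(self, notification):
        if notification['subject'] == 'recording.started':
            # 'rec_path' and 'session_name' are part of the payload sent above
            logger.info("Recording started in {} (session {})".format(
                notification['rec_path'], notification['session_name']))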
Example #3
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )
    writer = AV_Writer(
        output_file, fps=input_source.frame_rate, audio_dir=None, use_timestamps=True
    )
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index
            )
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Example #4
    def start_depth_recording(self, rec_loc):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = AV_Writer(
            video_path, fps=self.depth_frame_rate, use_timestamps=True
        )
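
A matching stop method would just release the writer again. A minimal sketch of such a counterpart (the real project method may differ; the close() argument follows the AV_Writer usage in Example #3 and is an assumption):

    def stop_depth_recording(self):
        # Hypothetical counterpart to start_depth_recording() above.
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return
        # Example #3 closes an AV_Writer with a timestamp export format;
        # "npy" here is an assumption.
        self.depth_video_writer.close("npy")
        self.depth_video_writer = None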
Example #5
class Recorder(Plugin):
    """Capture Recorder"""
    def __init__(self, g_pool, session_name=get_auto_name(), rec_dir=None,
                 user_info={'name': '', 'additional_field': 'change_me'},
                 info_menu_conf={}, show_info_menu=False, record_eye=False,
                 audio_src='No Audio', raw_jpeg=True):
        super().__init__(g_pool)
        # update name if it was autogenerated.
        if session_name.startswith('20') and len(session_name) == 10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep, 1)[0]
        default_rec_dir = os.path.join(base_dir, 'recordings')

        if rec_dir and rec_dir != default_rec_dir and self.verify_path(rec_dir):
            self.rec_dir = rec_dir
        else:
            try:
                os.makedirs(default_rec_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info('Created standard Rec dir at "{}"'.format(default_rec_dir))
            self.rec_dir = default_rec_dir

        self.raw_jpeg = raw_jpeg
        self.order = .9
        self.record_eye = record_eye
        self.session_name = session_name
        self.audio_devices_dict = Audio_Input_Dict()
        if audio_src in list(self.audio_devices_dict.keys()):
            self.audio_src = audio_src
        else:
            self.audio_src = 'No Audio'
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf

    def get_init_dict(self):
        d = {}
        d['record_eye'] = self.record_eye
        d['audio_src'] = self.audio_src
        d['session_name'] = self.session_name
        d['user_info'] = self.user_info
        d['info_menu_conf'] = self.info_menu_conf
        d['show_info_menu'] = self.show_info_menu
        d['rec_dir'] = self.rec_dir
        d['raw_jpeg'] = self.raw_jpeg
        return d

    def init_gui(self):
        self.menu = ui.Growing_Menu('Recorder')
        self.g_pool.sidebar.insert(3, self.menu)
        self.menu.append(ui.Info_Text('Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'))
        self.menu.append(ui.Info_Text('Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'))
        self.menu.append(ui.Text_Input('rec_dir', self, setter=self.set_rec_dir, label='Path to recordings'))
        self.menu.append(ui.Text_Input('session_name', self, setter=self.set_session_name, label='Recording session name'))
        self.menu.append(ui.Switch('show_info_menu', self, on_val=True, off_val=False, label='Request additional user info'))
        self.menu.append(ui.Selector('raw_jpeg', self, selection=[True, False], labels=["bigger file, less CPU", "smaller file, more CPU"], label='Compression'))
        self.menu.append(ui.Info_Text('Recording the raw eye video is optional. We use it for debugging.'))
        self.menu.append(ui.Switch('record_eye', self, on_val=True, off_val=False, label='Record eye'))
        self.menu.append(ui.Selector('audio_src', self, selection=list(self.audio_devices_dict.keys()), label='Audio Source'))

        self.button = ui.Thumb('running', self, setter=self.toggle, label='R', hotkey='r')
        self.button.on_color[:] = (1, .0, .0, .8)
        self.g_pool.quickbar.insert(1, self.button)

    def deinit_gui(self):
        if self.menu:
            self.g_pool.sidebar.remove(self.menu)
            self.menu = None
        if self.button:
            self.g_pool.quickbar.remove(self.button)
            self.button = None

    def toggle(self, _=None):
        if self.running:
            self.notify_all({'subject': 'recording.should_stop'})
            self.notify_all({'subject': 'recording.should_stop', 'remote_notify': 'all'})
        else:
            self.notify_all({'subject': 'recording.should_start', 'session_name': self.session_name})
            self.notify_all({'subject': 'recording.should_start', 'session_name': self.session_name, 'remote_notify': 'all'})


    def on_notify(self, notification):
        """Handles recorder notifications

        Reacts to notifications:
            ``recording.should_start``: Starts a new recording session
            ``recording.should_stop``: Stops current recording session

        Emits notifications:
            ``recording.started``: New recording session started
            ``recording.stopped``: Current recording session stopped

        Args:
            notification (dictionary): Notification dictionary
        """
        # notification wants to be recorded
        if notification.get('record', False) and self.running:
            self.data['notifications'].append(notification)
        elif notification['subject'] == 'recording.should_start':
            if self.running:
                logger.info('Recording already running!')
            elif not self.g_pool.capture.online:
                logger.error("Current world capture is offline. Please reconnect or switch to fake capture")
            else:
                if notification.get("session_name", ""):
                    self.set_session_name(notification["session_name"])
                self.start()

        elif notification['subject'] == 'recording.should_stop':
            if self.running:
                self.stop()
            else:
                logger.info('Recording already stopped!')

    def get_rec_time_str(self):
        rec_time = gmtime(time()-self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self):
        self.timestamps = []
        self.data = {'pupil_positions': [], 'gaze_positions': [], 'notifications': []}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir {}".format(session))

        except:
            logger.debug("Recordings session dir {} already exists, using it.".format(session))

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(self.rec_path))
                break
            except:
                logger.debug("We dont want to overwrite data, incrementing counter & trying to make new data folder")
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w', newline='') as csvfile:
            csv_utils.write_key_value_file(csvfile, {
                'Recording Name': self.session_name,
                'Start Date': strftime("%d.%m.%Y", localtime(self.start_time)),
                'Start Time': strftime("%H:%M:%S", localtime(self.start_time))
            })

        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(audio_path, self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path, self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path, fps=self.g_pool.capture.frame_rate)

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({'subject': 'recording.started', 'rec_path': self.rec_path,
                         'session_name': self.session_name, 'record_eye': self.record_eye,
                         'compression': self.raw_jpeg})

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu('additional Recording Info', size=(300, 300), pos=(300, 300))
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.keys():
                self.info_menu.insert(0, ui.Text_Input(name, self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(ui.Info_Text('Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'))
        self.info_menu.append(ui.Text_Input('user_info', self, setter=set_user_info, label="User info"))
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def recent_events(self,events):
        if self.running:
            for key, data in events.items():
                if key not in ('dt','frame'):
                    try:
                        self.data[key] += data
                    except KeyError:
                        self.data[key] = []
                        self.data[key] += data

            if 'frame' in events:
                frame = events['frame']
                self.timestamps.append(frame.timestamp)
                self.writer.write_video_frame(frame)
                self.frame_count += 1

            # # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))

            self.button.status_text = self.get_rec_time_str()

    def stop(self):
        # explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        save_object(self.data, os.path.join(self.rec_path, "pupil_data"))

        timestamps_path = os.path.join(self.rec_path, "world_timestamps.npy")
        # ts = sanitize_timestamps(np.array(self.timestamps))
        ts = np.array(self.timestamps)
        np.save(timestamps_path, ts)

        try:
            copy2(os.path.join(self.g_pool.user_dir, "surface_definitions"),
                  os.path.join(self.rec_path, "surface_definitions"))
        except:
            logger.info("No surface_definitions data found. You may want this if you do marker tracking.")
        try:
            copy2(os.path.join(self.g_pool.user_dir, "user_calibration_data"),
                  os.path.join(self.rec_path, "user_calibration_data"))
        except:
            logger.warning("No user calibration data found. Please calibrate first.")

        camera_calibration = load_camera_calibration(self.g_pool)
        if camera_calibration is not None:
            save_object(camera_calibration, os.path.join(self.rec_path, "camera_calibration"))
        else:
            logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a', newline='') as csvfile:
                csv_utils.write_key_value_file(csvfile, {
                    'Duration Time': self.get_rec_time_str(),
                    'World Camera Frames': self.frame_count,
                    'World Camera Resolution': str(self.g_pool.capture.frame_size[0])+"x"+str(self.g_pool.capture.frame_size[1]),
                    'Capture Software Version': self.g_pool.version,
                    'System Info': get_system_info()
                }, append=True)
        except Exception:
            logger.exception("Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"), 'w', newline='') as csvfile:
                csv_utils.write_key_value_file(csvfile, self.user_info)
        except Exception:
            logger.exception("Could not save userdata. Please report this bug!")

        self.close_info_menu()

        if self.audio_writer:
            self.audio_writer = None

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.timestamps = []
        self.data = {'pupil_positions': [], 'gaze_positions': []}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        logger.info("Saved Recording.")
        self.notify_all({'subject': 'recording.stopped', 'rec_path': self.rec_path})

    def cleanup(self):
        """gets called when the plugin get terminated.
           either volunatily or forced.
        """
        if self.running:
            self.stop()
        self.deinit_gui()

    def verify_path(self, val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '{}'.".format(n_path))
            return False
        else:
            return n_path

    def set_rec_dir(self, val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if os.path.sep in val:
                logger.warning('Your session name will create one or more subdirectories')
            self.session_name = val
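
The files saved by stop() can be loaded back for offline analysis using the same helpers Example #7 uses. A minimal sketch, with a hypothetical recording path:

# Minimal sketch: load what stop() saved and correlate it with the world
# timestamps, following the calls used in Example #7. The path is hypothetical.
rec_path = "recordings/session/000"
timestamps = np.load(os.path.join(rec_path, "world_timestamps.npy"))
pupil_data = load_object(os.path.join(rec_path, "pupil_data"))
pupil_by_frame = correlate_data(pupil_data['pupil_positions'], timestamps)
gaze_by_frame = correlate_data(pupil_data['gaze_positions'], timestamps)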
Example #6
File: recorder.py  Project: prinkkala/pupil
class Recorder(Plugin):
    """Capture Recorder"""
    def __init__(self,g_pool,session_name = get_auto_name(),rec_dir=None, user_info={'name':'','additional_field':'change_me'},info_menu_conf={},show_info_menu=False, record_eye = True, audio_src = 'No Audio',raw_jpeg=True):
        super(Recorder, self).__init__(g_pool)
        #update name if it was autogenerated.
        if session_name.startswith('20') and len(session_name)==10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep,1)[0]
        default_rec_dir = os.path.join(base_dir,'recordings')

        if rec_dir and rec_dir != default_rec_dir and self.verify_path(rec_dir):
            self.rec_dir = rec_dir
        else:
            try:
                os.makedirs(default_rec_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info('Created standard Rec dir at "%s"'%default_rec_dir)
            self.rec_dir = default_rec_dir

        self.raw_jpeg = raw_jpeg
        self.order = .9
        self.record_eye = record_eye
        self.session_name = session_name
        self.audio_devices_dict = Audio_Input_Dict()
        if audio_src in self.audio_devices_dict.keys():
            self.audio_src = audio_src
        else:
            self.audio_src = 'No Audio'
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf


    def get_init_dict(self):
        d = {}
        d['record_eye'] = self.record_eye
        d['audio_src'] = self.audio_src
        d['session_name'] = self.session_name
        d['user_info'] = self.user_info
        d['info_menu_conf'] = self.info_menu_conf
        d['show_info_menu'] = self.show_info_menu
        d['rec_dir'] = self.rec_dir
        d['raw_jpeg'] = self.raw_jpeg
        return d


    def init_gui(self):
        self.menu = ui.Growing_Menu('Recorder')
        self.g_pool.sidebar.insert(3,self.menu)
        self.menu.append(ui.Info_Text('Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'))
        self.menu.append(ui.Info_Text('Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'))
        self.menu.append(ui.Text_Input('rec_dir',self,setter=self.set_rec_dir,label='Path to recordings'))
        self.menu.append(ui.Text_Input('session_name',self,setter=self.set_session_name,label='Recording session name'))
        self.menu.append(ui.Switch('show_info_menu',self,on_val=True,off_val=False,label='Request additional user info'))
        self.menu.append(ui.Selector('raw_jpeg',self,selection = [True,False], labels=["bigger file, less CPU", "smaller file, more CPU"],label='Compression'))
        self.menu.append(ui.Info_Text('Recording the raw eye video is optional. We use it for debugging.'))
        self.menu.append(ui.Switch('record_eye',self,on_val=True,off_val=False,label='Record eye'))
        self.menu.append(ui.Selector('audio_src',self, selection=self.audio_devices_dict.keys(),label='Audio Source'))

        self.button = ui.Thumb('running',self,setter=self.toggle,label='Record',hotkey='r')
        self.button.on_color[:] = (1,.0,.0,.8)
        self.g_pool.quickbar.insert(1,self.button)


    def deinit_gui(self):
        if self.menu:
            self.g_pool.sidebar.remove(self.menu)
            self.menu = None
        if self.button:
            self.g_pool.quickbar.remove(self.button)
            self.button = None



    def toggle(self, _=None):
        if self.running:
            self.notify_all( {'subject':'recording.should_stop'} )
            self.notify_all( {'subject':'recording.should_stop', 'remote_notify':'all'} )
        else:
            self.notify_all( {'subject':'recording.should_start','session_name':self.session_name} )
            self.notify_all( {'subject':'recording.should_start','session_name':self.session_name,'remote_notify':'all'} )


    def on_notify(self,notification):
        """Handles recorder notifications

        Reacts to notifications:
            ``recording.should_start``: Starts a new recording session
            ``recording.should_stop``: Stops current recording session

        Emits notifications:
            ``recording.started``: New recording session started
            ``recording.stopped``: Current recording session stopped

        Args:
            notification (dictionary): Notification dictionary
        """
        # notification wants to be recorded
        if notification.get('record',False) and self.running:
            self.data['notifications'].append(notification)


        elif notification['subject'] == 'recording.should_start':
            if self.running:
                logger.info('Recording already running!')
            else:
                if notification.get("session_name",""):
                    self.set_session_name(notification["session_name"])
                self.start()

        elif notification['subject'] == 'recording.should_stop':
            if self.running:
                self.stop()
            else:
                logger.info('Recording already stopped!')


    def get_rec_time_str(self):
        rec_time = gmtime(time()-self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self):
        self.timestamps = []
        self.timestampsUnix = []
        self.glint_pos_list = []
        self.pupil_pos_list = []
        self.gaze_pos_list = []
        self.data = {'pupil_positions':[],'gaze_positions':[],'notifications':[]}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir %s"%session)

        except:
            logger.debug("Recordings session dir %s already exists, using it." %session)

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "%03d/" % counter)
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir %s"%self.rec_path)
                break
            except:
                logger.debug("We dont want to overwrite data, incrementing counter & trying to make new data folder")
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w') as csvfile:
            csv_utils.write_key_value_file(csvfile,{
                'Recording Name': self.session_name,
                'Start Date': strftime("%d.%m.%Y", localtime(self.start_time)),
                'Start Time': strftime("%H:%M:%S", localtime(self.start_time)),
                'Start Time (seconds since epoch)':  str(self.start_time)
            })

        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(audio_path,self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path,self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path,fps=self.g_pool.capture.frame_rate)

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all( {'subject':'recording.started','rec_path':self.rec_path,'session_name':self.session_name,'record_eye':self.record_eye,'compression':self.raw_jpeg} )

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu('additional Recording Info',size=(300,300),pos=(300,300))
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.iterkeys():
                self.info_menu.insert(0,ui.Text_Input(name,self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(ui.Info_Text('Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'))
        self.info_menu.append(ui.Text_Input('user_info',self,setter=set_user_info,label="User info"))
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def update(self,frame,events):
        if self.running:
            for key,data in events.iteritems():
                if key not in ('dt',) and key != 'timestamp_unix':
                    try:
                        self.data[key] += data
                    except KeyError:
                        self.data[key] = []
                        self.data[key] += data

            self.timestamps.append(frame.timestamp)
            self.writer.write_video_frame(frame)
            self.frame_count += 1

            for glint in events['glint_positions']:
                self.glint_pos_list += glint
            self.timestampsUnix.append(events['timestamp_unix'])
            # # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))

            self.button.status_text = self.get_rec_time_str()

    def stop(self):
        #explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        save_object(self.data,os.path.join(self.rec_path, "pupil_data"))



        self.glint_pos_list = np.array(self.glint_pos_list)
        glint_list_path = os.path.join(self.rec_path, "glint_positions.npy")
        np.save(glint_list_path,self.glint_pos_list)

        timestamps_path = os.path.join(self.rec_path, "world_timestamps.npy")
        # ts = sanitize_timestamps(np.array(self.timestamps))
        ts = np.array(self.timestamps)
        np.save(timestamps_path,ts)

        timestampsUnix_path = os.path.join(self.rec_path, "world_timestamps_unix.npy")
        tsUnix = np.array(self.timestampsUnix)
        np.save(timestampsUnix_path,tsUnix)


        try:
            copy2(os.path.join(self.g_pool.user_dir,"surface_definitions"),os.path.join(self.rec_path,"surface_definitions"))
        except:
            logger.info("No surface_definitions data found. You may want this if you do marker tracking.")
        try:
            copy2(os.path.join(self.g_pool.user_dir,"user_calibration_data"),os.path.join(self.rec_path,"user_calibration_data"))
        except:
            logger.warning("No user calibration data found. Please calibrate first.")

        try:
            copy2(os.path.join(self.g_pool.user_dir,"cal_pt_cloud_glint.npy"),os.path.join(self.rec_path,"cal_pt_cloud_glint.npy"))
        except:
            logger.warning("No pupil-glint-vector calibration data found. Please calibrate first.")

        try:
            copy2(os.path.join(self.g_pool.user_dir,"cal_ref_list.npy"),os.path.join(self.rec_path,"cal_ref_list.npy"))
        except:
            logger.warning("No calibration reference list found.")

        try:
            copy2(os.path.join(self.g_pool.user_dir,"accuracy_test_pt_cloud.npy"),os.path.join(self.rec_path,"accuracy_test_pt_cloud.npy"))
            copy2(os.path.join(self.g_pool.user_dir,"accuracy_test_ref_list.npy"),os.path.join(self.rec_path,"accuracy_test_ref_list.npy"))
        except:
            logger.warning("No accuracy test found.")

        try:
            copy2(os.path.join(self.g_pool.user_dir,"accuracy_test_pt_cloud_previous.npy"),os.path.join(self.rec_path,"accuracy_test_pt_cloud_previous.npy"))
            copy2(os.path.join(self.g_pool.user_dir,"accuracy_test_ref_list_previous.npy"),os.path.join(self.rec_path,"accuracy_test_ref_list_previous.npy"))
        except:
            logger.warning("No previous accuracy test results.")


        camera_calibration = load_camera_calibration(self.g_pool)
        if camera_calibration is not None:
            save_object(camera_calibration,os.path.join(self.rec_path, "camera_calibration"))
        else:
            logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a') as csvfile:
                csv_utils.write_key_value_file(csvfile, {
                    'Duration Time': self.get_rec_time_str(),
                    'World Camera Frames': self.frame_count,
                    'World Camera Resolution': str(self.g_pool.capture.frame_size[0])+"x"+str(self.g_pool.capture.frame_size[1]),
                    'Capture Software Version': self.g_pool.version,
                    'System Info': get_system_info()
                }, append=True)
        except Exception:
            logger.exception("Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"), 'w') as csvfile:
                csv_utils.write_key_value_file(csvfile, self.user_info)
        except Exception:
            logger.exception("Could not save userdata. Please report this bug!")


        self.close_info_menu()

        if self.audio_writer:
            self.audio_writer = None

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.timestamps = []
        self.data = {'pupil_positions':[],'gaze_positions':[]}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        logger.info("Saved Recording.")
        self.notify_all( {'subject':'rec_stopped','rec_path':self.rec_path,'network_propagate':True} )
        self.notify_all( {'subject':'recording.stopped','rec_path':self.rec_path} )

        copyfile(os.path.join(self.g_pool.user_dir,'capture.log'), os.path.join(self.rec_path,"capture.log"))

    def cleanup(self):
        """gets called when the plugin get terminated.
           either volunatily or forced.
        """
        if self.running:
            self.stop()
        self.deinit_gui()

    def verify_path(self,val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '%s'."%n_path)
            return False
        else:
            return n_path

    def set_rec_dir(self,val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if '/' in val:
                logger.warning('Your session name will create one or more subdirectories')
            self.session_name = val
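
This fork additionally dumps glint positions and Unix timestamps as .npy arrays in stop(). A minimal sketch of reading them back (the path is hypothetical):

# Minimal sketch: load the extra arrays saved by this fork's stop().
rec_path = "recordings/session/000"  # hypothetical path
glint_positions = np.load(os.path.join(rec_path, "glint_positions.npy"))
world_ts_unix = np.load(os.path.join(rec_path, "world_timestamps_unix.npy"))
print(glint_positions.shape, world_ts_unix.shape)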
Example #7
def export(should_terminate,frames_to_export,current_frame, rec_dir,user_dir,start_frame=None,end_frame=None,plugin_initializers=[],out_file_path=None):

    logger = logging.getLogger(__name__+' with pid: '+str(os.getpid()) )

    # parse info.csv file
    meta_info_path = os.path.join(rec_dir,"info.csv")
    with open(meta_info_path) as info:
        meta_info = dict( ((line.strip().split('\t')) for line in info.readlines() ) )

    video_path = [f for f in glob(os.path.join(rec_dir,"world.*")) if f[-3:] in ('mp4','mkv','avi')][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")


    rec_version = read_rec_version(meta_info)
    if rec_version >= VersionFormat('0.5'):
        pass
    elif rec_version >= VersionFormat('0.4'):
        update_recording_0v4_to_current(rec_dir)
    elif rec_version >= VersionFormat('0.3'):
        update_recording_0v3_to_current(rec_dir)
        timestamps_path = os.path.join(rec_dir, "timestamps.npy")
    else:
        logger.error("This recording is too old. Sorry.")
        return


    timestamps = np.load(timestamps_path)

    cap = File_Capture(video_path,timestamps=timestamps)


    # Out file path verification. We do this before, but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name =  os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name,file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s"%out_file_path)


    # Trim mark verification
    # Make sure the trim marks (start_frame, end_frame) make sense: we define them like Python list slices, thus we can test them as such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps)==0:
        logger.warn("Start and end frames are set such that no video will be exported.")
        return False

    if start_frame == None:
        start_frame = 0

    # These two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug("Will export from frame %s to frame %s. This means I will export %s frames."%(start_frame,start_frame+frames_to_export.value,frames_to_export.value))

    #setup of writer
    writer = AV_Writer(out_file_path,fps=cap.frame_rate,use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g = Global_Container()
    g.app = 'exporter'
    g.capture = cap
    g.rec_dir = rec_dir
    g.user_dir = user_dir
    g.rec_version = rec_version
    g.timestamps = timestamps


    # load pupil_positions, gaze_positions
    pupil_data = load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']

    g.pupil_positions_by_frame = correlate_data(pupil_list,g.timestamps)
    g.gaze_positions_by_frame = correlate_data(gaze_list,g.timestamps)
    g.fixations_by_frame = [[] for x in g.timestamps] #populated by the fixation detector plugin

    #add plugins
    g.plugins = Plugin_List(g,plugin_by_name,plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s."%(current_frame.value,out_file_path))

            #explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events
        events['gaze_positions'] = g.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g.pupil_positions_by_frame[frame.index]

        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame,events)

        writer.write_video_frame(frame)
        current_frame.value +=1

    writer.close()
    writer = None

    duration = time()-start_time
    effective_fps = float(current_frame.value)/duration

    logger.info("Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"%(current_frame.value,out_file_path,duration,effective_fps))
    return True
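
export() polls multiprocessing shared values (should_terminate, frames_to_export, current_frame), so it is designed to run in a worker process while the launcher watches progress. A minimal sketch of launching it that way (all paths are hypothetical):

# Minimal sketch: run export() in a worker process with shared progress counters,
# as suggested by the .value accesses above. All paths are hypothetical.
from multiprocessing import Process, Value
from ctypes import c_bool, c_int

should_terminate = Value(c_bool, False)
frames_to_export = Value(c_int, 0)
current_frame = Value(c_int, 0)

proc = Process(target=export,
               args=(should_terminate, frames_to_export, current_frame,
                     "/path/to/recording", "/path/to/user_dir"),
               kwargs={'start_frame': 0, 'end_frame': None,
                       'out_file_path': "/path/to/world_viz.mp4"})
proc.start()
proc.join()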
Example #8
class Realsense_Source(Base_Source):
    """
    Camera Capture is a class that encapsulates pyrs.Device:
    """

    def __init__(
        self,
        g_pool,
        device_id=0,
        frame_size=(1920, 1080),
        frame_rate=30,
        depth_frame_size=(640, 480),
        depth_frame_rate=60,
        align_streams=False,
        preview_depth=False,
        device_options=(),
        record_depth=True,
        stream_preset=None,
    ):
        super().__init__(g_pool)
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.device = None
        self.service = pyrs.Service()
        self.align_streams = align_streams
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self.controls = None
        self.pitch = 0
        self.yaw = 0
        self.mouse_drag = False
        self.last_pos = (0, 0)
        self.depth_window = None
        self._needs_restart = False
        self.stream_preset = stream_preset
        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )

    def _initialize_device(
        self,
        device_id,
        color_frame_size,
        color_fps,
        depth_frame_size,
        depth_fps,
        device_options=(),
    ):
        devices = tuple(self.service.get_devices())
        color_frame_size = tuple(color_frame_size)
        depth_frame_size = tuple(depth_frame_size)

        self.streams = [ColorStream(), DepthStream(), PointStream()]
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if not devices:
            if not self._needs_restart:
                logger.error("Camera failed to initialize. No cameras connected.")
            self.device = None
            self.update_menu()
            return

        if self.device is not None:
            self.device.stop()  # only call Device.stop() if its context

        if device_id >= len(devices):
            logger.error(
                "Camera with id {} not found. Initializing default camera.".format(
                    device_id
                )
            )
            device_id = 0

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)

        # make sure that given frame sizes and rates are available
        color_modes = self._available_modes[rs_stream.RS_STREAM_COLOR]
        if color_frame_size not in color_modes:
            # automatically select highest resolution
            color_frame_size = sorted(color_modes.keys(), reverse=True)[0]

        if color_fps not in color_modes[color_frame_size]:
            # automatically select highest frame rate
            color_fps = color_modes[color_frame_size][0]

        depth_modes = self._available_modes[rs_stream.RS_STREAM_DEPTH]
        if self.align_streams:
            depth_frame_size = color_frame_size
        else:
            if depth_frame_size not in depth_modes:
                # automatically select highest resolution
                depth_frame_size = sorted(depth_modes.keys(), reverse=True)[0]

        if depth_fps not in depth_modes[depth_frame_size]:
            # automatically select highest frame rate
            depth_fps = depth_modes[depth_frame_size][0]

        colorstream = ColorStream(
            width=color_frame_size[0],
            height=color_frame_size[1],
            fps=color_fps,
            color_format="yuv",
            preset=self.stream_preset,
        )
        depthstream = DepthStream(
            width=depth_frame_size[0],
            height=depth_frame_size[1],
            fps=depth_fps,
            preset=self.stream_preset,
        )
        pointstream = PointStream(
            width=depth_frame_size[0], height=depth_frame_size[1], fps=depth_fps
        )

        self.streams = [colorstream, depthstream, pointstream]
        if self.align_streams:
            dacstream = DACStream(
                width=depth_frame_size[0], height=depth_frame_size[1], fps=depth_fps
            )
            dacstream.name = "depth"  # rename data accessor
            self.streams.append(dacstream)

        # update with correctly initialized streams
        # always initializes color + depth, adds rectified/aligned versions as necessary

        self.device = self.service.Device(device_id, streams=self.streams)
        self.controls = Realsense_Controls(self.device, device_options)
        self._intrinsics = load_intrinsics(
            self.g_pool.user_dir, self.name, self.frame_size
        )

        self.update_menu()
        self._needs_restart = False

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates
        """
        formats = {}
        # only lists modes for native streams (RS_STREAM_COLOR/RS_STREAM_DEPTH)
        for mode in self.service.get_device_modes(device_id):
            if mode.stream in (rs_stream.RS_STREAM_COLOR, rs_stream.RS_STREAM_DEPTH):
                # check if frame size dict is available
                if mode.stream not in formats:
                    formats[mode.stream] = {}
                stream_obj = next((s for s in self.streams if s.stream == mode.stream))
                if mode.format == stream_obj.format:
                    size = mode.width, mode.height
                    # check if framerate list is already available
                    if size not in formats[mode.stream]:
                        formats[mode.stream][size] = []
                    formats[mode.stream][size].append(mode.fps)

        if self.align_streams:
            depth_sizes = formats[rs_stream.RS_STREAM_DEPTH].keys()
            color_sizes = formats[rs_stream.RS_STREAM_COLOR].keys()
            # common_sizes = depth_sizes & color_sizes
            discarded_sizes = depth_sizes ^ color_sizes
            for size in discarded_sizes:
                for sizes in formats.values():
                    if size in sizes:
                        del sizes[size]

        return formats

    def cleanup(self):
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        if self.device is not None:
            self.device.stop()
        self.service.stop()

    def get_init_dict(self):
        return {
            "device_id": self.device.device_id if self.device is not None else 0,
            "frame_size": self.frame_size,
            "frame_rate": self.frame_rate,
            "depth_frame_size": self.depth_frame_size,
            "depth_frame_rate": self.depth_frame_rate,
            "preview_depth": self.preview_depth,
            "record_depth": self.record_depth,
            "align_streams": self.align_streams,
            "device_options": self.controls.export_presets()
            if self.controls is not None
            else (),
            "stream_preset": self.stream_preset,
        }

    def get_frames(self):
        if self.device:
            self.device.wait_for_frames()
            current_time = self.g_pool.get_timestamp()

            last_color_frame_ts = self.device.get_frame_timestamp(
                self.streams[0].stream
            )
            if self.last_color_frame_ts != last_color_frame_ts:
                self.last_color_frame_ts = last_color_frame_ts
                color = ColorFrame(self.device)
                color.timestamp = current_time
                color.index = self.color_frame_index
                self.color_frame_index += 1
            else:
                color = None

            last_depth_frame_ts = self.device.get_frame_timestamp(
                self.streams[1].stream
            )
            if self.last_depth_frame_ts != last_depth_frame_ts:
                self.last_depth_frame_ts = last_depth_frame_ts
                depth = DepthFrame(self.device)
                depth.timestamp = current_time
                depth.index = self.depth_frame_index
                self.depth_frame_index += 1
            else:
                depth = None

            return color, depth
        return None, None

    def recent_events(self, events):
        if self._needs_restart:
            self.restart_device()
            time.sleep(0.05)
        elif not self.online:
            time.sleep(0.05)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except (pyrs.RealsenseError, TimeoutError) as err:
            logger.warning("Realsense failed to provide frames. Attempting to reinit.")
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            if color_frame and depth_frame:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        self.remove_menu()

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        try:
            del self.menu[:]
        except AttributeError:
            return

        from pyglui import ui

        if self.device is None:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        def align_and_restart(val):
            self.align_streams = val
            self.restart_device()

        self.menu.append(ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(ui.Switch("preview_depth", self, label="Preview Depth"))
        self.menu.append(
            ui.Switch(
                "align_streams", self, label="Align Streams", setter=align_and_restart
            )
        )

        def toggle_depth_display():
            def on_depth_mouse_button(window, button, action, mods):
                if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
                    self.mouse_drag = True
                if (
                    button == glfw.GLFW_MOUSE_BUTTON_LEFT
                    and action == glfw.GLFW_RELEASE
                ):
                    self.mouse_drag = False

            if self.depth_window is None:
                self.pitch = 0
                self.yaw = 0

                win_size = glfw.glfwGetWindowSize(self.g_pool.main_window)
                self.depth_window = glfw.glfwCreateWindow(
                    win_size[0], win_size[1], "3D Point Cloud"
                )
                glfw.glfwSetMouseButtonCallback(
                    self.depth_window, on_depth_mouse_button
                )
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(self.depth_window)
                gl_utils.basic_gl_setup()
                gl_utils.make_coord_system_norm_based()

                # refresh speed settings
                glfw.glfwSwapInterval(0)

                glfw.glfwMakeContextCurrent(active_window)

        native_presets = [
            ("None", None),
            ("Best Quality", rs_preset.RS_PRESET_BEST_QUALITY),
            ("Largest image", rs_preset.RS_PRESET_LARGEST_IMAGE),
            ("Highest framerate", rs_preset.RS_PRESET_HIGHEST_FRAMERATE),
        ]

        def set_stream_preset(val):
            if self.stream_preset != val:
                self.stream_preset = val
                self.restart_device()

        self.menu.append(
            ui.Selector(
                "stream_preset",
                self,
                setter=set_stream_preset,
                labels=[preset[0] for preset in native_presets],
                selection=[preset[1] for preset in native_presets],
                label="Stream preset",
            )
        )
        color_sizes = sorted(
            self._available_modes[rs_stream.RS_STREAM_COLOR], reverse=True
        )
        selector = ui.Selector(
            "frame_size",
            self,
            # setter=,
            selection=color_sizes,
            label="Resolution" if self.align_streams else "Color Resolution",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def color_fps_getter():
            avail_fps = [
                fps
                for fps in self._available_modes[rs_stream.RS_STREAM_COLOR][
                    self.frame_size
                ]
                if self.depth_frame_rate % fps == 0
            ]
            return avail_fps, [str(fps) for fps in avail_fps]

        selector = ui.Selector(
            "frame_rate",
            self,
            # setter=,
            selection_getter=color_fps_getter,
            label="Color Frame Rate",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        if not self.align_streams:
            depth_sizes = sorted(
                self._available_modes[rs_stream.RS_STREAM_DEPTH], reverse=True
            )
            selector = ui.Selector(
                "depth_frame_size",
                self,
                # setter=,
                selection=depth_sizes,
                label="Depth Resolution",
            )
            selector.read_only = self.stream_preset is not None
            self.menu.append(selector)

        def depth_fps_getter():
            avail_fps = [
                fps
                for fps in self._available_modes[rs_stream.RS_STREAM_DEPTH][
                    self.depth_frame_size
                ]
                if fps % self.frame_rate == 0
            ]
            return avail_fps, [str(fps) for fps in avail_fps]

        selector = ui.Selector(
            "depth_frame_rate",
            self,
            selection_getter=depth_fps_getter,
            label="Depth Frame Rate",
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def reset_options():
            if self.device:
                try:
                    self.device.reset_device_options_to_default(self.controls.keys())
                except pyrs.RealsenseError as err:
                    logger.info("Resetting some device options failed")
                    logger.debug("Reason: {}".format(err))
                finally:
                    self.controls.refresh()

        self.menu.append(ui.Button("Point Cloud Window", toggle_depth_display))
        sensor_control = ui.Growing_Menu(label="Sensor Settings")
        sensor_control.append(
            ui.Button("Reset device options to default", reset_options)
        )
        for ctrl in sorted(self.controls.values(), key=lambda x: x.range.option):
            # sensor_control.append(ui.Info_Text(ctrl.description))
            if (
                ctrl.range.min == 0.0
                and ctrl.range.max == 1.0
                and ctrl.range.step == 1.0
            ):
                sensor_control.append(
                    ui.Switch("value", ctrl, label=ctrl.label, off_val=0.0, on_val=1.0)
                )
            else:
                sensor_control.append(
                    ui.Slider(
                        "value",
                        ctrl,
                        label=ctrl.label,
                        min=ctrl.range.min,
                        max=ctrl.range.max,
                        step=ctrl.range.step,
                    )
                )
        self.menu.append(sensor_control)

    def gl_display(self):
        from math import floor

        if self.depth_window is not None and glfw.glfwWindowShouldClose(
            self.depth_window
        ):
            glfw.glfwDestroyWindow(self.depth_window)
            self.depth_window = None

        if self.depth_window is not None and self._recent_depth_frame is not None:
            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(self.depth_window)

            win_size = glfw.glfwGetFramebufferSize(self.depth_window)
            gl_utils.adjust_gl_view(win_size[0], win_size[1])
            pos = glfw.glfwGetCursorPos(self.depth_window)
            if self.mouse_drag:
                self.pitch = np.clip(self.pitch + (pos[1] - self.last_pos[1]), -80, 80)
                self.yaw = np.clip(self.yaw - (pos[0] - self.last_pos[0]), -120, 120)
            self.last_pos = pos

            glClearColor(0, 0, 0, 0)
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            glMatrixMode(GL_PROJECTION)
            glLoadIdentity()
            gluPerspective(60, win_size[0] / win_size[1], 0.01, 20.0)
            glMatrixMode(GL_MODELVIEW)
            glLoadIdentity()
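            # camera at the origin looking along +Z with Y flipped; pitch/yaw orbit the view about a pivot 0.5 units ahead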
            gluLookAt(0, 0, 0, 0, 0, 1, 0, -1, 0)
            glTranslatef(0, 0, 0.5)
            glRotated(self.pitch, 1, 0, 0)
            glRotated(self.yaw, 0, 1, 0)
            glTranslatef(0, 0, -0.5)

            # glPointSize(2)
            glEnable(GL_DEPTH_TEST)
            extrinsics = self.device.get_device_extrinsics(
                rs_stream.RS_STREAM_DEPTH, rs_stream.RS_STREAM_COLOR
            )
            depth_frame = self._recent_depth_frame
            color_frame = self._recent_frame
            depth_scale = self.device.depth_scale

            glEnableClientState(GL_VERTEX_ARRAY)

            pointcloud = self.device.pointcloud
            glVertexPointer(3, GL_FLOAT, 0, pointcloud)
            glEnableClientState(GL_COLOR_ARRAY)
            depth_to_color = np.zeros(
                depth_frame.height * depth_frame.width * 3, np.uint8
            )
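            # per-point BGR colors, filled in place below by projecting each 3D point into the color image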
            rsutilwrapper.project_pointcloud_to_pixel(
                depth_to_color,
                self.device.depth_intrinsics,
                self.device.color_intrinsics,
                extrinsics,
                pointcloud,
                self._recent_frame.bgr,
            )
            glColorPointer(3, GL_UNSIGNED_BYTE, 0, depth_to_color)
            glDrawArrays(GL_POINTS, 0, depth_frame.width * depth_frame.height)
            gl_utils.glFlush()
            glDisable(GL_DEPTH_TEST)
            # gl_utils.make_coord_system_norm_based()
            glfw.glfwSwapBuffers(self.depth_window)
            glfw.glfwMakeContextCurrent(active_window)

        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3)
        )

    def restart_device(
        self,
        device_id=None,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        if device_id is None:
            if self.device is not None:
                device_id = self.device.device_id
            else:
                device_id = 0
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = self.controls.export_presets()
        if self.device is not None:
            self.device.stop()
            self.device = None
        self.service.stop()
        self.service.start()
        self.notify_all(
            {
                "subject": "realsense_source.restart",
                "device_id": device_id,
                "color_frame_size": color_frame_size,
                "color_fps": color_fps,
                "depth_frame_size": depth_frame_size,
                "depth_fps": depth_fps,
                "device_options": device_options,
            }
        )

    def on_click(self, pos, button, action):
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
            self.mouse_drag = True
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_RELEASE:
            self.mouse_drag = False

    def on_notify(self, notification):
        if notification["subject"] == "realsense_source.restart":
            kwargs = notification.copy()
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif notification["subject"] == "recording.started":
            self.start_depth_recording(notification["rec_path"])
        elif notification["subject"] == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = AV_Writer(
            video_path, fps=self.depth_frame_rate, use_timestamps=True
        )

    def stop_depth_recording(self):
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def frame_size(self):
        stream = self.streams[0]
        return stream.width, stream.height

    @frame_size.setter
    def frame_size(self, new_size):
        if self.device is not None and new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        return self.streams[0].fps

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        stream = self.streams[1]
        return stream.width, stream.height

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if self.device is not None and new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        return self.streams[1].fps

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        return False

    @property
    def online(self):
        return self.device and self.device.is_streaming()

    @property
    def name(self):
        # not the same as `if self.device:`!
        if self.device is not None:
            return self.device.name
        else:
            return "Ghost capture"
Example #9
0
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation for the world video and saves a certain time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # We are not importing manual gaze correction: in Player, corrections have already been
    # applied, and in the batch exporter this plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, init_playback_source
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_eye_video_overlay import Vis_Eye_Video_Overlay
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Vis_Eye_Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        audio_path = os.path.join(rec_dir, "audio.mp4")

        meta_info = pm.load_meta_info(rec_dir)

        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(f for f in glob(os.path.join(rec_dir, "world.*"))
                              if os.path.splitext(f)[1] in valid_ext)
        except StopIteration:
            raise FileNotFoundError("No world video found")
        cap = init_playback_source(g_pool, source_path=video_path, timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like Python list slices, thus we can test them as such.
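        # e.g. timestamps[None:None] keeps every frame, while an empty slice (start == end) aborts the export below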
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(start_frame, start_frame + frames_to_export,
                            frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path,
                           fps=cap.frame_rate,
                           audio_loc=audio_path,
                           use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration,
                          effective_fps))
        yield "Export done. This took {:.0f} seconds.".format(
            duration), current_frame

    except GeneratorExit:
        logger.warning("Video export with pid {} was canceled.".format(
            os.getpid()))
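
A minimal, self-contained sketch of the generator-with-progress pattern used by _export_world_video above: the worker yields (status, progress) tuples and the caller drives it with a plain for loop. fake_export and its arguments are illustrative only and not part of the pupil code base.

def fake_export(n_frames):
    # stand-in for _export_world_video: report a status string and a progress counter
    yield "starting export", 0
    for i in range(1, n_frames + 1):
        yield "exporting frame", i

for status, progress in fake_export(5):
    print(status, progress)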
Example #10
0
def export(should_terminate, frames_to_export, current_frame, rec_dir,
           start_frame=None, end_frame=None, plugin_initializers=[],
           out_file_path=None):

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    # parse info.csv file
    with open(rec_dir + "/info.csv") as info:
        meta_info = dict(line.strip().split('\t') for line in info.readlines())
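    # e.g. the line "Recording Name\tsession_000" becomes {"Recording Name": "session_000"}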
    rec_version = read_rec_version(meta_info)
    logger.debug("Exporting a video from recording with version: %s"%rec_version)

    if rec_version < VersionFormat('0.4'):
        video_path = rec_dir + "/world.avi"
        timestamps_path = rec_dir + "/timestamps.npy"
    else:
        video_path = rec_dir + "/world.mkv"
        timestamps_path = rec_dir + "/world_timestamps.npy"

    gaze_positions_path = rec_dir + "/gaze_positions.npy"
    #load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)

    #correlate data
    if rec_version < VersionFormat('0.4'):
        positions_by_frame = correlate_gaze_legacy(gaze_list,timestamps)
    else:
        positions_by_frame = correlate_gaze(gaze_list,timestamps)

    cap = autoCreateCapture(video_path,timestamps=timestamps_path)
    width,height = cap.get_size()

    # Out file path verification; we do this before, but if one uses a separate tool this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name =  os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name,file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s"%out_file_path)


    # Trim mark verification
    # make sure the trim marks (start frame, end frame) make sense:
    # we define them like Python list slices, thus we can test them as such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps)==0:
        logger.warning("Start and end frames are set such that no video will be exported.")
        return False

    if start_frame is None:
        start_frame = 0

    # these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug("Will export from frame %s to frame %s. This means I will export %s frames."%(start_frame,start_frame+frames_to_export.value,frames_to_export.value))

    #setup of writer
    writer = AV_Writer(out_file_path)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g = Global_Container()
    g.app = 'exporter'
    g.rec_dir = rec_dir
    g.rec_version = rec_version
    g.timestamps = timestamps
    g.gaze_list = gaze_list
    g.positions_by_frame = positions_by_frame
    g.plugins = Plugin_List(g,plugin_by_name,plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s."%(current_frame.value,out_file_path))

            #explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame()
        except EndofVideoFileError:
            break

        events = {}
        # new positions and events
        events['pupil_positions'] = positions_by_frame[frame.index]
        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame,events)

        writer.write_video_frame(frame)
        current_frame.value +=1

    writer.close()
    writer = None

    duration = time()-start_time
    effective_fps = float(current_frame.value)/duration

    logger.info("Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"%(current_frame.value,out_file_path,duration,effective_fps))
    return True
Example #11
0
class Realsense2_Source(Base_Source):
    def __init__(
        self,
        g_pool,
        device_id=None,
        frame_size=DEFAULT_COLOR_SIZE,
        frame_rate=DEFAULT_COLOR_FPS,
        depth_frame_size=DEFAULT_DEPTH_SIZE,
        depth_frame_rate=DEFAULT_DEPTH_FPS,
        preview_depth=False,
        device_options=(),
        record_depth=True,
    ):
        logger.debug("_init_ started")
        super().__init__(g_pool)
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.context = rs.context()
        self.pipeline = rs.pipeline(self.context)
        self.pipeline_profile = None
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self._needs_restart = False
        self.frame_size_backup = DEFAULT_COLOR_SIZE
        self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
        self.frame_rate_backup = DEFAULT_COLOR_FPS
        self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )
        logger.debug("_init_ completed")

    def _initialize_device(
        self,
        device_id,
        color_frame_size,
        color_fps,
        depth_frame_size,
        depth_fps,
        device_options=(),
    ):
        self.stop_pipeline()
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if device_id is None:
            device_id = self.device_id

        if device_id is None:  # FIXME these two if blocks look ugly.
            return

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)
        logger.debug(
            "device_id: {} self._available_modes: {}".format(
                device_id, str(self._available_modes)
            )
        )

        if (
            color_frame_size is not None
            and depth_frame_size is not None
            and color_fps is not None
            and depth_fps is not None
        ):
            color_frame_size = tuple(color_frame_size)
            depth_frame_size = tuple(depth_frame_size)

            logger.debug(
                "Initialize with Color {}@{}\tDepth {}@{}".format(
                    color_frame_size, color_fps, depth_frame_size, depth_fps
                )
            )

            # make sure the frame rates are compatible with the given frame sizes
            color_fps = self._get_valid_frame_rate(
                rs.stream.color, color_frame_size, color_fps
            )
            depth_fps = self._get_valid_frame_rate(
                rs.stream.depth, depth_frame_size, depth_fps
            )

            self.frame_size_backup = color_frame_size
            self.depth_frame_size_backup = depth_frame_size
            self.frame_rate_backup = color_fps
            self.depth_frame_rate_backup = depth_fps

            config = self._prep_configuration(
                color_frame_size, color_fps, depth_frame_size, depth_fps
            )
        else:
            config = self._get_default_config()
            self.frame_size_backup = DEFAULT_COLOR_SIZE
            self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
            self.frame_rate_backup = DEFAULT_COLOR_FPS
            self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        try:
            self.pipeline_profile = self.pipeline.start(config)
        except RuntimeError as re:
            logger.error("Cannot start pipeline! " + str(re))
            self.pipeline_profile = None
        else:
            self.stream_profiles = {
                s.stream_type(): s.as_video_stream_profile()
                for s in self.pipeline_profile.get_streams()
            }
            logger.debug("Pipeline started for device " + device_id)
            logger.debug("Stream profiles: " + str(self.stream_profiles))

            self._intrinsics = load_intrinsics(
                self.g_pool.user_dir, self.name, self.frame_size
            )
            self.update_menu()
            self._needs_restart = False

    def _prep_configuration(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
    ):
        config = rs.config()

        # only use these two formats
        color_format = rs.format.yuyv
        depth_format = rs.format.z16

        config.enable_stream(
            rs.stream.depth,
            depth_frame_size[0],
            depth_frame_size[1],
            depth_format,
            depth_fps,
        )

        config.enable_stream(
            rs.stream.color,
            color_frame_size[0],
            color_frame_size[1],
            color_format,
            color_fps,
        )

        return config

    def _get_default_config(self):
        config = rs.config()  # default config is RGB8, we want YUYV
        config.enable_stream(
            rs.stream.color,
            DEFAULT_COLOR_SIZE[0],
            DEFAULT_COLOR_SIZE[1],
            rs.format.yuyv,
            DEFAULT_COLOR_FPS,
        )
        config.enable_stream(
            rs.stream.depth,
            DEFAULT_DEPTH_SIZE[0],
            DEFAULT_DEPTH_SIZE[1],
            rs.format.z16,
            DEFAULT_DEPTH_FPS,
        )
        return config

    def _get_valid_frame_rate(self, stream_type, frame_size, fps):
        assert stream_type == rs.stream.color or stream_type == rs.stream.depth

        if not self._available_modes or stream_type not in self._available_modes:
            logger.warning(
                "_get_valid_frame_rate: self._available_modes not set yet. Returning default fps."
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS
            else:
                raise ValueError("Unexpected `stream_type`: {}".format(stream_type))

        if frame_size not in self._available_modes[stream_type]:
            logger.error(
                "Frame size not supported for {}: {}. Returning default fps".format(
                    stream_type, frame_size
                )
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS

        if fps not in self._available_modes[stream_type][frame_size]:
            old_fps = fps
            rates = [
                abs(r - fps) for r in self._available_modes[stream_type][frame_size]
            ]
            best_rate_idx = rates.index(min(rates))
            fps = self._available_modes[stream_type][frame_size][best_rate_idx]
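            # e.g. a requested 25 fps with available rates [15, 30, 60] falls back to 30 (smallest difference)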
            logger.warning(
                "{} fps is not supported for {} at {}. Falling back to {} fps".format(
                    old_fps, stream_type, frame_size, fps
                )
            )

        return fps

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates
        """
        formats = {}

        if self.context is None:
            return formats

        devices = self.context.query_devices()
        current_device = None

        for d in devices:
            try:
                serial = d.get_info(rs.camera_info.serial_number)
            except RuntimeError as re:
                logger.error("Device no longer available " + str(re))
            else:
                if device_id == serial:
                    current_device = d

        if current_device is None:
            return formats
        logger.debug("Found the current device: " + device_id)

        sensors = current_device.query_sensors()
        for s in sensors:
            stream_profiles = s.get_stream_profiles()
            for sp in stream_profiles:
                vp = sp.as_video_stream_profile()
                stream_type = vp.stream_type()

                if stream_type not in (rs.stream.color, rs.stream.depth):
                    continue
                elif vp.format() not in (rs.format.z16, rs.format.yuyv):
                    continue

                formats.setdefault(stream_type, {})
                stream_resolution = (vp.width(), vp.height())
                formats[stream_type].setdefault(stream_resolution, []).append(vp.fps())

        return formats

    def stop_pipeline(self):
        if self.online:
            try:
                self.pipeline_profile = None
                self.stream_profiles = None
                self.pipeline.stop()
                logger.debug("Pipeline stopped.")
            except RuntimeError as re:
                logger.error("Cannot stop the pipeline: " + str(re))

    def cleanup(self):
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        self.stop_pipeline()

    def get_init_dict(self):
        return {
            "frame_size": self.frame_size,
            "frame_rate": self.frame_rate,
            "depth_frame_size": self.depth_frame_size,
            "depth_frame_rate": self.depth_frame_rate,
            "preview_depth": self.preview_depth,
            "record_depth": self.record_depth,
        }

    def get_frames(self):
        if self.online:
            try:
                frames = self.pipeline.wait_for_frames(TIMEOUT)
            except RuntimeError as e:
                logger.error("get_frames: Timeout!")
                raise RuntimeError(e)
            else:
                current_time = self.g_pool.get_timestamp()

                color = None
                # if we're expecting color frames
                if rs.stream.color in self.stream_profiles:
                    color_frame = frames.get_color_frame()
                    last_color_frame_ts = color_frame.get_timestamp()
                    if self.last_color_frame_ts != last_color_frame_ts:
                        self.last_color_frame_ts = last_color_frame_ts
                        color = ColorFrame(
                            np.asanyarray(color_frame.get_data()),
                            current_time,
                            self.color_frame_index,
                        )
                        self.color_frame_index += 1

                depth = None
                # if we're expecting depth frames
                if rs.stream.depth in self.stream_profiles:
                    depth_frame = frames.get_depth_frame()
                    last_depth_frame_ts = depth_frame.get_timestamp()
                    if self.last_depth_frame_ts != last_depth_frame_ts:
                        self.last_depth_frame_ts = last_depth_frame_ts
                        depth = DepthFrame(
                            np.asanyarray(depth_frame.get_data()),
                            current_time,
                            self.depth_frame_index,
                        )
                        self.depth_frame_index += 1

                return color, depth
        return None, None

    def recent_events(self, events):
        if self._needs_restart or not self.online:
            logger.debug("recent_events -> restarting device")
            self.restart_device()
            time.sleep(0.01)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except RuntimeError as re:
            logger.warning("Realsense failed to provide frames." + str(re))
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            if color_frame is not None:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame is not None:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        self.remove_menu()

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        logger.debug("update_menu")
        try:
            del self.menu[:]
        except AttributeError:
            return

        from pyglui import ui

        if not self.online:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        self.menu.append(ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(ui.Switch("preview_depth", self, label="Preview Depth"))

        if self._available_modes is not None:

            def frame_size_selection_getter():
                if self.device_id:
                    frame_size = sorted(
                        self._available_modes[rs.stream.color], reverse=True
                    )
                    labels = ["({}, {})".format(t[0], t[1]) for t in frame_size]
                    return frame_size, labels
                else:
                    return [self.frame_size_backup], [str(self.frame_size_backup)]

            selector = ui.Selector(
                "frame_size",
                self,
                selection_getter=frame_size_selection_getter,
                label="Color Resolution",
            )
            self.menu.append(selector)

            def frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps
                        for fps in self._available_modes[rs.stream.color][
                            self.frame_size
                        ]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return [self.frame_rate_backup], [str(self.frame_rate_backup)]

            selector = ui.Selector(
                "frame_rate",
                self,
                selection_getter=frame_rate_selection_getter,
                label="Color Frame Rate",
            )
            self.menu.append(selector)

            def depth_frame_size_selection_getter():
                if self.device_id:
                    depth_sizes = sorted(
                        self._available_modes[rs.stream.depth], reverse=True
                    )
                    labels = ["({}, {})".format(t[0], t[1]) for t in depth_sizes]
                    return depth_sizes, labels
                else:
                    return (
                        [self.depth_frame_size_backup],
                        [str(self.depth_frame_size_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_size",
                self,
                selection_getter=depth_frame_size_selection_getter,
                label="Depth Resolution",
            )
            self.menu.append(selector)

            def depth_frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps
                        for fps in self._available_modes[rs.stream.depth][
                            self.depth_frame_size
                        ]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return (
                        [self.depth_frame_rate_backup],
                        [str(self.depth_frame_rate_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_rate",
                self,
                selection_getter=depth_frame_rate_selection_getter,
                label="Depth Frame Rate",
            )
            self.menu.append(selector)

            def reset_options():
                logger.debug("reset_options")
                self.reset_device(self.device_id)

            sensor_control = ui.Growing_Menu(label="Sensor Settings")
            sensor_control.append(
                ui.Button("Reset device options to default", reset_options)
            )
            self.menu.append(sensor_control)
        else:
            logger.debug("update_menu: self._available_modes is None")

    def gl_display(self):

        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3)
        )

    def reset_device(self, device_id):
        logger.debug("reset_device")
        if device_id is None:
            device_id = self.device_id

        self.notify_all(
            {
                "subject": "realsense2_source.restart",
                "device_id": device_id,
                "color_frame_size": None,
                "color_fps": None,
                "depth_frame_size": None,
                "depth_fps": None,
                "device_options": [],  # FIXME
            }
        )

    def restart_device(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = []  # FIXME
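        # the restart is dispatched as a notification; on_notify() below re-initializes the device with these settings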

        self.notify_all(
            {
                "subject": "realsense2_source.restart",
                "device_id": None,
                "color_frame_size": color_frame_size,
                "color_fps": color_fps,
                "depth_frame_size": depth_frame_size,
                "depth_fps": depth_fps,
                "device_options": device_options,
            }
        )
        logger.debug("self.restart_device --> self.notify_all")

    def on_notify(self, notification):
        logger.debug(
            'self.on_notify, notification["subject"]: ' + notification["subject"]
        )
        if notification["subject"] == "realsense2_source.restart":
            kwargs = notification.copy()
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif notification["subject"] == "recording.started":
            self.start_depth_recording(notification["rec_path"])
        elif notification["subject"] == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = AV_Writer(
            video_path, fps=self.depth_frame_rate, use_timestamps=True
        )

    def stop_depth_recording(self):
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def device_id(self):
        if self.online:  # already running
            return self.pipeline_profile.get_device().get_info(
                rs.camera_info.serial_number
            )
        else:
            # set the first available device
            devices = self.context.query_devices()
            if devices:
                logger.info("device_id: first device by default.")
                return devices[0].get_info(rs.camera_info.serial_number)
            else:
                logger.debug("device_id: No device connected.")
                return None

    @property
    def frame_size(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except (AttributeError, KeyError, TypeError):
            return self.frame_size_backup

    @frame_size.setter
    def frame_size(self, new_size):
        if new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check FPS is in self.available modes
            return stream_profile.fps()
        except (AttributeError, KeyError, TypeError):
            return self.frame_rate_backup

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except (AttributeError, KeyError, TypeError):
            return self.depth_frame_size_backup

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            return stream_profile.fps()
        except (AttributeError, KeyError, TypeError):
            return self.depth_frame_rate_backup

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        return False

    @property
    def online(self):
        return self.pipeline_profile is not None and self.pipeline is not None

    @property
    def name(self):
        if self.online:
            return self.pipeline_profile.get_device().get_info(rs.camera_info.name)
        else:
            logger.debug(
                "self.name: Realsense2 not online. Falling back to Ghost capture"
            )
            return "Ghost capture"
Example #12
0
class Realsense_Source(Base_Source):
    """
    Camera Capture is a class that encapsulates pyrs.Device.
    """
    def __init__(self,
                 g_pool,
                 device_id=0,
                 frame_size=(1920, 1080),
                 frame_rate=30,
                 depth_frame_size=(640, 480),
                 depth_frame_rate=30,
                 align_streams=False,
                 preview_depth=False,
                 device_options=(),
                 record_depth=True):
        super().__init__(g_pool)
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.device = None
        self.service = pyrs.Service()
        self.align_streams = align_streams
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self.controls = None
        self._initialize_device(device_id, frame_size, frame_rate,
                                depth_frame_size, depth_frame_rate,
                                device_options)

    def _initialize_device(self,
                           device_id,
                           color_frame_size,
                           color_fps,
                           depth_frame_size,
                           depth_fps,
                           device_options=()):
        devices = tuple(self.service.get_devices())
        color_frame_size = tuple(color_frame_size)
        depth_frame_size = tuple(depth_frame_size)

        self.streams = [ColorStream(), DepthStream()]
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if not devices:
            logger.error("Camera failed to initialize. No cameras connected.")
            self.device = None
            self.update_menu()
            return

        if self.device is not None:
            self.device.stop()  # only call Device.stop() if a device was previously initialized

        if device_id >= len(devices):
            logger.error(
                "Camera with id {} not found. Initializing default camera.".
                format(device_id))
            device_id = 0

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)

        # make sure that given frame sizes and rates are available
        color_modes = self._available_modes[rs_stream.RS_STREAM_COLOR]
        if color_frame_size not in color_modes:
            # automatically select highest resolution
            color_frame_size = sorted(color_modes.keys(), reverse=True)[0]

        if color_fps not in color_modes[color_frame_size]:
            # automatically select highest frame rate
            color_fps = color_modes[color_frame_size][0]

        depth_modes = self._available_modes[rs_stream.RS_STREAM_DEPTH]
        if self.align_streams:
            depth_frame_size = color_frame_size
        else:
            if depth_frame_size not in depth_modes:
                # automatically select highest resolution
                depth_frame_size = sorted(depth_modes.keys(), reverse=True)[0]

        if depth_fps not in depth_modes[depth_frame_size]:
            # automatically select highest frame rate
            depth_fps = depth_modes[depth_frame_size][0]

        colorstream = ColorStream(width=color_frame_size[0],
                                  height=color_frame_size[1],
                                  fps=color_fps,
                                  color_format='yuv')
        depthstream = DepthStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1],
                                  fps=depth_fps)

        self.streams = [colorstream, depthstream]
        if self.align_streams:
            dacstream = DACStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1],
                                  fps=depth_fps)
            dacstream.name = 'depth'  # rename data accessor
            self.streams.append(dacstream)

        # update with correctly initialized streams
        # always initializes color + depth, adds rectified/aligned versions as necessary

        self.device = self.service.Device(device_id, streams=self.streams)

        self.controls = Realsense_Controls(self.device, device_options)
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name,
                                           self.frame_size)

        self.update_menu()

    def _enumerate_formats(self, device_id):
        '''Enumerate formats into hierarchical structure:

        streams:
            resolutions:
                framerates
        '''
        formats = {}
        # only lists modes for native streams (RS_STREAM_COLOR/RS_STREAM_DEPTH)
        for mode in self.service.get_device_modes(device_id):
            if mode.stream in (rs_stream.RS_STREAM_COLOR,
                               rs_stream.RS_STREAM_DEPTH):
                # check if frame size dict is available
                if mode.stream not in formats:
                    formats[mode.stream] = {}
                stream_obj = next(
                    (s for s in self.streams if s.stream == mode.stream))
                if mode.format == stream_obj.format:
                    size = mode.width, mode.height
                    # check if framerate list is already available
                    if size not in formats[mode.stream]:
                        formats[mode.stream][size] = []
                    formats[mode.stream][size].append(mode.fps)

        if self.align_streams:
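            # keep only resolutions supported by both streams; the symmetric difference is removed from each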
            depth_sizes = formats[rs_stream.RS_STREAM_DEPTH].keys()
            color_sizes = formats[rs_stream.RS_STREAM_COLOR].keys()
            # common_sizes = depth_sizes & color_sizes
            discarded_sizes = depth_sizes ^ color_sizes
            for size in discarded_sizes:
                for sizes in formats.values():
                    if size in sizes:
                        del sizes[size]

        return formats

    def cleanup(self):
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        if self.device is not None:
            self.device.stop()
        self.service.stop()

    def get_init_dict(self):
        return {
            'device_id': self.device.device_id if self.device is not None else 0,
            'frame_size': self.frame_size,
            'frame_rate': self.frame_rate,
            'depth_frame_size': self.depth_frame_size,
            'depth_frame_rate': self.depth_frame_rate,
            'preview_depth': self.preview_depth,
            'record_depth': self.record_depth,
            'align_streams': self.align_streams,
            'device_options': self.controls.export_presets() if self.controls is not None else ()
        }

    def get_frames(self):
        if self.device:
            self.device.wait_for_frames()
            current_time = self.g_pool.get_timestamp()

            last_color_frame_ts = self.device.get_frame_timestamp(
                self.streams[0].stream)
            if self.last_color_frame_ts != last_color_frame_ts:
                self.last_color_frame_ts = last_color_frame_ts
                color = ColorFrame(self.device)
                color.timestamp = current_time
                color.index = self.color_frame_index
                self.color_frame_index += 1
            else:
                color = None

            last_depth_frame_ts = self.device.get_frame_timestamp(
                self.streams[1].stream)
            if self.last_depth_frame_ts != last_depth_frame_ts:
                self.last_depth_frame_ts = last_depth_frame_ts
                depth = DepthFrame(self.device)
                depth.timestamp = current_time
                depth.index = self.depth_frame_index
                self.depth_frame_index += 1
            else:
                depth = None

            return color, depth
        return None, None

    def recent_events(self, events):
        if not self.online:
            time.sleep(.05)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except (pyrs.RealsenseError, TimeoutError) as err:
            self._recent_frame = None
            self._recent_depth_frame = None
            self.restart_device()
        else:
            if color_frame and depth_frame:
                self._recent_frame = color_frame
                events['frame'] = color_frame

            if depth_frame:
                self._recent_depth_frame = depth_frame
                events['depth_frame'] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        self.remove_menu()

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        try:
            del self.menu[:]
        except AttributeError:
            return

        from pyglui import ui

        if self.device is None:
            self.menu.append(ui.Info_Text('Capture initialization failed.'))
            return

        def align_and_restart(val):
            self.align_streams = val
            self.restart_device()

        self.menu.append(
            ui.Switch('record_depth', self, label='Record Depth Stream'))
        self.menu.append(
            ui.Switch('preview_depth', self, label='Preview Depth'))
        self.menu.append(
            ui.Switch('align_streams',
                      self,
                      label='Align Streams',
                      setter=align_and_restart))

        color_sizes = sorted(self._available_modes[rs_stream.RS_STREAM_COLOR],
                             reverse=True)
        self.menu.append(
            ui.Selector(
                'frame_size',
                self,
                # setter=,
                selection=color_sizes,
                label='Resolution'
                if self.align_streams else 'Color Resolution'))

        def color_fps_getter():
            avail_fps = self._available_modes[rs_stream.RS_STREAM_COLOR][
                self.frame_size]
            return avail_fps, [str(fps) for fps in avail_fps]

        self.menu.append(
            ui.Selector(
                'frame_rate',
                self,
                # setter=,
                selection_getter=color_fps_getter,
                label='Color Frame Rate'))

        if not self.align_streams:
            depth_sizes = sorted(
                self._available_modes[rs_stream.RS_STREAM_DEPTH], reverse=True)
            self.menu.append(
                ui.Selector(
                    'depth_frame_size',
                    self,
                    # setter=,
                    selection=depth_sizes,
                    label='Depth Resolution'))

        def depth_fps_getter():
            avail_fps = self._available_modes[rs_stream.RS_STREAM_DEPTH][
                self.depth_frame_size]
            return avail_fps, [str(fps) for fps in avail_fps]

        self.menu.append(
            ui.Selector('depth_frame_rate',
                        self,
                        selection_getter=depth_fps_getter,
                        label='Depth Frame Rate'))

        def reset_options():
            if self.device:
                try:
                    self.device.reset_device_options_to_default(
                        self.controls.keys())
                except pyrs.RealsenseError as err:
                    logger.info('Resetting some device options failed')
                    logger.debug('Reason: {}'.format(err))
                finally:
                    self.controls.refresh()

        sensor_control = ui.Growing_Menu(label='Sensor Settings')
        sensor_control.append(
            ui.Button('Reset device options to default', reset_options))
        for ctrl in sorted(self.controls.values(),
                           key=lambda x: x.range.option):
            # sensor_control.append(ui.Info_Text(ctrl.description))
            if ctrl.range.min == 0.0 and ctrl.range.max == 1.0 and ctrl.range.step == 1.0:
                sensor_control.append(
                    ui.Switch('value',
                              ctrl,
                              label=ctrl.label,
                              off_val=0.0,
                              on_val=1.0))
            else:
                sensor_control.append(
                    ui.Slider('value',
                              ctrl,
                              label=ctrl.label,
                              min=ctrl.range.min,
                              max=ctrl.range.max,
                              step=ctrl.range.step))
        self.menu.append(sensor_control)

    def gl_display(self):
        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(
                self._recent_depth_frame.bgr)
            gl_utils.glFlush()
        elif not self.preview_depth and self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer, self._recent_frame.width,
                self._recent_frame.height)
            gl_utils.glFlush()

        gl_utils.make_coord_system_norm_based()
        self.g_pool.image_tex.draw()
        if not self.online:
            cygl.utils.draw_gl_texture(np.zeros((1, 1, 3), dtype=np.uint8),
                                       alpha=0.4)
        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3))

    def restart_device(self,
                       device_id=None,
                       color_frame_size=None,
                       color_fps=None,
                       depth_frame_size=None,
                       depth_fps=None,
                       device_options=None):
        if device_id is None:
            device_id = self.device.device_id
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = self.controls.export_presets()
        if self.device is not None:
            self.device.stop()
            self.device = None
        self.service.stop()
        self.service.start()
        self.notify_all({
            'subject': 'realsense_source.restart',
            'device_id': device_id,
            'color_frame_size': color_frame_size,
            'color_fps': color_fps,
            'depth_frame_size': depth_frame_size,
            'depth_fps': depth_fps,
            'device_options': device_options
        })

    def on_notify(self, notification):
        if notification['subject'] == 'realsense_source.restart':
            kwargs = notification.copy()
            del kwargs['subject']
            self._initialize_device(**kwargs)
        elif notification['subject'] == 'recording.started':
            self.start_depth_recording(notification['rec_path'])
        elif notification['subject'] == 'recording.stopped':
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning('Depth video recording has been started already')
            return

        video_path = os.path.join(rec_loc, 'depth.mp4')
        self.depth_video_writer = AV_Writer(video_path,
                                            fps=self.depth_frame_rate,
                                            use_timestamps=True)

    def stop_depth_recording(self):
        if self.depth_video_writer is None:
            logger.warning('Depth video recording was not running')
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def intrinsics(self):
        return self._intrinsics

    @property
    def frame_size(self):
        stream = self.streams[0]
        return stream.width, stream.height

    @frame_size.setter
    def frame_size(self, new_size):
        if self.device is not None and new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        return self.streams[0].fps

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        stream = self.streams[1]
        return stream.width, stream.height

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if self.device is not None and new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        return self.streams[1].fps

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        return False

    @property
    def online(self):
        return self.device and self.device.is_streaming()

    @property
    def name(self):
        # not the same as `if self.device:`!
        if self.device is not None:
            return self.device.name
        else:
            return "Ghost capture"
Example #13
0
def eye(g_pool, cap_src, cap_size, pipe_to_world, eye_id=0):
    """
    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates into g_pool.pupil_queue
    """

    # modify the root logger for this process
    logger = logging.getLogger()
    # remove inherited handlers
    logger.handlers = []
    # create file handler which logs even debug messages
    fh = logging.FileHandler(os.path.join(g_pool.user_dir,
                                          'eye%s.log' % eye_id),
                             mode='w')
    # fh.setLevel(logging.DEBUG)
    # create console handler with a higher log level
    ch = logging.StreamHandler()
    ch.setLevel(logger.level + 10)
    # create formatter and add it to the handlers
    formatter = logging.Formatter(
        'Eye' + str(eye_id) +
        ' Process: %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    fh.setFormatter(formatter)
    formatter = logging.Formatter(
        'EYE' + str(eye_id) +
        ' Process [%(levelname)s] %(name)s : %(message)s')
    ch.setFormatter(formatter)
    # add the handlers to the logger
    logger.addHandler(fh)
    logger.addHandler(ch)
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    #UI Platform tweaks
    if platform.system() == 'Linux':
        scroll_factor = 10.0
        window_position_default = (600, 300 * eye_id)
    elif platform.system() == 'Windows':
        scroll_factor = 1.0
        window_position_default = (600, 31 + 300 * eye_id)
    else:
        scroll_factor = 1.0
        window_position_default = (600, 300 * eye_id)

    # Callback functions
    def on_resize(window, w, h):
        if not g_pool.iconified:
            active_window = glfwGetCurrentContext()
            glfwMakeContextCurrent(window)
            g_pool.gui.update_window(w, h)
            graph.adjust_size(w, h)
            adjust_gl_view(w, h)
            glfwMakeContextCurrent(active_window)

    def on_key(window, key, scancode, action, mods):
        g_pool.gui.update_key(key, scancode, action, mods)

    def on_char(window, char):
        g_pool.gui.update_char(char)

    def on_iconify(window, iconified):
        g_pool.iconified = iconified

    def on_button(window, button, action, mods):
        if g_pool.display_mode == 'roi':
            if action == GLFW_RELEASE and u_r.active_edit_pt:
                u_r.active_edit_pt = False
                return  # if the ROI is being edited we don't want the GUI to interact as well
            elif action == GLFW_PRESS:
                pos = glfwGetCursorPos(window)
                pos = normalize(pos, glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(
                    pos, (frame.width, frame.height))  # Position in img pixels
                if u_r.mouse_over_edit_pt(pos, u_r.handle_size + 40,
                                          u_r.handle_size + 40):
                    return  # if the ROI is being edited we don't want the GUI to interact as well

        g_pool.gui.update_button(button, action, mods)

    def on_pos(window, x, y):
        hdpi_factor = float(
            glfwGetFramebufferSize(window)[0] / glfwGetWindowSize(window)[0])
        g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

        if u_r.active_edit_pt:
            pos = normalize((x, y), glfwGetWindowSize(main_window))
            if g_pool.flip:
                pos = 1 - pos[0], 1 - pos[1]
            pos = denormalize(pos, (frame.width, frame.height))
            u_r.move_vertex(u_r.active_pt_idx, pos)

    def on_scroll(window, x, y):
        g_pool.gui.update_scroll(x, y * scroll_factor)

    def on_close(window):
        g_pool.quit.value = True
        logger.info('Process closing from window')

    # load session persistent settings
    session_settings = Persistent_Dict(
        os.path.join(g_pool.user_dir, 'user_settings_eye%s' % eye_id))
    if session_settings.get("version", VersionFormat('0.0')) < g_pool.version:
        logger.info(
            "Session settings are from an older version of this app. I will not use them."
        )
        session_settings.clear()
    # Initialize capture
    cap = autoCreateCapture(cap_src, timebase=g_pool.timebase)
    default_settings = {'frame_size': cap_size, 'frame_rate': 30}
    previous_settings = session_settings.get('capture_settings', None)
    if previous_settings and previous_settings['name'] == cap.name:
        cap.settings = previous_settings
    else:
        cap.settings = default_settings

    # Test capture
    try:
        frame = cap.get_frame()
    except CameraCaptureError:
        logger.error("Could not retrieve image from capture")
        cap.close()
        return

    #signal world that we are ready to go
    pipe_to_world.send('eye%s process ready' % eye_id)

    # any object we attach to the g_pool object *from now on* will only be visible to this process!
    # vars should be declared here to make them visible to the code reader.
    g_pool.iconified = False
    g_pool.capture = cap
    g_pool.flip = session_settings.get('flip', False)
    g_pool.display_mode = session_settings.get('display_mode', 'camera_image')
    g_pool.display_mode_info_text = {
        'camera_image':
        "Raw eye camera image. This uses the least amount of CPU power",
        'roi':
        "Click and drag on the blue circles to adjust the region of interest. The region should be a small as possible but big enough to capture to pupil in its movements",
        'algorithm':
        "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters with in the Pupil Detection menu below."
    }
    # g_pool.draw_pupil = session_settings.get('draw_pupil',True)

    u_r = UIRoi(frame.img.shape)
    u_r.set(session_settings.get('roi', u_r.get()))

    writer = None

    pupil_detector = Canny_Detector(g_pool)

    # UI callback functions
    def set_scale(new_scale):
        g_pool.gui.scale = new_scale
        g_pool.gui.collect_menus()

    def set_display_mode_info(val):
        g_pool.display_mode = val
        g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

    # Initialize glfw
    glfwInit()
    if g_pool.binocular:
        title = "Binocular eye %s" % eye_id
    else:
        title = 'Eye'
    width, height = session_settings.get('window_size',
                                         (frame.width, frame.height))
    main_window = glfwCreateWindow(width, height, title, None, None)
    window_pos = session_settings.get('window_position',
                                      window_position_default)
    glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
    glfwMakeContextCurrent(main_window)
    cygl_init()

    # gl_state settings
    basic_gl_setup()
    g_pool.image_tex = Named_Texture()
    g_pool.image_tex.update_from_frame(frame)
    glfwSwapInterval(0)

    #setup GUI
    g_pool.gui = ui.UI()
    g_pool.gui.scale = session_settings.get('gui_scale', 1)
    g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                       pos=(-300, 0),
                                       size=(0, 0),
                                       header_pos='left')
    general_settings = ui.Growing_Menu('General')
    general_settings.append(
        ui.Slider('scale',
                  g_pool.gui,
                  setter=set_scale,
                  step=.05,
                  min=1.,
                  max=2.5,
                  label='Interface Size'))
    general_settings.append(
        ui.Button(
            'Reset window size',
            lambda: glfwSetWindowSize(main_window, frame.width, frame.height)))
    general_settings.append(
        ui.Selector('display_mode',
                    g_pool,
                    setter=set_display_mode_info,
                    selection=['camera_image', 'roi', 'algorithm'],
                    labels=['Camera Image', 'ROI', 'Algorithm'],
                    label="Mode"))
    general_settings.append(
        ui.Switch('flip', g_pool, label='Flip image display'))
    g_pool.display_mode_info = ui.Info_Text(
        g_pool.display_mode_info_text[g_pool.display_mode])
    general_settings.append(g_pool.display_mode_info)
    g_pool.sidebar.append(general_settings)
    g_pool.gui.append(g_pool.sidebar)
    # let the camera add its GUI
    g_pool.capture.init_gui(g_pool.sidebar)
    # let detector add its GUI
    pupil_detector.init_gui(g_pool.sidebar)

    # Register callbacks main_window
    glfwSetFramebufferSizeCallback(main_window, on_resize)
    glfwSetWindowCloseCallback(main_window, on_close)
    glfwSetWindowIconifyCallback(main_window, on_iconify)
    glfwSetKeyCallback(main_window, on_key)
    glfwSetCharCallback(main_window, on_char)
    glfwSetMouseButtonCallback(main_window, on_button)
    glfwSetCursorPosCallback(main_window, on_pos)
    glfwSetScrollCallback(main_window, on_scroll)

    #set the last saved window size
    on_resize(main_window, *glfwGetWindowSize(main_window))

    # load last gui configuration
    g_pool.gui.configuration = session_settings.get('ui_config', {})

    #set up performance graphs
    pid = os.getpid()
    ps = psutil.Process(pid)
    ts = frame.timestamp

    cpu_graph = graph.Bar_Graph()
    cpu_graph.pos = (20, 130)
    cpu_graph.update_fn = ps.cpu_percent
    cpu_graph.update_rate = 5
    cpu_graph.label = 'CPU %0.1f'

    fps_graph = graph.Bar_Graph()
    fps_graph.pos = (140, 130)
    fps_graph.update_rate = 5
    fps_graph.label = "%0.0f FPS"

    #create a timer to control window update frequency
    window_update_timer = timer(1 / 60.)

    def window_should_update():
        return next(window_update_timer)
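    # (Sketch/assumption, not taken from this file:) `timer` from methods is
    # presumably a generator that yields True at most once per interval, e.g.
    #
    #   def timer(interval):
    #       last = time()
    #       while True:
    #           if time() - last >= interval:
    #               last = time()
    #               yield True
    #           else:
    #               yield False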

    # Event loop
    while not g_pool.quit.value:
        # Get an image from the grabber
        try:
            frame = cap.get_frame()
        except CameraCaptureError:
            logger.error("Capture from Camera Failed. Stopping.")
            break
        except EndofVideoFileError:
            logger.warning("Video File is done. Stopping")
            break

        #update performance graphs
        t = frame.timestamp
        dt, ts = t - ts, t
        try:
            fps_graph.add(1. / dt)
        except ZeroDivisionError:
            pass
        cpu_graph.update()

        ###  RECORDING of Eye Video (on demand) ###
        # Setup variables and lists for recording
        if pipe_to_world.poll():
            command, raw_mode = pipe_to_world.recv()
            if command is not None:
                record_path = command
                logger.info("Will save eye video to: %s" % record_path)
                timestamps_path = os.path.join(record_path,
                                               "eye%s_timestamps.npy" % eye_id)
                if raw_mode and frame.jpeg_buffer:
                    video_path = os.path.join(record_path,
                                              "eye%s.mp4" % eye_id)
                    writer = JPEG_Writer(video_path, cap.frame_rate)
                else:
                    video_path = os.path.join(record_path,
                                              "eye%s.mp4" % eye_id)
                    writer = AV_Writer(video_path, cap.frame_rate)
                timestamps = []
            else:
                logger.info("Done recording.")
                writer.release()
                writer = None
                np.save(timestamps_path, np.asarray(timestamps))
                del timestamps

        if writer:
            writer.write_video_frame(frame)
            timestamps.append(frame.timestamp)

        # pupil ellipse detection
        result = pupil_detector.detect(
            frame, user_roi=u_r, visualize=g_pool.display_mode == 'algorithm')
        result['id'] = eye_id
        # stream the result
        g_pool.pupil_queue.put(result)

        # GL drawing
        if window_should_update():
            if not g_pool.iconified:
                glfwMakeContextCurrent(main_window)
                clear_gl_screen()

                # switch to work in normalized coordinate space
                if g_pool.display_mode == 'algorithm':
                    g_pool.image_tex.update_from_ndarray(frame.img)
                elif g_pool.display_mode in ('camera_image', 'roi'):
                    g_pool.image_tex.update_from_ndarray(frame.gray)
                else:
                    pass

                make_coord_system_norm_based(g_pool.flip)
                g_pool.image_tex.draw()
                # switch to work in pixel space
                make_coord_system_pixel_based((frame.height, frame.width, 3),
                                              g_pool.flip)

                if result['confidence'] > 0:
                    if 'axes' in result:
                        pts = cv2.ellipse2Poly((int(
                            result['center'][0]), int(result['center'][1])),
                                               (int(result['axes'][0] / 2),
                                                int(result['axes'][1] / 2)),
                                               int(result['angle']), 0, 360,
                                               15)
                        cygl_draw_polyline(pts, 1, cygl_rgba(1., 0, 0, .5))
                    cygl_draw_points([result['center']],
                                     size=20,
                                     color=cygl_rgba(1., 0., 0., .5),
                                     sharpness=1.)

                # render graphs
                graph.push_view()
                fps_graph.draw()
                cpu_graph.draw()
                graph.pop_view()

                # render GUI
                g_pool.gui.update()

                #render the ROI
                if g_pool.display_mode == 'roi':
                    u_r.draw(g_pool.gui.scale)

                #update screen
                glfwSwapBuffers(main_window)
            glfwPollEvents()

    # END while running

    # in case eye recording was still running: save & close
    if writer:
        logger.info("Done recording eye.")
        writer = None
        np.save(timestamps_path, np.asarray(timestamps))

    glfwRestoreWindow(main_window)  # need to do this for Windows OS
    # save session persistent settings
    session_settings['gui_scale'] = g_pool.gui.scale
    session_settings['roi'] = u_r.get()
    session_settings['flip'] = g_pool.flip
    session_settings['display_mode'] = g_pool.display_mode
    session_settings['ui_config'] = g_pool.gui.configuration
    session_settings['capture_settings'] = g_pool.capture.settings
    session_settings['window_size'] = glfwGetWindowSize(main_window)
    session_settings['window_position'] = glfwGetWindowPos(main_window)
    session_settings['version'] = g_pool.version
    session_settings.close()

    pupil_detector.cleanup()
    g_pool.gui.terminate()
    glfwDestroyWindow(main_window)
    glfwTerminate()
    cap.close()

    #flushing queue in case world process did not exit gracefully
    while not g_pool.pupil_queue.empty():
        g_pool.pupil_queue.get()
    g_pool.pupil_queue.close()

    logger.debug("Process done")
Example #14
0
    def start(self):
        self.timestamps = []
        self.data = {
            'pupil_positions': [],
            'gaze_positions': [],
            'notifications': []
        }
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug(
                "Created new recordings session dir {}".format(session))

        except:
            logger.debug(
                "Recordings session dir {} already exists, using it.".format(
                    session))

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(
                    self.rec_path))
                break
            except:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w', newline='') as csvfile:
            csv_utils.write_key_value_file(
                csvfile, {
                    'Recording Name': self.session_name,
                    'Start Date': strftime("%d.%m.%Y",
                                           localtime(self.start_time)),
                    'Start Time': strftime("%H:%M:%S",
                                           localtime(self.start_time))
                })

        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(
                audio_path, self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path,
                                      self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path,
                                    fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir,
                                       "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {
                'subject': 'calibration.calibration_data',
                'record': True
            }
            notification.update(cal_data)
            self.data['notifications'].append(notification)
        except:
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({
            'subject': 'recording.started',
            'rec_path': self.rec_path,
            'session_name': self.session_name,
            'record_eye': self.record_eye,
            'compression': self.raw_jpeg
        })
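# --- Note (assumption, not taken from the example above) --------------------
# csv_utils.write_key_value_file is expected to emit one tab-separated
# key/value pair per line; that is how the exporter in the next example reads
# info.csv back, e.g. (illustrative contents):
#
#   Recording Name<TAB>my_session
#   Start Date<TAB>24.03.2016
#   Start Time<TAB>14:03:12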
Example #15
0
File: exporter.py Project: mbiesaga/pupil
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           user_dir,
           start_frame=None,
           end_frame=None,
           plugin_initializers=[],
           out_file_path=None):

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    #parse info.csv file
    meta_info_path = os.path.join(rec_dir, "info.csv")
    with open(meta_info_path) as info:
        meta_info = dict(
            ((line.strip().split('\t')) for line in info.readlines()))

    video_path = glob(os.path.join(rec_dir, "world.*"))[0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")

    rec_version = read_rec_version(meta_info)
    if rec_version >= VersionFormat('0.5'):
        pass
    elif rec_version >= VersionFormat('0.4'):
        update_recording_0v4_to_current(rec_dir)
    elif rec_version >= VersionFormat('0.3'):
        update_recording_0v3_to_current(rec_dir)
        timestamps_path = os.path.join(rec_dir, "timestamps.npy")
    else:
        logger.error("This recording is too old. Sorry.")
        return

    timestamps = np.load(timestamps_path)

    cap = autoCreateCapture(video_path, timestamps=timestamps_path)
    if isinstance(cap, FakeCapture):
        logger.error("could not start capture.")
        return

    #Out file path verification: we do this earlier as well, but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    #Trim mark verification
    #make sure the trim marks (start frame, end frame) make sense: we define them like Python list slices, thus we can test them as such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warning(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    if start_frame is None:
        start_frame = 0

    #these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    #setup of writer
    writer = AV_Writer(out_file_path)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g = Global_Container()
    g.app = 'exporter'
    g.capture = cap
    g.rec_dir = rec_dir
    g.user_dir = user_dir
    g.rec_version = rec_version
    g.timestamps = timestamps

    # load pupil_positions, gaze_positions
    pupil_data = load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']

    g.pupil_positions_by_frame = correlate_data(pupil_list, g.timestamps)
    g.gaze_positions_by_frame = correlate_data(gaze_list, g.timestamps)
    g.fixations_by_frame = [[] for x in g.timestamps]  # populated by the fixation detector plugin

    #add plugins
    g.plugins = Plugin_List(g, plugin_by_name, plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            #explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            break

        events = {}
        #new positions and events
        events['gaze_positions'] = g.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g.pupil_positions_by_frame[frame.index]

        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
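# --- Worked example (illustrative, not part of the exporter above) ----------
# The trim marks behave like a Python list slice over the world timestamps:
# with start_frame=10 and end_frame=20 exactly frames 10..19 are exported.
import numpy as np
timestamps = np.arange(100)        # hypothetical world timestamps
trimmed = timestamps[10:20]
assert len(trimmed) == 10          # frames_to_export.value would be 10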
Example #16
0
File: recorder.py Project: samtuhka/pupil
    def start(self):
        self.timestamps = []
        self.timestampsUnix = []
        self.glint_pos_list = []
        self.pupil_pos_list = []
        self.gaze_pos_list = []
        self.data = {'pupil_positions':[],'gaze_positions':[],'notifications':[]}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir {}".format(session))

        except:
            logger.debug("Recordings session dir {} already exists, using it.".format(session))

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(self.rec_path))
                break
            except:
                logger.debug("We dont want to overwrite data, incrementing counter & trying to make new data folder")
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w', newline='') as csvfile:
            csv_utils.write_key_value_file(csvfile, {
                'Recording Name': self.session_name,
                'Start Date': strftime("%d.%m.%Y", localtime(self.start_time)),
                'Start Time': strftime("%H:%M:%S", localtime(self.start_time)),
            })

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path, self.g_pool.capture.frame_rate)
        elif hasattr(self.g_pool.capture._recent_frame, 'h264_buffer'):
            self.writer = H264Writer(self.video_path,
                                     self.g_pool.capture.frame_size[0],
                                     self.g_pool.capture.frame_size[1],
                                     int(self.g_pool.capture.frame_rate))
        else:
            self.writer = AV_Writer(self.video_path, fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir, "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {'subject': 'calibration.calibration_data', 'record': True}
            notification.update(cal_data)
            self.data['notifications'].append(notification)
        except:
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({'subject': 'recording.started', 'rec_path': self.rec_path,
                         'session_name': self.session_name, 'record_eye': self.record_eye,
                         'compression': self.raw_jpeg})
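# --- Note (sketch of the resulting layout, not part of the example above) ---
# A recording started this way ends up in an incrementing folder below the
# session directory, roughly:
#
#   <rec_dir>/<session_name>/000/
#       info.csv                        # key/value meta data written above
#       world.mp4                       # JPEG_Writer, H264Writer or AV_Writer output
#       eye0.mp4, eye0_timestamps.npy   # written by the eye process when record_eye is set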
Example #17
0
class Realsense2_Source(Base_Source):
    def __init__(
            self,
            g_pool,
            device_id=None,
            frame_size=DEFAULT_COLOR_SIZE,
            frame_rate=DEFAULT_COLOR_FPS,
            depth_frame_size=DEFAULT_DEPTH_SIZE,
            depth_frame_rate=DEFAULT_DEPTH_FPS,
            preview_depth=False,
            device_options=(),
            record_depth=True,
    ):
        logger.debug("_init_ started")
        super().__init__(g_pool)
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.context = rs.context()
        self.pipeline = rs.pipeline(self.context)
        self.pipeline_profile = None
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self._needs_restart = False
        self.frame_size_backup = DEFAULT_COLOR_SIZE
        self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
        self.frame_rate_backup = DEFAULT_COLOR_FPS
        self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        self._initialize_device(
            device_id,
            frame_size,
            frame_rate,
            depth_frame_size,
            depth_frame_rate,
            device_options,
        )
        logger.debug("_init_ completed")

    def _initialize_device(
            self,
            device_id,
            color_frame_size,
            color_fps,
            depth_frame_size,
            depth_fps,
            device_options=(),
    ):
        self.stop_pipeline()
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if device_id is None:
            device_id = self.device_id

        if device_id is None:  # FIXME these two if blocks look ugly.
            return

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)
        logger.debug("device_id: {} self._available_modes: {}".format(
            device_id, str(self._available_modes)))

        if (color_frame_size is not None and depth_frame_size is not None
                and color_fps is not None and depth_fps is not None):
            color_frame_size = tuple(color_frame_size)
            depth_frame_size = tuple(depth_frame_size)

            logger.debug("Initialize with Color {}@{}\tDepth {}@{}".format(
                color_frame_size, color_fps, depth_frame_size, depth_fps))

            # make sure the frame rates are compatible with the given frame sizes
            color_fps = self._get_valid_frame_rate(rs.stream.color,
                                                   color_frame_size, color_fps)
            depth_fps = self._get_valid_frame_rate(rs.stream.depth,
                                                   depth_frame_size, depth_fps)

            self.frame_size_backup = color_frame_size
            self.depth_frame_size_backup = depth_frame_size
            self.frame_rate_backup = color_fps
            self.depth_frame_rate_backup = depth_fps

            config = self._prep_configuration(color_frame_size, color_fps,
                                              depth_frame_size, depth_fps)
        else:
            config = self._get_default_config()
            self.frame_size_backup = DEFAULT_COLOR_SIZE
            self.depth_frame_size_backup = DEFAULT_DEPTH_SIZE
            self.frame_rate_backup = DEFAULT_COLOR_FPS
            self.depth_frame_rate_backup = DEFAULT_DEPTH_FPS

        try:
            self.pipeline_profile = self.pipeline.start(config)
        except RuntimeError as re:
            logger.error("Cannot start pipeline! " + str(re))
            self.pipeline_profile = None
        else:
            self.stream_profiles = {
                s.stream_type(): s.as_video_stream_profile()
                for s in self.pipeline_profile.get_streams()
            }
            logger.debug("Pipeline started for device " + device_id)
            logger.debug("Stream profiles: " + str(self.stream_profiles))

            self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name,
                                               self.frame_size)
            self.update_menu()
            self._needs_restart = False

    def _prep_configuration(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
    ):
        config = rs.config()

        # only use these two formats
        color_format = rs.format.yuyv
        depth_format = rs.format.z16

        config.enable_stream(
            rs.stream.depth,
            depth_frame_size[0],
            depth_frame_size[1],
            depth_format,
            depth_fps,
        )

        config.enable_stream(
            rs.stream.color,
            color_frame_size[0],
            color_frame_size[1],
            color_format,
            color_fps,
        )

        return config

    def _get_default_config(self):
        config = rs.config()  # default config is RGB8, we want YUYV
        config.enable_stream(
            rs.stream.color,
            DEFAULT_COLOR_SIZE[0],
            DEFAULT_COLOR_SIZE[1],
            rs.format.yuyv,
            DEFAULT_COLOR_FPS,
        )
        config.enable_stream(
            rs.stream.depth,
            DEFAULT_DEPTH_SIZE[0],
            DEFAULT_DEPTH_SIZE[1],
            rs.format.z16,
            DEFAULT_DEPTH_FPS,
        )
        return config

    def _get_valid_frame_rate(self, stream_type, frame_size, fps):
        assert stream_type == rs.stream.color or stream_type == rs.stream.depth

        if not self._available_modes or stream_type not in self._available_modes:
            logger.warning(
                "_get_valid_frame_rate: self._available_modes not set yet. Returning default fps."
            )
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS
            else:
                raise ValueError(
                    "Unexpected `stream_type`: {}".format(stream_type))

        if frame_size not in self._available_modes[stream_type]:
            logger.error(
                "Frame size not supported for {}: {}. Returning default fps".
                format(stream_type, frame_size))
            if stream_type == rs.stream.color:
                return DEFAULT_COLOR_FPS
            elif stream_type == rs.stream.depth:
                return DEFAULT_DEPTH_FPS

        if fps not in self._available_modes[stream_type][frame_size]:
            old_fps = fps
            rates = [
                abs(r - fps)
                for r in self._available_modes[stream_type][frame_size]
            ]
            best_rate_idx = rates.index(min(rates))
            fps = self._available_modes[stream_type][frame_size][best_rate_idx]
            logger.warning(
                "{} fps is not supported for frame size {}. Falling back to {} fps"
                .format(old_fps, frame_size, fps))

        return fps

    def _enumerate_formats(self, device_id):
        """Enumerate formats into hierachical structure:

        streams:
            resolutions:
                framerates
        """
        formats = {}

        if self.context is None:
            return formats

        devices = self.context.query_devices()
        current_device = None

        for d in devices:
            try:
                serial = d.get_info(rs.camera_info.serial_number)
            except RuntimeError as re:
                logger.error("Device no longer available " + str(re))
            else:
                if device_id == serial:
                    current_device = d

        if current_device is None:
            return formats
        logger.debug("Found the current device: " + device_id)

        sensors = current_device.query_sensors()
        for s in sensors:
            stream_profiles = s.get_stream_profiles()
            for sp in stream_profiles:
                vp = sp.as_video_stream_profile()
                stream_type = vp.stream_type()

                if stream_type not in (rs.stream.color, rs.stream.depth):
                    continue
                elif vp.format() not in (rs.format.z16, rs.format.yuyv):
                    continue

                formats.setdefault(stream_type, {})
                stream_resolution = (vp.width(), vp.height())
                formats[stream_type].setdefault(stream_resolution,
                                                []).append(vp.fps())

        return formats
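    # Sketch of the structure returned above (values are illustrative, not
    # taken from a real device):
    #
    #   {rs.stream.color: {(1280, 720): [6, 15, 30], (640, 480): [30, 60]},
    #    rs.stream.depth: {(848, 480): [30, 60, 90]}}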

    def stop_pipeline(self):
        if self.online:
            try:
                self.pipeline_profile = None
                self.stream_profiles = None
                self.pipeline.stop()
                logger.debug("Pipeline stopped.")
            except RuntimeError as re:
                logger.error("Cannot stop the pipeline: " + str(re))

    def cleanup(self):
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        self.stop_pipeline()

    def get_init_dict(self):
        return {
            "frame_size": self.frame_size,
            "frame_rate": self.frame_rate,
            "depth_frame_size": self.depth_frame_size,
            "depth_frame_rate": self.depth_frame_rate,
            "preview_depth": self.preview_depth,
            "record_depth": self.record_depth,
        }

    def get_frames(self):
        if self.online:
            try:
                frames = self.pipeline.wait_for_frames(TIMEOUT)
            except RuntimeError as e:
                logger.error("get_frames: Timeout!")
                raise RuntimeError(e)
            else:
                current_time = self.g_pool.get_timestamp()

                color = None
                # if we're expecting color frames
                if rs.stream.color in self.stream_profiles:
                    color_frame = frames.get_color_frame()
                    last_color_frame_ts = color_frame.get_timestamp()
                    if self.last_color_frame_ts != last_color_frame_ts:
                        self.last_color_frame_ts = last_color_frame_ts
                        color = ColorFrame(
                            np.asanyarray(color_frame.get_data()),
                            current_time,
                            self.color_frame_index,
                        )
                        self.color_frame_index += 1

                depth = None
                # if we're expecting depth frames
                if rs.stream.depth in self.stream_profiles:
                    depth_frame = frames.get_depth_frame()
                    last_depth_frame_ts = depth_frame.get_timestamp()
                    if self.last_depth_frame_ts != last_depth_frame_ts:
                        self.last_depth_frame_ts = last_depth_frame_ts
                        depth = DepthFrame(
                            np.asanyarray(depth_frame.get_data()),
                            current_time,
                            self.depth_frame_index,
                        )
                        self.depth_frame_index += 1

                return color, depth
        return None, None

    def recent_events(self, events):
        if self._needs_restart or not self.online:
            logger.debug("recent_events -> restarting device")
            self.restart_device()
            time.sleep(0.01)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except RuntimeError as re:
            logger.warning("Realsense failed to provide frames." + str(re))
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            if color_frame is not None:
                self._recent_frame = color_frame
                events["frame"] = color_frame

            if depth_frame is not None:
                self._recent_depth_frame = depth_frame
                events["depth_frame"] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        self.remove_menu()

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        logger.debug("update_menu")
        try:
            del self.menu[:]
        except AttributeError:
            return

        from pyglui import ui

        if not self.online:
            self.menu.append(ui.Info_Text("Capture initialization failed."))
            return

        self.menu.append(
            ui.Switch("record_depth", self, label="Record Depth Stream"))
        self.menu.append(
            ui.Switch("preview_depth", self, label="Preview Depth"))

        if self._available_modes is not None:

            def frame_size_selection_getter():
                if self.device_id:
                    frame_size = sorted(self._available_modes[rs.stream.color],
                                        reverse=True)
                    labels = [
                        "({}, {})".format(t[0], t[1]) for t in frame_size
                    ]
                    return frame_size, labels
                else:
                    return [self.frame_size_backup
                            ], [str(self.frame_size_backup)]

            selector = ui.Selector(
                "frame_size",
                self,
                selection_getter=frame_size_selection_getter,
                label="Color Resolution",
            )
            self.menu.append(selector)

            def frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps for fps in self._available_modes[rs.stream.color][
                            self.frame_size]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return [self.frame_rate_backup
                            ], [str(self.frame_rate_backup)]

            selector = ui.Selector(
                "frame_rate",
                self,
                selection_getter=frame_rate_selection_getter,
                label="Color Frame Rate",
            )
            self.menu.append(selector)

            def depth_frame_size_selection_getter():
                if self.device_id:
                    depth_sizes = sorted(
                        self._available_modes[rs.stream.depth], reverse=True)
                    labels = [
                        "({}, {})".format(t[0], t[1]) for t in depth_sizes
                    ]
                    return depth_sizes, labels
                else:
                    return (
                        [self.depth_frame_size_backup],
                        [str(self.depth_frame_size_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_size",
                self,
                selection_getter=depth_frame_size_selection_getter,
                label="Depth Resolution",
            )
            self.menu.append(selector)

            def depth_frame_rate_selection_getter():
                if self.device_id:
                    avail_fps = [
                        fps for fps in self._available_modes[rs.stream.depth][
                            self.depth_frame_size]
                    ]
                    return avail_fps, [str(fps) for fps in avail_fps]
                else:
                    return (
                        [self.depth_frame_rate_backup],
                        [str(self.depth_frame_rate_backup)],
                    )

            selector = ui.Selector(
                "depth_frame_rate",
                self,
                selection_getter=depth_frame_rate_selection_getter,
                label="Depth Frame Rate",
            )
            self.menu.append(selector)

            def reset_options():
                logger.debug("reset_options")
                self.reset_device(self.device_id)

            sensor_control = ui.Growing_Menu(label="Sensor Settings")
            sensor_control.append(
                ui.Button("Reset device options to default", reset_options))
            self.menu.append(sensor_control)
        else:
            logger.debug("update_menu: self._available_modes is None")

    def gl_display(self):

        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(
                self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(
                self._recent_frame.yuv_buffer,
                self._recent_frame.width,
                self._recent_frame.height,
            )
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        gl_utils.make_coord_system_pixel_based(
            (self.frame_size[1], self.frame_size[0], 3))

    def reset_device(self, device_id):
        logger.debug("reset_device")
        if device_id is None:
            device_id = self.device_id

        self.notify_all({
            "subject": "realsense2_source.restart",
            "device_id": device_id,
            "color_frame_size": None,
            "color_fps": None,
            "depth_frame_size": None,
            "depth_fps": None,
            "device_options": [],  # FIXME
        })

    def restart_device(
        self,
        color_frame_size=None,
        color_fps=None,
        depth_frame_size=None,
        depth_fps=None,
        device_options=None,
    ):
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = []  # FIXME

        self.notify_all({
            "subject": "realsense2_source.restart",
            "device_id": None,
            "color_frame_size": color_frame_size,
            "color_fps": color_fps,
            "depth_frame_size": depth_frame_size,
            "depth_fps": depth_fps,
            "device_options": device_options,
        })
        logger.debug("self.restart_device --> self.notify_all")

    def on_click(self, pos, button, action):
        pass

    def on_notify(self, notification):
        logger.debug('self.on_notify, notification["subject"]: ' +
                     notification["subject"])
        if notification["subject"] == "realsense2_source.restart":
            kwargs = notification.copy()
            del kwargs["subject"]
            del kwargs["topic"]
            self._initialize_device(**kwargs)
        elif notification["subject"] == "recording.started":
            self.start_depth_recording(notification["rec_path"])
        elif notification["subject"] == "recording.stopped":
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning("Depth video recording has been started already")
            return

        video_path = os.path.join(rec_loc, "depth.mp4")
        self.depth_video_writer = AV_Writer(video_path,
                                            fps=self.depth_frame_rate,
                                            use_timestamps=True)

    def stop_depth_recording(self):
        if self.depth_video_writer is None:
            logger.warning("Depth video recording was not running")
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def device_id(self):
        if self.online:  # already running
            return self.pipeline_profile.get_device().get_info(
                rs.camera_info.serial_number)
        else:
            # set the first available device
            devices = self.context.query_devices()
            if devices:
                logger.info("device_id: first device by default.")
                return devices[0].get_info(rs.camera_info.serial_number)
            else:
                logger.debug("device_id: No device connected.")
                return None

    @property
    def frame_size(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except AttributeError:
            return self.frame_size_backup
        except KeyError:
            return self.frame_size_backup
        except TypeError:
            return self.frame_size_backup

    @frame_size.setter
    def frame_size(self, new_size):
        if new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.color]
            # TODO check FPS is in self.available modes
            return stream_profile.fps()
        except AttributeError:
            return self.frame_rate_backup
        except KeyError:
            return self.frame_rate_backup
        except TypeError:
            return self.frame_rate_backup

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            # TODO check width & height is in self.available modes
            return stream_profile.width(), stream_profile.height()
        except AttributeError:
            return self.depth_frame_size_backup
        except KeyError:
            return self.depth_frame_size_backup
        except TypeError:
            return self.depth_frame_size_backup

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        try:
            stream_profile = self.stream_profiles[rs.stream.depth]
            return stream_profile.fps()
        except AttributeError:
            return self.depth_frame_rate_backup
        except KeyError:
            return self.depth_frame_rate_backup
        except TypeError:
            return self.depth_frame_rate_backup

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        return False

    @property
    def online(self):
        return self.pipeline_profile is not None and self.pipeline is not None

    @property
    def name(self):
        if self.online:
            return self.pipeline_profile.get_device().get_info(
                rs.camera_info.name)
        else:
            logger.debug(
                "self.name: Realsense2 not online. Falling back to Ghost capture"
            )
            return "Ghost capture"
Example #18
0
File: eye.py Project: pilotbear/pupil
def eye(
    timebase,
    is_alive_flag,
    ipc_pub_url,
    ipc_sub_url,
    ipc_push_url,
    user_dir,
    version,
    eye_id,
    overwrite_cap_settings=None,
):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise, each process would also load the other processes' imports.
    import zmq
    import zmq_tools

    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                        ipc_sub_url,
                                        topics=("notify", ))

    # logging setup
    import logging

    logging.getLogger("OpenGL").setLevel(logging.ERROR)
    logger = logging.getLogger()
    logger.handlers = []
    logger.setLevel(logging.NOTSET)
    logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
    # create logger for the context of this function
    logger = logging.getLogger(__name__)

    if is_alive_flag.value:
        # indicates to the eye process that this is a duplicate startup
        logger.warning("Aborting redundant eye process startup")
        return

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id, logger):
        # general imports
        import traceback
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible, glViewport
        from ui_roi import UIRoi

        # monitoring
        import psutil

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, AV_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        from background_helper import IPC_Logging_Task_Proxy

        IPC_Logging_Task_Proxy.push_url = ipc_push_url

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D, Detector_Dummy

        pupil_detectors = {
            Detector_2D.__name__: Detector_2D,
            Detector_3D.__name__: Detector_3D,
            Detector_Dummy.__name__: Detector_Dummy,
        }

        # UI Platform tweaks
        if platform.system() == "Linux":
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id + 30)
        elif platform.system() == "Windows":
            scroll_factor = 10.0
            window_position_default = (600, 90 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        icon_bar_width = 50
        window_size = None
        camera_render_size = None
        hdpi_factor = 1.0

        # g_pool holds variables for this process
        g_pool = SimpleNamespace()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = "capture"
        g_pool.process = "eye{}".format(eye_id)
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            nonlocal window_size
            nonlocal camera_render_size
            nonlocal hdpi_factor

            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(window)
            hdpi_factor = glfw.getHDPIFactor(window)
            g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
            window_size = w, h
            camera_render_size = w - int(icon_bar_width * g_pool.gui.scale), h
            g_pool.gui.update_window(w, h)
            g_pool.gui.collect_menus()
            for g in g_pool.graphs:
                g.scale = hdpi_factor
                g.adjust_window_size(w, h)
            adjust_gl_view(w, h)
            glfw.glfwMakeContextCurrent(active_window)

        def on_window_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_window_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_window_mouse_button(window, button, action, mods):
            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            x *= hdpi_factor
            y *= hdpi_factor
            g_pool.gui.update_mouse(x, y)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), camera_render_size)
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        def on_drop(window, count, paths):
            paths = [paths[x].decode("utf-8") for x in range(count)]
            plugins = (g_pool.capture_manager, g_pool.capture)
            for plugin in plugins:
                if plugin.on_drop(paths):
                    break

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir,
                         "user_settings_eye{}".format(eye_id)))
        if VersionFormat(session_settings.get("version",
                                              "0.0")) != g_pool.version:
            logger.info(
                "Session settings are from a different version of this app. I will not use those."
            )
            session_settings.clear()

        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get("flip", False)
        g_pool.display_mode = session_settings.get("display_mode",
                                                   "camera_image")
        g_pool.display_mode_info_text = {
            "camera_image":
            "Raw eye camera image. This uses the least amount of CPU power",
            "roi":
            "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            "algorithm":
            "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below.",
        }

        capture_manager_settings = session_settings.get(
            "capture_manager_settings", ("UVC_Manager", {}))

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__: c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](
            g_pool, **manager_settings)

        if eye_id == 0:
            cap_src = [
                "Pupil Cam3 ID0", "Pupil Cam2 ID0", "Pupil Cam1 ID0", "HD-6000"
            ]
        else:
            cap_src = ["Pupil Cam3 ID1", "Pupil Cam2 ID1", "Pupil Cam1 ID1"]

        # Initialize capture
        default_settings = (
            "UVC_Source",
            {
                "preferred_names": cap_src,
                "frame_size": (320, 240),
                "frame_rate": 120
            },
        )

        capture_source_settings = overwrite_cap_settings or session_settings.get(
            "capture_settings", default_settings)
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__: c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](
            g_pool, **source_settings)
        assert g_pool.capture

        g_pool.u_r = UIRoi(
            (g_pool.capture.frame_size[1], g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get("roi")
        if roi_user_settings and tuple(
                roi_user_settings[-1]) == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        pupil_detector_settings = session_settings.get(
            "pupil_detector_settings", None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            "last_pupil_detector", Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,
                                                    pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.deinit_ui()
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_ui()

        def toggle_general_settings(collapsed):
            # this is the menu toggle logic.
            # Only one menu can be open.
            # If no menu is open the menubar should collapse.
            g_pool.menubar.collapsed = collapsed
            for m in g_pool.menubar.elements:
                m.collapsed = True
            general_settings.collapsed = collapsed

        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)

        width, height = g_pool.capture.frame_size
        width *= 2
        height *= 2
        width += icon_bar_width
        width, height = session_settings.get("window_size", (width, height))

        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get("window_position",
                                          window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(
            np.ones((1, 1), dtype=np.uint8) + 125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get("gui_scale", 1.0)
        g_pool.menubar = ui.Scrolling_Menu("Settings",
                                           pos=(-500, 0),
                                           size=(-icon_bar_width, 0),
                                           header_pos="left")
        g_pool.iconbar = ui.Scrolling_Menu("Icons",
                                           pos=(-icon_bar_width, 0),
                                           size=(0, 0),
                                           header_pos="hidden")
        g_pool.gui.append(g_pool.menubar)
        g_pool.gui.append(g_pool.iconbar)

        general_settings = ui.Growing_Menu("General", header_pos="headline")
        general_settings.append(
            ui.Selector(
                "gui_user_scale",
                g_pool,
                setter=set_scale,
                selection=[0.8, 0.9, 1.0, 1.1, 1.2],
                label="Interface Size",
            ))

        def set_window_size():
            f_width, f_height = g_pool.capture.frame_size
            f_width *= 2
            f_height *= 2
            f_width += int(icon_bar_width * g_pool.gui.scale)
            glfw.glfwSetWindowSize(main_window, f_width, f_height)

        def uroi_on_mouse_button(button, action, mods):
            if g_pool.display_mode == "roi":
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the roi interacts we don't want
                    # the gui to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    x, y = glfw.glfwGetCursorPos(main_window)
                    # pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    x *= hdpi_factor
                    y *= hdpi_factor
                    pos = normalize((x, y), camera_render_size)
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    # Position in img pixels
                    pos = denormalize(
                        pos,
                        g_pool.capture.frame_size)  # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(pos,
                                                     g_pool.u_r.handle_size,
                                                     g_pool.u_r.handle_size):
                        # if the roi interacts we don't want
                        # the gui to interact as well
                        return

        general_settings.append(ui.Button("Reset window size",
                                          set_window_size))
        general_settings.append(
            ui.Switch("flip", g_pool, label="Flip image display"))
        general_settings.append(
            ui.Selector(
                "display_mode",
                g_pool,
                setter=set_display_mode_info,
                selection=["camera_image", "roi", "algorithm"],
                labels=["Camera Image", "ROI", "Algorithm"],
                label="Mode",
            ))
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)

        detector_selector = ui.Selector(
            "pupil_detector",
            getter=lambda: g_pool.pupil_detector.__class__,
            setter=set_detector,
            selection=[Detector_Dummy, Detector_2D, Detector_3D],
            labels=["disabled", "C++ 2d detector", "C++ 3d detector"],
            label="Detection method",
        )
        general_settings.append(detector_selector)

        g_pool.menubar.append(general_settings)
        icon = ui.Icon(
            "collapsed",
            general_settings,
            label=chr(0xE8B8),
            on_val=False,
            off_val=True,
            setter=toggle_general_settings,
            label_font="pupil_icons",
        )
        icon.tooltip = "General Settings"
        g_pool.iconbar.append(icon)
        toggle_general_settings(False)

        g_pool.pupil_detector.init_ui()
        g_pool.capture.init_ui()
        g_pool.capture_manager.init_ui()
        g_pool.writer = None

        def replace_source(source_class_name, source_settings):
            g_pool.capture.deinit_ui()
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](
                g_pool, **source_settings)
            g_pool.capture.init_ui()
            if g_pool.writer:
                logger.info("Done recording.")
                try:
                    g_pool.writer.release()
                except RuntimeError:
                    logger.error("No eye video recorded")
                g_pool.writer = None

        g_pool.replace_source = replace_source  # for ndsi capture

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_window_key)
        glfw.glfwSetCharCallback(main_window, on_window_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_window_mouse_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)
        glfw.glfwSetDropCallback(main_window, on_drop)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get("ui_config", {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 50)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = "CPU %0.1f"

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 50)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = "jpeg"
        frame_publish_format_recent_warning = False

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning("Process started.")

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification["subject"]
                if subject.startswith("eye_process.should_stop"):
                    if notification["eye_id"] == eye_id:
                        break
                elif subject == "set_detection_mapping_mode":
                    if notification["mode"] == "3d":
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    elif notification["mode"] == "2d":
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                    else:
                        if not isinstance(g_pool.pupil_detector,
                                          Detector_Dummy):
                            set_detector(Detector_Dummy)
                        detector_selector.read_only = True
                elif subject == "recording.started":
                    if notification["record_eye"] and g_pool.capture.online:
                        record_path = notification["rec_path"]
                        raw_mode = notification["compression"]
                        logger.info(
                            "Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path,
                                                  "eye{}.mp4".format(eye_id))
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(
                                video_path, g_pool.capture.frame_rate)
                        elif hasattr(g_pool.capture._recent_frame,
                                     "h264_buffer"):
                            g_pool.writer = H264Writer(
                                video_path,
                                g_pool.capture.frame_size[0],
                                g_pool.capture.frame_size[1],
                                g_pool.capture.frame_rate,
                            )
                        else:
                            g_pool.writer = AV_Writer(
                                video_path, g_pool.capture.frame_rate)
                elif subject == "recording.stopped":
                    if g_pool.writer:
                        logger.info("Done recording.")
                        try:
                            g_pool.writer.release()
                        except RuntimeError:
                            logger.error("No eye video recorded")
                        g_pool.writer = None
                elif subject.startswith("meta.should_doc"):
                    ipc_socket.notify({
                        "subject": "meta.doc",
                        "actor": "eye{}".format(eye_id),
                        "doc": eye.__doc__,
                    })
                elif subject.startswith("frame_publishing.started"):
                    should_publish_frames = True
                    frame_publish_format = notification.get("format", "jpeg")
                elif subject.startswith("frame_publishing.stopped"):
                    should_publish_frames = False
                    frame_publish_format = "jpeg"
                elif (subject.startswith("start_eye_capture")
                      and notification["target"] == g_pool.process):
                    replace_source(notification["name"], notification["args"])
                elif notification["subject"].startswith(
                        "pupil_detector.set_property"):
                    target_process = notification.get("target", g_pool.process)
                    should_apply = target_process == g_pool.process

                    if should_apply:
                        try:
                            property_name = notification["name"]
                            property_value = notification["value"]
                            if "2d" in notification["subject"]:
                                g_pool.pupil_detector.set_2d_detector_property(
                                    property_name, property_value)
                            elif "3d" in notification["subject"]:
                                if not isinstance(g_pool.pupil_detector,
                                                  Detector_3D):
                                    raise ValueError(
                                        "3d properties are only available"
                                        " if 3d detector is active")
                                g_pool.pupil_detector.set_3d_detector_property(
                                    property_name, property_value)
                            elif property_name == "roi":
                                try:
                                    # Modify the ROI with the values sent over network
                                    minX, maxX, minY, maxY = property_value
                                    g_pool.u_r.set([
                                        max(g_pool.u_r.min_x, int(minX)),
                                        max(g_pool.u_r.min_y, int(minY)),
                                        min(g_pool.u_r.max_x, int(maxX)),
                                        min(g_pool.u_r.max_y, int(maxY)),
                                    ])
                                except ValueError as err:
                                    raise ValueError(
                                        "ROI needs to be list of 4 integers:"
                                        "(minX, maxX, minY, maxY)") from err
                            else:
                                raise KeyError(
                                    "Notification subject does not "
                                    "specifiy detector type nor modify ROI.")
                            logger.debug("`{}` property set to {}".format(
                                property_name, property_value))
                        except KeyError:
                            logger.error("Malformed notification received")
                            logger.debug(traceback.format_exc())
                        except (ValueError, TypeError):
                            logger.error("Invalid property or value")
                            logger.debug(traceback.format_exc())
                elif notification["subject"].startswith(
                        "pupil_detector.broadcast_properties"):
                    target_process = notification.get("target", g_pool.process)
                    should_respond = target_process == g_pool.process
                    if should_respond:
                        props = g_pool.pupil_detector.get_detector_properties()
                        properties_broadcast = {
                            "subject":
                            "pupil_detector.properties.{}".format(eye_id),
                            **props,  # add properties to broadcast
                        }
                        ipc_socket.notify(properties_broadcast)
                g_pool.capture.on_notify(notification)
                g_pool.capture_manager.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get("frame")
            g_pool.capture_manager.recent_events(event)
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0], g_pool.u_r.array_shape[1]) != (
                        f_height,
                        f_width,
                ):
                    g_pool.pupil_detector.on_resolution_change(
                        (g_pool.u_r.array_shape[1], g_pool.u_r.array_shape[0]),
                        g_pool.capture.frame_size,
                    )
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames:
                    try:
                        if frame_publish_format == "jpeg":
                            data = frame.jpeg_buffer
                        elif frame_publish_format == "yuv":
                            data = frame.yuv_buffer
                        elif frame_publish_format == "bgr":
                            data = frame.bgr
                        elif frame_publish_format == "gray":
                            data = frame.gray
                        assert data is not None
                    except (AttributeError, AssertionError, NameError):
                        if not frame_publish_format_recent_warning:
                            frame_publish_format_recent_warning = True
                            logger.warning(
                                '{}s are not compatible with format "{}"'.
                                format(type(frame), frame_publish_format))
                    else:
                        frame_publish_format_recent_warning = False
                        pupil_socket.send({
                            "topic":
                            "frame.eye.{}".format(eye_id),
                            "width":
                            frame.width,
                            "height":
                            frame.height,
                            "index":
                            frame.index,
                            "timestamp":
                            frame.timestamp,
                            "format":
                            frame_publish_format,
                            "__raw_data__": [data],
                        })

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1.0 / dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result = g_pool.pupil_detector.detect(
                    frame, g_pool.u_r, g_pool.display_mode == "algorithm")
                if result is not None:
                    result["id"] = eye_id
                    result["topic"] = "pupil.{}".format(eye_id)
                    pupil_socket.send(result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == "algorithm":
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ("camera_image", "roi"):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    glViewport(0, 0, *camera_render_size)
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()

                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3),
                                                  g_pool.flip)
                    if frame and result:
                        if result["method"] == "3d c++":
                            eye_ball = result["projected_sphere"]
                            try:
                                pts = cv2.ellipse2Poly(
                                    (
                                        int(eye_ball["center"][0]),
                                        int(eye_ball["center"][1]),
                                    ),
                                    (
                                        int(eye_ball["axes"][0] / 2),
                                        int(eye_ball["axes"][1] / 2),
                                    ),
                                    int(eye_ball["angle"]),
                                    0,
                                    360,
                                    8,
                                )
                            except ValueError:
                                # ellipse could not be rasterized; skip drawing it
                                pass
                            else:
                                draw_polyline(
                                    pts,
                                    2,
                                    RGBA(0.0, 0.9, 0.1,
                                         result["model_confidence"]),
                                )
                        if result["confidence"] > 0:
                            if "ellipse" in result:
                                pts = cv2.ellipse2Poly(
                                    (
                                        int(result["ellipse"]["center"][0]),
                                        int(result["ellipse"]["center"][1]),
                                    ),
                                    (
                                        int(result["ellipse"]["axes"][0] / 2),
                                        int(result["ellipse"]["axes"][1] / 2),
                                    ),
                                    int(result["ellipse"]["angle"]),
                                    0,
                                    360,
                                    15,
                                )
                                confidence = result["confidence"] * 0.7
                                draw_polyline(pts, 1,
                                              RGBA(1.0, 0, 0, confidence))
                                draw_points(
                                    [result["ellipse"]["center"]],
                                    size=20,
                                    color=RGBA(1.0, 0.0, 0.0, confidence),
                                    sharpness=1.0,
                                )

                    glViewport(0, 0, *camera_render_size)
                    make_coord_system_pixel_based((f_height, f_width, 3),
                                                  g_pool.flip)
                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == "roi":
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    glViewport(0, 0, *window_size)
                    make_coord_system_pixel_based((*window_size[::-1], 3),
                                                  g_pool.flip)
                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    unused_elements = g_pool.gui.update()
                    for butt in unused_elements.buttons:
                        uroi_on_mouse_button(*butt)

                    make_coord_system_pixel_based((*window_size[::-1], 3),
                                                  g_pool.flip)

                    g_pool.pupil_detector.visualize(
                    )  # detector decides if we visualize or not

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # need to do this for windows os
        # save session persistent settings
        session_settings["gui_scale"] = g_pool.gui_user_scale
        session_settings["roi"] = g_pool.u_r.get()
        session_settings["flip"] = g_pool.flip
        session_settings["display_mode"] = g_pool.display_mode
        session_settings["ui_config"] = g_pool.gui.configuration
        session_settings["capture_settings"] = (
            g_pool.capture.class_name,
            g_pool.capture.get_init_dict(),
        )
        session_settings["capture_manager_settings"] = (
            g_pool.capture_manager.class_name,
            g_pool.capture_manager.get_init_dict(),
        )
        session_settings["window_position"] = glfw.glfwGetWindowPos(
            main_window)
        session_settings["version"] = str(g_pool.version)
        session_settings[
            "last_pupil_detector"] = g_pool.pupil_detector.__class__.__name__
        session_settings[
            "pupil_detector_settings"] = g_pool.pupil_detector.get_settings()

        session_window_size = glfw.glfwGetWindowSize(main_window)
        if 0 not in session_window_size:
            session_settings["window_size"] = session_window_size

        session_settings.close()

        g_pool.capture.deinit_ui()
        g_pool.capture_manager.deinit_ui()
        g_pool.pupil_detector.deinit_ui()

        g_pool.pupil_detector.cleanup()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()

        glfw.glfwDestroyWindow(main_window)
        g_pool.gui.terminate()
        glfw.glfwTerminate()
        logger.info("Process shutting down.")
Example #19
0
File: recorder.py Project: Tkwitty/pupil
    def start(self):
        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session, exist_ok=True)
            logger.debug(
                "Created new recordings session dir {}".format(session))
        except OSError:
            logger.error(
                "Could not start recording. Session dir {} not writable.".
                format(session))
            return

        self.data = {
            'pupil_positions': [],
            'gaze_positions': [],
            'notifications': []
        }
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()
        start_time_synced = self.g_pool.get_timestamp()

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(
                    self.rec_path))
                break
            except FileExistsError:
                logger.debug(
                    "We don't want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w', newline='',
                  encoding='utf-8') as csvfile:
            csv_utils.write_key_value_file(
                csvfile, {
                    'Recording Name': self.session_name,
                    'Start Date': strftime("%d.%m.%Y",
                                           localtime(self.start_time)),
                    'Start Time': strftime("%H:%M:%S",
                                           localtime(self.start_time)),
                    'Start Time (System)': self.start_time,
                    'Start Time (Synced)': start_time_synced
                })

        self.video_path = os.path.join(self.rec_path, "world.mp4")
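        # Pick a writer: JPEG pass-through when the capture delivers MJPEG and
        # raw recording is requested, H264 pass-through when the recent frame
        # exposes an h264_buffer, otherwise software encode with AV_Writer.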
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path,
                                      self.g_pool.capture.frame_rate)
        elif hasattr(self.g_pool.capture._recent_frame, 'h264_buffer'):
            self.writer = H264Writer(self.video_path,
                                     self.g_pool.capture.frame_size[0],
                                     self.g_pool.capture.frame_size[1],
                                     int(self.g_pool.capture.frame_rate))
        else:
            self.writer = AV_Writer(self.video_path,
                                    fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir,
                                       "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {
                'subject': 'calibration.calibration_data',
                'record': True
            }
            notification.update(cal_data)
            self.data['notifications'].append(notification)
        except Exception:
            # no stored user calibration data; nothing to embed in the recording
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({
            'subject': 'recording.started',
            'rec_path': self.rec_path,
            'session_name': self.session_name,
            'record_eye': self.record_eye,
            'compression': self.raw_jpeg
        })
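For reference, the csv_utils.write_key_value_file call above stores the meta info as simple key/value rows. A minimal stand-in sketch, assuming a tab-delimited layout consistent with the info.csv written in Example #1 (an illustration, not the shipped helper):

    import csv

    def write_key_value_file(csvfile, data_dict):
        # One "key<TAB>value" row per entry.
        writer = csv.writer(csvfile, delimiter="\t")
        for key, value in data_dict.items():
            writer.writerow((key, value))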
Example #20
0
File: eye.py Project: Heyula08/pupil
def eye(timebase,
        is_alive_flag,
        ipc_pub_url,
        ipc_sub_url,
        ipc_push_url,
        user_dir,
        version,
        eye_id,
        overwrite_cap_settings=None):
    """reads eye video and detects the pupil.

    Creates a window, gl context.
    Grabs images from a capture.
    Streams Pupil coordinates.

    Reacts to notifications:
       ``set_detection_mapping_mode``: Sets detection method
       ``eye_process.should_stop``: Stops the eye process
       ``recording.started``: Starts recording eye video
       ``recording.stopped``: Stops recording eye video
       ``frame_publishing.started``: Starts frame publishing
       ``frame_publishing.stopped``: Stops frame publishing

    Emits notifications:
        ``eye_process.started``: Eye process started
        ``eye_process.stopped``: Eye process stopped

    Emits data:
        ``pupil.<eye id>``: Pupil data for eye with id ``<eye id>``
        ``frame.eye.<eye id>``: Eye frames with id ``<eye id>``
    """

    # We defer the imports because of multiprocessing.
    # Otherwise, each process would also load the other processes' imports.
    import zmq
    import zmq_tools
    zmq_ctx = zmq.Context()
    ipc_socket = zmq_tools.Msg_Dispatcher(zmq_ctx, ipc_push_url)
    pupil_socket = zmq_tools.Msg_Streamer(zmq_ctx, ipc_pub_url)
    notify_sub = zmq_tools.Msg_Receiver(zmq_ctx,
                                        ipc_sub_url,
                                        topics=("notify", ))

    with Is_Alive_Manager(is_alive_flag, ipc_socket, eye_id):

        # logging setup
        import logging
        logging.getLogger("OpenGL").setLevel(logging.ERROR)
        logger = logging.getLogger()
        logger.handlers = []
        logger.setLevel(logging.INFO)
        logger.addHandler(zmq_tools.ZMQ_handler(zmq_ctx, ipc_push_url))
        # create logger for the context of this function
        logger = logging.getLogger(__name__)

        # general imports
        import numpy as np
        import cv2

        # display
        import glfw
        from pyglui import ui, graph, cygl
        from pyglui.cygl.utils import draw_points, RGBA, draw_polyline
        from pyglui.cygl.utils import Named_Texture
        from gl_utils import basic_gl_setup, adjust_gl_view, clear_gl_screen
        from gl_utils import make_coord_system_pixel_based
        from gl_utils import make_coord_system_norm_based
        from gl_utils import is_window_visible
        from ui_roi import UIRoi
        # monitoring
        import psutil

        # helpers/utils
        from uvc import get_time_monotonic
        from file_methods import Persistent_Dict
        from version_utils import VersionFormat
        from methods import normalize, denormalize, timer
        from av_writer import JPEG_Writer, AV_Writer
        from ndsi import H264Writer
        from video_capture import source_classes
        from video_capture import manager_classes

        # Pupil detectors
        from pupil_detectors import Detector_2D, Detector_3D
        pupil_detectors = {
            Detector_2D.__name__: Detector_2D,
            Detector_3D.__name__: Detector_3D
        }

        # UI Platform tweaks
        if platform.system() == 'Linux':
            scroll_factor = 10.0
            window_position_default = (600, 300 * eye_id)
        elif platform.system() == 'Windows':
            scroll_factor = 10.0
            window_position_default = (600, 31 + 300 * eye_id)
        else:
            scroll_factor = 1.0
            window_position_default = (600, 300 * eye_id)

        # g_pool holds variables for this process
        g_pool = Global_Container()

        # make some constants available
        g_pool.user_dir = user_dir
        g_pool.version = version
        g_pool.app = 'capture'
        g_pool.process = 'eye{}'.format(eye_id)
        g_pool.timebase = timebase

        g_pool.ipc_pub = ipc_socket

        def get_timestamp():
            return get_time_monotonic() - g_pool.timebase.value

        g_pool.get_timestamp = get_timestamp
        g_pool.get_now = get_time_monotonic

        # Callback functions
        def on_resize(window, w, h):
            if is_window_visible(window):
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(window)
                hdpi_factor = float(
                    glfw.glfwGetFramebufferSize(window)[0] /
                    glfw.glfwGetWindowSize(window)[0])
                g_pool.gui.scale = g_pool.gui_user_scale * hdpi_factor
                g_pool.gui.update_window(w, h)
                g_pool.gui.collect_menus()
                for g in g_pool.graphs:
                    g.scale = hdpi_factor
                    g.adjust_window_size(w, h)
                adjust_gl_view(w, h)
                glfw.glfwMakeContextCurrent(active_window)

        def on_key(window, key, scancode, action, mods):
            g_pool.gui.update_key(key, scancode, action, mods)

        def on_char(window, char):
            g_pool.gui.update_char(char)

        def on_iconify(window, iconified):
            g_pool.iconified = iconified

        def on_button(window, button, action, mods):
            if g_pool.display_mode == 'roi':
                if action == glfw.GLFW_RELEASE and g_pool.u_r.active_edit_pt:
                    g_pool.u_r.active_edit_pt = False
                    # if the roi interacts we don't want
                    # the gui to interact as well
                    return
                elif action == glfw.GLFW_PRESS:
                    pos = glfw.glfwGetCursorPos(window)
                    pos = normalize(pos, glfw.glfwGetWindowSize(main_window))
                    if g_pool.flip:
                        pos = 1 - pos[0], 1 - pos[1]
                    # Position in img pixels
                    pos = denormalize(
                        pos,
                        g_pool.capture.frame_size)  # Position in img pixels
                    if g_pool.u_r.mouse_over_edit_pt(
                            pos, g_pool.u_r.handle_size + 40,
                            g_pool.u_r.handle_size + 40):
                        # if the roi interacts we don't want
                        # the gui to interact as well
                        return

            g_pool.gui.update_button(button, action, mods)

        def on_pos(window, x, y):
            hdpi_factor = glfw.glfwGetFramebufferSize(
                window)[0] / glfw.glfwGetWindowSize(window)[0]
            g_pool.gui.update_mouse(x * hdpi_factor, y * hdpi_factor)

            if g_pool.u_r.active_edit_pt:
                pos = normalize((x, y), glfw.glfwGetWindowSize(main_window))
                if g_pool.flip:
                    pos = 1 - pos[0], 1 - pos[1]
                pos = denormalize(pos, g_pool.capture.frame_size)
                g_pool.u_r.move_vertex(g_pool.u_r.active_pt_idx, pos)

        def on_scroll(window, x, y):
            g_pool.gui.update_scroll(x, y * scroll_factor)

        # load session persistent settings
        session_settings = Persistent_Dict(
            os.path.join(g_pool.user_dir,
                         'user_settings_eye{}'.format(eye_id)))
        if VersionFormat(session_settings.get("version",
                                              '0.0')) != g_pool.version:
            logger.info(
                "Session settings are from a different version of this app. I will not use those."
            )
            session_settings.clear()

        g_pool.iconified = False
        g_pool.capture = None
        g_pool.capture_manager = None
        g_pool.flip = session_settings.get('flip', False)
        g_pool.display_mode = session_settings.get('display_mode',
                                                   'camera_image')
        g_pool.display_mode_info_text = {
            'camera_image':
            "Raw eye camera image. This uses the least amount of CPU power",
            'roi':
            "Click and drag on the blue circles to adjust the region of interest. The region should be as small as possible, but large enough to capture all pupil movements.",
            'algorithm':
            "Algorithm display mode overlays a visualization of the pupil detection parameters on top of the eye video. Adjust parameters within the Pupil Detection menu below."
        }

        capture_manager_settings = session_settings.get(
            'capture_manager_settings', ('UVC_Manager', {}))

        manager_class_name, manager_settings = capture_manager_settings
        manager_class_by_name = {c.__name__: c for c in manager_classes}
        g_pool.capture_manager = manager_class_by_name[manager_class_name](
            g_pool, **manager_settings)

        if eye_id == 0:
            cap_src = [
                "Pupil Cam1 ID0", "HD-6000", "Integrated Camera",
                "HD USB Camera", "USB 2.0 Camera"
            ]
        else:
            cap_src = ["Pupil Cam1 ID1", "HD-6000", "Integrated Camera"]

        # Initialize capture
        default_settings = ('UVC_Source', {
            'preferred_names': cap_src,
            'frame_size': (640, 480),
            'frame_rate': 90
        })

        capture_source_settings = overwrite_cap_settings or session_settings.get(
            'capture_settings', default_settings)
        source_class_name, source_settings = capture_source_settings
        source_class_by_name = {c.__name__: c for c in source_classes}
        g_pool.capture = source_class_by_name[source_class_name](
            g_pool, **source_settings)
        assert g_pool.capture

        g_pool.u_r = UIRoi(
            (g_pool.capture.frame_size[1], g_pool.capture.frame_size[0]))
        roi_user_settings = session_settings.get('roi')
        if roi_user_settings and roi_user_settings[-1] == g_pool.u_r.get()[-1]:
            g_pool.u_r.set(roi_user_settings)

        pupil_detector_settings = session_settings.get(
            'pupil_detector_settings', None)
        last_pupil_detector = pupil_detectors[session_settings.get(
            'last_pupil_detector', Detector_2D.__name__)]
        g_pool.pupil_detector = last_pupil_detector(g_pool,
                                                    pupil_detector_settings)

        def set_display_mode_info(val):
            g_pool.display_mode = val
            g_pool.display_mode_info.text = g_pool.display_mode_info_text[val]

        def set_detector(new_detector):
            g_pool.pupil_detector.cleanup()
            g_pool.pupil_detector = new_detector(g_pool)
            g_pool.pupil_detector.init_gui(g_pool.sidebar)

        # Initialize glfw
        glfw.glfwInit()
        title = "Pupil Capture - eye {}".format(eye_id)
        width, height = session_settings.get('window_size',
                                             g_pool.capture.frame_size)
        main_window = glfw.glfwCreateWindow(width, height, title, None, None)
        window_pos = session_settings.get('window_position',
                                          window_position_default)
        glfw.glfwSetWindowPos(main_window, window_pos[0], window_pos[1])
        glfw.glfwMakeContextCurrent(main_window)
        cygl.utils.init()

        # UI callback functions
        def set_scale(new_scale):
            g_pool.gui_user_scale = new_scale
            on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        # gl_state settings
        basic_gl_setup()
        g_pool.image_tex = Named_Texture()
        g_pool.image_tex.update_from_ndarray(
            np.ones((1, 1), dtype=np.uint8) + 125)

        # setup GUI
        g_pool.gui = ui.UI()
        g_pool.gui_user_scale = session_settings.get('gui_scale', 1.)
        g_pool.sidebar = ui.Scrolling_Menu("Settings",
                                           pos=(-300, 0),
                                           size=(0, 0),
                                           header_pos='left')
        general_settings = ui.Growing_Menu('General')
        general_settings.append(
            ui.Selector('gui_user_scale',
                        g_pool,
                        setter=set_scale,
                        selection=[.8, .9, 1., 1.1, 1.2],
                        label='Interface Size'))
        general_settings.append(
            ui.Button(
                'Reset window size', lambda: glfw.glfwSetWindowSize(
                    main_window, *g_pool.capture.frame_size)))
        general_settings.append(
            ui.Switch('flip', g_pool, label='Flip image display'))
        general_settings.append(
            ui.Selector('display_mode',
                        g_pool,
                        setter=set_display_mode_info,
                        selection=['camera_image', 'roi', 'algorithm'],
                        labels=['Camera Image', 'ROI', 'Algorithm'],
                        label="Mode"))
        g_pool.display_mode_info = ui.Info_Text(
            g_pool.display_mode_info_text[g_pool.display_mode])

        general_settings.append(g_pool.display_mode_info)
        g_pool.gui.append(g_pool.sidebar)
        detector_selector = ui.Selector(
            'pupil_detector',
            getter=lambda: g_pool.pupil_detector.__class__,
            setter=set_detector,
            selection=[Detector_2D, Detector_3D],
            labels=['C++ 2d detector', 'C++ 3d detector'],
            label="Detection method")
        general_settings.append(detector_selector)

        g_pool.capture_selector_menu = ui.Growing_Menu('Capture Selection')
        g_pool.capture_source_menu = ui.Growing_Menu('Capture Source')
        g_pool.capture_source_menu.collapsed = True
        g_pool.capture.init_gui()

        g_pool.sidebar.append(general_settings)
        g_pool.sidebar.append(g_pool.capture_selector_menu)
        g_pool.sidebar.append(g_pool.capture_source_menu)

        g_pool.pupil_detector.init_gui(g_pool.sidebar)

        g_pool.capture_manager.init_gui()
        g_pool.writer = None

        def replace_source(source_class_name, source_settings):
            g_pool.capture.cleanup()
            g_pool.capture = source_class_by_name[source_class_name](
                g_pool, **source_settings)
            g_pool.capture.init_gui()
            if g_pool.writer:
                logger.info("Done recording.")
                g_pool.writer.release()
                g_pool.writer = None

        g_pool.replace_source = replace_source  # for ndsi capture

        def replace_manager(manager_class):
            g_pool.capture_manager.cleanup()
            g_pool.capture_manager = manager_class(g_pool)
            g_pool.capture_manager.init_gui()

        # We add the capture selection menu after a manager has been added:
        g_pool.capture_selector_menu.insert(
            0,
            ui.Selector('capture_manager',
                        g_pool,
                        setter=replace_manager,
                        getter=lambda: g_pool.capture_manager.__class__,
                        selection=manager_classes,
                        labels=[b.gui_name for b in manager_classes],
                        label='Manager'))

        # Register callbacks main_window
        glfw.glfwSetFramebufferSizeCallback(main_window, on_resize)
        glfw.glfwSetWindowIconifyCallback(main_window, on_iconify)
        glfw.glfwSetKeyCallback(main_window, on_key)
        glfw.glfwSetCharCallback(main_window, on_char)
        glfw.glfwSetMouseButtonCallback(main_window, on_button)
        glfw.glfwSetCursorPosCallback(main_window, on_pos)
        glfw.glfwSetScrollCallback(main_window, on_scroll)

        # load last gui configuration
        g_pool.gui.configuration = session_settings.get('ui_config', {})

        # set up performance graphs
        pid = os.getpid()
        ps = psutil.Process(pid)
        ts = g_pool.get_timestamp()

        cpu_graph = graph.Bar_Graph()
        cpu_graph.pos = (20, 130)
        cpu_graph.update_fn = ps.cpu_percent
        cpu_graph.update_rate = 5
        cpu_graph.label = 'CPU %0.1f'

        fps_graph = graph.Bar_Graph()
        fps_graph.pos = (140, 130)
        fps_graph.update_rate = 5
        fps_graph.label = "%0.0f FPS"
        g_pool.graphs = [cpu_graph, fps_graph]

        # set the last saved window size
        on_resize(main_window, *glfw.glfwGetFramebufferSize(main_window))

        should_publish_frames = False
        frame_publish_format = 'jpeg'

        # create a timer to control window update frequency
        window_update_timer = timer(1 / 60)

        def window_should_update():
            return next(window_update_timer)

        logger.warning('Process started.')

        frame = None

        # Event loop
        while not glfw.glfwWindowShouldClose(main_window):

            if notify_sub.new_data:
                t, notification = notify_sub.recv()
                subject = notification['subject']
                if subject == 'eye_process.should_stop':
                    if notification['eye_id'] == eye_id:
                        break
                elif subject == 'set_detection_mapping_mode':
                    if notification['mode'] == '3d':
                        if not isinstance(g_pool.pupil_detector, Detector_3D):
                            set_detector(Detector_3D)
                        detector_selector.read_only = True
                    else:
                        if not isinstance(g_pool.pupil_detector, Detector_2D):
                            set_detector(Detector_2D)
                        detector_selector.read_only = False
                elif subject == 'recording.started':
                    if notification['record_eye'] and g_pool.capture.online:
                        record_path = notification['rec_path']
                        raw_mode = notification['compression']
                        logger.info(
                            "Will save eye video to: {}".format(record_path))
                        video_path = os.path.join(record_path,
                                                  "eye{}.mp4".format(eye_id))
                        if raw_mode and frame and g_pool.capture.jpeg_support:
                            g_pool.writer = JPEG_Writer(
                                video_path, g_pool.capture.frame_rate)
                        elif hasattr(g_pool.capture._recent_frame,
                                     'h264_buffer'):
                            g_pool.writer = H264Writer(
                                video_path, g_pool.capture.frame_size[0],
                                g_pool.capture.frame_size[1],
                                g_pool.capture.frame_rate)
                        else:
                            g_pool.writer = AV_Writer(
                                video_path, g_pool.capture.frame_rate)
                elif subject == 'recording.stopped':
                    if g_pool.writer:
                        logger.info("Done recording.")
                        g_pool.writer.release()
                        g_pool.writer = None
                elif subject.startswith('meta.should_doc'):
                    ipc_socket.notify({
                        'subject': 'meta.doc',
                        'actor': 'eye{}'.format(eye_id),
                        'doc': eye.__doc__
                    })
                elif subject.startswith('frame_publishing.started'):
                    should_publish_frames = True
                    frame_publish_format = notification.get('format', 'jpeg')
                elif subject.startswith('frame_publishing.stopped'):
                    should_publish_frames = False
                    frame_publish_format = 'jpeg'
                elif subject.startswith('start_eye_capture') and notification[
                        'target'] == g_pool.process:
                    replace_source(notification['name'], notification['args'])

                g_pool.capture.on_notify(notification)

            # Get an image from the grabber
            event = {}
            g_pool.capture.recent_events(event)
            frame = event.get('frame')
            g_pool.capture_manager.recent_events(event)
            if frame:
                f_width, f_height = g_pool.capture.frame_size
                if (g_pool.u_r.array_shape[0],
                        g_pool.u_r.array_shape[1]) != (f_height, f_width):
                    g_pool.u_r = UIRoi((f_height, f_width))
                if should_publish_frames and frame.jpeg_buffer:
                    if frame_publish_format == "jpeg":
                        data = frame.jpeg_buffer
                    elif frame_publish_format == "yuv":
                        data = frame.yuv_buffer
                    elif frame_publish_format == "bgr":
                        data = frame.bgr
                    elif frame_publish_format == "gray":
                        data = frame.gray
                    pupil_socket.send(
                        'frame.eye.%s' % eye_id, {
                            'width': frame.width,
                            'height': frame.height,
                            'index': frame.index,
                            'timestamp': frame.timestamp,
                            'format': frame_publish_format,
                            '__raw_data__': [data]
                        })

                t = frame.timestamp
                dt, ts = t - ts, t
                try:
                    fps_graph.add(1. / dt)
                except ZeroDivisionError:
                    pass

                if g_pool.writer:
                    g_pool.writer.write_video_frame(frame)

                # pupil ellipse detection
                result = g_pool.pupil_detector.detect(
                    frame, g_pool.u_r, g_pool.display_mode == 'algorithm')
                result['id'] = eye_id

                # stream the result
                pupil_socket.send('pupil.%s' % eye_id, result)

            cpu_graph.update()

            # GL drawing
            if window_should_update():
                if is_window_visible(main_window):
                    glfw.glfwMakeContextCurrent(main_window)
                    clear_gl_screen()

                    if frame:
                        # switch to work in normalized coordinate space
                        if g_pool.display_mode == 'algorithm':
                            g_pool.image_tex.update_from_ndarray(frame.img)
                        elif g_pool.display_mode in ('camera_image', 'roi'):
                            g_pool.image_tex.update_from_ndarray(frame.gray)
                        else:
                            pass
                    make_coord_system_norm_based(g_pool.flip)
                    g_pool.image_tex.draw()
                    f_width, f_height = g_pool.capture.frame_size
                    make_coord_system_pixel_based((f_height, f_width, 3),
                                                  g_pool.flip)
                    if frame:
                        if result['method'] == '3d c++':
                            eye_ball = result['projected_sphere']
                            try:
                                pts = cv2.ellipse2Poly(
                                    (int(eye_ball['center'][0]),
                                     int(eye_ball['center'][1])),
                                    (int(eye_ball['axes'][0] / 2),
                                     int(eye_ball['axes'][1] / 2)),
                                    int(eye_ball['angle']), 0, 360, 8)
                            except ValueError as e:
                                pass
                            else:
                                draw_polyline(
                                    pts, 2,
                                    RGBA(0., .9, .1,
                                         result['model_confidence']))
                        if result['confidence'] > 0:
                            if 'ellipse' in result:
                                pts = cv2.ellipse2Poly(
                                    (int(result['ellipse']['center'][0]),
                                     int(result['ellipse']['center'][1])),
                                    (int(result['ellipse']['axes'][0] / 2),
                                     int(result['ellipse']['axes'][1] / 2)),
                                    int(result['ellipse']['angle']), 0, 360,
                                    15)
                                confidence = result['confidence'] * 0.7
                                draw_polyline(pts, 1,
                                              RGBA(1., 0, 0, confidence))
                                draw_points([result['ellipse']['center']],
                                            size=20,
                                            color=RGBA(1., 0., 0., confidence),
                                            sharpness=1.)

                    # render graphs
                    fps_graph.draw()
                    cpu_graph.draw()

                    # render GUI
                    g_pool.gui.update()

                    # render the ROI
                    g_pool.u_r.draw(g_pool.gui.scale)
                    if g_pool.display_mode == 'roi':
                        g_pool.u_r.draw_points(g_pool.gui.scale)

                    # update screen
                    glfw.glfwSwapBuffers(main_window)
                glfw.glfwPollEvents()
                g_pool.pupil_detector.visualize()  # detector decides if we visualize or not

        # END while running

        # in case eye recording was still running: save & close
        if g_pool.writer:
            logger.info("Done recording eye.")
            g_pool.writer.release()
            g_pool.writer = None

        glfw.glfwRestoreWindow(main_window)  # need to do this for windows os
        # save session persistent settings
        session_settings['gui_scale'] = g_pool.gui_user_scale
        session_settings['roi'] = g_pool.u_r.get()
        session_settings['flip'] = g_pool.flip
        session_settings['display_mode'] = g_pool.display_mode
        session_settings['ui_config'] = g_pool.gui.configuration
        session_settings['capture_settings'] = (
            g_pool.capture.class_name, g_pool.capture.get_init_dict())
        session_settings['capture_manager_settings'] = (
            g_pool.capture_manager.class_name,
            g_pool.capture_manager.get_init_dict())
        session_settings['window_size'] = glfw.glfwGetWindowSize(main_window)
        session_settings['window_position'] = glfw.glfwGetWindowPos(
            main_window)
        session_settings['version'] = str(g_pool.version)
        session_settings['last_pupil_detector'] = (
            g_pool.pupil_detector.__class__.__name__)
        session_settings['pupil_detector_settings'] = (
            g_pool.pupil_detector.get_settings())
        session_settings.close()

        g_pool.capture.deinit_gui()
        g_pool.pupil_detector.cleanup()
        g_pool.gui.terminate()
        glfw.glfwDestroyWindow(main_window)
        glfw.glfwTerminate()
        g_pool.capture_manager.cleanup()
        g_pool.capture.cleanup()
        logger.info("Process shutting down.")
Example #21
0
File: recorder.py Project: Tkwitty/pupil
class Recorder(System_Plugin_Base):
    """Capture Recorder"""
    icon_chr = chr(0xe04b)
    icon_font = 'pupil_icons'

    warning_low_disk_space_th = 5.0  # threshold in GB
    stop_rec_low_disk_space_th = 1.0  # threshold in GB

    def __init__(self,
                 g_pool,
                 session_name=get_auto_name(),
                 rec_dir=None,
                 user_info={
                     'name': '',
                     'additional_field': 'change_me'
                 },
                 info_menu_conf={},
                 show_info_menu=False,
                 record_eye=True,
                 raw_jpeg=True):
        super().__init__(g_pool)
        # update name if it was autogenerated.
        if session_name.startswith('20') and len(session_name) == 10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep, 1)[0]
        default_rec_dir = os.path.join(base_dir, 'recordings')

        if rec_dir and rec_dir != default_rec_dir and self.verify_path(
                rec_dir):
            self.rec_dir = rec_dir
        else:
            try:
                os.makedirs(default_rec_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info(
                    'Created standard Rec dir at "{}"'.format(default_rec_dir))
            self.rec_dir = default_rec_dir

        self.raw_jpeg = raw_jpeg
        self.order = .9
        self.record_eye = record_eye
        self.session_name = session_name
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf

        self.low_disk_space_thumb = None
        check_timer = timer(1.)
        self.check_space = lambda: next(check_timer)

    def get_init_dict(self):
        d = {}
        d['record_eye'] = self.record_eye
        d['session_name'] = self.session_name
        d['user_info'] = self.user_info
        d['info_menu_conf'] = self.info_menu_conf
        d['show_info_menu'] = self.show_info_menu
        d['rec_dir'] = self.rec_dir
        d['raw_jpeg'] = self.raw_jpeg
        return d

    def init_ui(self):
        self.add_menu()
        self.menu.label = 'Recorder'
        self.menu_icon.order = 0.29

        self.menu.append(
            ui.Info_Text(
                'Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'
            ))
        self.menu.append(
            ui.Info_Text(
                'Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'
            ))
        self.menu.append(
            ui.Text_Input('rec_dir',
                          self,
                          setter=self.set_rec_dir,
                          label='Path to recordings'))
        self.menu.append(
            ui.Text_Input('session_name',
                          self,
                          setter=self.set_session_name,
                          label='Recording session name'))
        self.menu.append(
            ui.Switch('show_info_menu',
                      self,
                      on_val=True,
                      off_val=False,
                      label='Request additional user info'))
        self.menu.append(
            ui.Selector(
                'raw_jpeg',
                self,
                selection=[True, False],
                labels=["bigger file, less CPU", "smaller file, more CPU"],
                label='Compression'))
        self.menu.append(
            ui.Info_Text(
                'Recording the raw eye video is optional. We use it for debugging.'
            ))
        self.menu.append(
            ui.Switch('record_eye',
                      self,
                      on_val=True,
                      off_val=False,
                      label='Record eye'))
        self.button = ui.Thumb('running',
                               self,
                               setter=self.toggle,
                               label='R',
                               hotkey='r')
        self.button.on_color[:] = (1, .0, .0, .8)
        self.g_pool.quickbar.insert(2, self.button)

        self.low_disk_space_thumb = ui.Thumb('low_disk_warn',
                                             label='!',
                                             getter=lambda: True,
                                             setter=lambda x: None)
        self.low_disk_space_thumb.on_color[:] = (1, .0, .0, .8)
        self.low_disk_space_thumb.status_text = 'Low disk space'

    def deinit_ui(self):
        if self.low_disk_space_thumb in self.g_pool.quickbar:
            self.g_pool.quickbar.remove(self.low_disk_space_thumb)
        self.g_pool.quickbar.remove(self.button)
        self.button = None
        self.remove_menu()

    def toggle(self, _=None):
        if self.running:
            self.notify_all({'subject': 'recording.should_stop'})
            self.notify_all({
                'subject': 'recording.should_stop',
                'remote_notify': 'all'
            })
        else:
            self.notify_all({
                'subject': 'recording.should_start',
                'session_name': self.session_name
            })
            self.notify_all({
                'subject': 'recording.should_start',
                'session_name': self.session_name,
                'remote_notify': 'all'
            })

    def on_notify(self, notification):
        """Handles recorder notifications

        Reacts to notifications:
            ``recording.should_start``: Starts a new recording session.
                fields:
                - 'session_name' change session name
                    start with `/` to ignore the rec base dir and start from root instead.
                - `record_eye` boolean that indicates recording of the eyes, defaults to current setting
            ``recording.should_stop``: Stops current recording session

        Emits notifications:
            ``recording.started``: New recording session started
            ``recording.stopped``: Current recording session stopped

        Args:
            notification (dictionary): Notification dictionary
        """
        # notification wants to be recorded
        if notification.get('record', False) and self.running:
            if 'timestamp' not in notification:
                logger.error(
                    "Notification without timestamp will not be saved.")
            else:
                self.data['notifications'].append(notification)
        elif notification['subject'] == 'recording.should_start':
            if self.running:
                logger.info('Recording already running!')
            else:
                self.record_eye = notification.get('record_eye',
                                                   self.record_eye)
                if notification.get("session_name", ""):
                    self.set_session_name(notification["session_name"])
                self.start()

        elif notification['subject'] == 'recording.should_stop':
            if self.running:
                self.stop()
            else:
                logger.info('Recording already stopped!')

    def get_rec_time_str(self):
        rec_time = gmtime(time() - self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self):
        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session, exist_ok=True)
            logger.debug(
                "Created new recordings session dir {}".format(session))
        except OSError:
            logger.error(
                "Could not start recording. Session dir {} not writable.".
                format(session))
            return

        self.data = {
            'pupil_positions': [],
            'gaze_positions': [],
            'notifications': []
        }
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()
        start_time_synced = self.g_pool.get_timestamp()

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(
                    self.rec_path))
                break
            except:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w', newline='',
                  encoding='utf-8') as csvfile:
            csv_utils.write_key_value_file(
                csvfile, {
                    'Recording Name': self.session_name,
                    'Start Date': strftime("%d.%m.%Y",
                                           localtime(self.start_time)),
                    'Start Time': strftime("%H:%M:%S",
                                           localtime(self.start_time)),
                    'Start Time (System)': self.start_time,
                    'Start Time (Synced)': start_time_synced
                })

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path,
                                      self.g_pool.capture.frame_rate)
        elif hasattr(self.g_pool.capture._recent_frame, 'h264_buffer'):
            self.writer = H264Writer(self.video_path,
                                     self.g_pool.capture.frame_size[0],
                                     self.g_pool.capture.frame_size[1],
                                     int(self.g_pool.capture.frame_rate))
        else:
            self.writer = AV_Writer(self.video_path,
                                    fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir,
                                       "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {
                'subject': 'calibration.calibration_data',
                'record': True
            }
            notification.update(cal_data)
            self.data['notifications'].append(notification)
        except:
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({
            'subject': 'recording.started',
            'rec_path': self.rec_path,
            'session_name': self.session_name,
            'record_eye': self.record_eye,
            'compression': self.raw_jpeg
        })

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu('additional Recording Info',
                                         size=(300, 300),
                                         pos=(300, 300))
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.keys():
                self.info_menu.insert(0, ui.Text_Input(name, self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(
            ui.Info_Text(
                'Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'
            ))
        self.info_menu.append(
            ui.Text_Input('user_info',
                          self,
                          setter=set_user_info,
                          label="User info"))
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def recent_events(self, events):

        if self.check_space():
            disk_space = available_gb(self.rec_dir)
            if disk_space < self.warning_low_disk_space_th and self.low_disk_space_thumb not in self.g_pool.quickbar:
                self.g_pool.quickbar.append(self.low_disk_space_thumb)
            elif disk_space >= self.warning_low_disk_space_th and self.low_disk_space_thumb in self.g_pool.quickbar:
                self.g_pool.quickbar.remove(self.low_disk_space_thumb)

            if self.running and disk_space <= self.stop_rec_low_disk_space_th:
                self.stop()
                logger.error('Recording was stopped due to low disk space!')

        if self.running:
            for key, data in events.items():
                if key not in ('dt', 'frame', 'depth_frame'):
                    try:
                        self.data[key] += data
                    except KeyError:
                        self.data[key] = []
                        self.data[key] += data

            if 'frame' in events:
                frame = events['frame']
                self.writer.write_video_frame(frame)
                self.frame_count += 1

            # # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))

            self.button.status_text = self.get_rec_time_str()

    def stop(self):
        # explicit release of VideoWriter
        try:
            self.writer.release()
        except RuntimeError:
            logger.error("No world video recorded")
        else:
            logger.debug("Closed media container")
            self.g_pool.capture.intrinsics.save(self.rec_path,
                                                custom_name='world')
        finally:
            self.writer = None

        save_object(self.data, os.path.join(self.rec_path, "pupil_data"))

        try:
            copy2(os.path.join(self.g_pool.user_dir, "surface_definitions"),
                  os.path.join(self.rec_path, "surface_definitions"))
        except:
            logger.info(
                "No surface_definitions data found. You may want this if you do marker tracking."
            )

        try:
            with open(self.meta_info_path, 'a', newline='') as csvfile:
                csv_utils.write_key_value_file(csvfile, {
                    'Duration Time':
                    self.get_rec_time_str(),
                    'World Camera Frames':
                    self.frame_count,
                    'World Camera Resolution':
                    str(self.g_pool.capture.frame_size[0]) + "x" +
                    str(self.g_pool.capture.frame_size[1]),
                    'Capture Software Version':
                    self.g_pool.version,
                    'Data Format Version':
                    self.g_pool.version,
                    'System Info':
                    get_system_info()
                },
                                               append=True)
        except Exception:
            logger.exception(
                "Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"),
                      'w',
                      newline='') as csvfile:
                csv_utils.write_key_value_file(csvfile, self.user_info)
        except Exception:
            logger.exception(
                "Could not save userdata. Please report this bug!")

        self.close_info_menu()

        self.running = False
        if self.menu:
            self.menu.read_only = False
            self.button.status_text = ''

        self.data = {'pupil_positions': [], 'gaze_positions': []}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        logger.info("Saved Recording.")
        self.notify_all({
            'subject': 'recording.stopped',
            'rec_path': self.rec_path
        })

    def cleanup(self):
        """gets called when the plugin get terminated.
           either volunatily or forced.
        """
        if self.running:
            self.stop()

    def verify_path(self, val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '{}'.".format(n_path))
            return False
        else:
            return n_path

    def set_rec_dir(self, val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if os.path.sep in val:
                logger.warning(
                    'Your session name will create one or more subdirectories')
            self.session_name = val
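The on_notify docstring above documents the notification protocol this Recorder reacts to. As a hedged illustration (the plugin name and field values below are hypothetical), another plugin could start and stop a recording like this:

class My_Trigger_Plugin(Plugin):
    # Sketch only: drives the Recorder via notifications, per the on_notify docstring.
    def start_my_recording(self):
        self.notify_all({
            'subject': 'recording.should_start',
            'session_name': 'pilot_study',  # optional; a leading '/' ignores the rec base dir
            'record_eye': True,             # optional; defaults to the current setting
        })

    def stop_my_recording(self):
        self.notify_all({'subject': 'recording.should_stop'})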
Example #22
0
def export(rec_dir, user_dir, min_data_confidence, start_frame=None, end_frame=None,
           plugin_initializers=(), out_file_path=None, pre_computed={}):

    PID = str(os.getpid())
    logger = logging.getLogger(__name__+' with pid: '+PID)
    start_status = 'Starting video export with pid: {}'.format(PID)
    print(start_status)
    yield start_status, 0

    try:
        update_recording_to_recent(rec_dir)

        vis_plugins = sorted([Vis_Circle, Vis_Cross, Vis_Polyline, Vis_Light_Points,
                              Vis_Watermark, Vis_Scan_Path, Vis_Eye_Video_Overlay],
                             key=lambda x: x.__name__)
        analysis_plugins = [Offline_Fixation_Detector]
        user_plugins = sorted(import_runtime_plugins(os.path.join(user_dir, 'plugins')), key=lambda x: x.__name__)

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        update_recording_to_recent(rec_dir)

        video_path = [f for f in glob(os.path.join(rec_dir, "world.*"))
                      if os.path.splitext(f)[-1] in ('.mp4', '.mkv', '.avi', '.mjpeg')][0]
        pupil_data_path = os.path.join(rec_dir, "pupil_data")
        audio_path = os.path.join(rec_dir, "audio.mp4")

        meta_info = load_meta_info(rec_dir)

        g_pool = Global_Container()
        g_pool.app = 'exporter'
        g_pool.min_data_confidence = min_data_confidence
        cap = File_Source(g_pool, video_path)
        timestamps = cap.timestamps

        # Out file path verification. We do this beforehand, but if one uses a separate tool this will kick in.
        if out_file_path is None:
            out_file_path = os.path.join(rec_dir, "world_viz.mp4")
        else:
            file_name = os.path.basename(out_file_path)
            dir_name = os.path.dirname(out_file_path)
            if not dir_name:
                dir_name = rec_dir
            if not file_name:
                file_name = 'world_viz.mp4'
            out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like Python list slices, thus we can test them as such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = "Will export from frame {} to frame {}. This means I will export {} frames."
        logger.debug(exp_info.format(start_frame, start_frame + frames_to_export, frames_to_export))

        # setup of writer
        writer = AV_Writer(out_file_path, fps=cap.frame_rate, audio_loc=audio_path, use_timestamps=True)

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []
        # load pupil_positions, gaze_positions
        pupil_data = pre_computed.get("pupil_data") or load_object(pupil_data_path)
        g_pool.pupil_data = pupil_data
        g_pool.pupil_positions = pre_computed.get("pupil_positions") or pupil_data['pupil_positions']
        g_pool.gaze_positions = pre_computed.get("gaze_positions") or pupil_data['gaze_positions']
        g_pool.fixations = pre_computed.get("fixations", [])

        g_pool.pupil_positions_by_frame = correlate_data(g_pool.pupil_positions, g_pool.timestamps)
        g_pool.gaze_positions_by_frame = correlate_data(g_pool.gaze_positions, g_pool.timestamps)
        g_pool.fixations_by_frame = correlate_data(g_pool.fixations, g_pool.timestamps)

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoFileError:
                break

            events = {'frame': frame}
            # new positions and events
            events['gaze_positions'] = g_pool.gaze_positions_by_frame[frame.index]
            events['pupil_positions'] = g_pool.pupil_positions_by_frame[frame.index]

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n['_notify_time_'] < time():
                    del n['_notify_time_']
                    del g_pool.delayed_notifications[n['subject']]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield 'Exporting with pid {}'.format(PID), current_frame

        writer.close()
        writer = None

        duration = time()-start_time
        effective_fps = float(current_frame)/duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        print(result.format(current_frame, out_file_path, duration, effective_fps))
        yield 'Export done. This took {:.0f} seconds.'.format(duration), current_frame

    except GeneratorExit:
        print('Video export with pid {} was canceled.'.format(os.getpid()))
    except Exception as e:
        from time import sleep
        import traceback
        trace = traceback.format_exc()
        print('Process Export (pid: {}) crashed with trace:\n{}'.format(os.getpid(), trace))
        yield e
        sleep(1.0)
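Because export() is a generator, a launcher can drive it and report progress from the yielded (status, current_frame) tuples; on a crash it yields the exception object instead. A hedged sketch of consuming it (all paths and parameter values below are hypothetical):

progress = export(rec_dir='/path/to/recording',         # hypothetical path
                  user_dir='/path/to/pupil_settings',   # hypothetical path
                  min_data_confidence=0.6,
                  start_frame=0,
                  end_frame=None)
for update in progress:
    if isinstance(update, Exception):  # export() yields the exception on failure
        raise update
    status, current_frame = update
    print('{} ({} frames written)'.format(status, current_frame))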
Example #23
0
    def start(self, network_propagate=True):
        self.timestamps = []
        self.data = {
            'pupil_positions': [],
            'gaze_positions': [],
            'notifications': []
        }
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir %s" % session)

        except:
            logger.debug(
                "Recordings session dir %s already exists, using it." %
                session)

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "%03d/" % counter)
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir %s" % self.rec_path)
                break
            except:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w') as f:
            f.write("Recording Name\t" + self.session_name + "\n")
            f.write("Start Date\t" +
                    strftime("%d.%m.%Y", localtime(self.start_time)) + "\n")
            f.write("Start Time\t" +
                    strftime("%H:%M:%S", localtime(self.start_time)) + "\n")

        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(
                audio_path, self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path,
                                      self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path,
                                    fps=self.g_pool.capture.frame_rate)
        # positions path to eye process
        if self.record_eye:
            for tx in self.g_pool.eye_tx:
                tx.send((self.rec_path, self.raw_jpeg))

        if self.show_info_menu:
            self.open_info_menu()

        self.notify_all({
            'subject': 'rec_started',
            'rec_path': self.rec_path,
            'session_name': self.session_name,
            'network_propagate': network_propagate
        })
Example #24
0
class Realsense_Source(Base_Source):
    """
    Camera Capture is a class that encapsulates pyrs.Device:
    """
    def __init__(self, g_pool, device_id=0,
                 frame_size=(1920, 1080), frame_rate=30,
                 depth_frame_size=(640, 480), depth_frame_rate=60,
                 align_streams=False, preview_depth=False,
                 device_options=(), record_depth=True, stream_preset = None):
        super().__init__(g_pool)
        self._intrinsics = None
        self.color_frame_index = 0
        self.depth_frame_index = 0
        self.device = None
        self.service = pyrs.Service()
        self.align_streams = align_streams
        self.preview_depth = preview_depth
        self.record_depth = record_depth
        self.depth_video_writer = None
        self.controls = None
        self.pitch = 0
        self.yaw = 0
        self.mouse_drag = False
        self.last_pos = (0,0)
        self.depth_window = None
        self._needs_restart = False
        self.stream_preset = stream_preset
        self._initialize_device(device_id, frame_size, frame_rate,
                                depth_frame_size, depth_frame_rate, device_options)

    def _initialize_device(self, device_id,
                           color_frame_size, color_fps,
                           depth_frame_size, depth_fps,
                           device_options=()):
        devices = tuple(self.service.get_devices())
        color_frame_size = tuple(color_frame_size)
        depth_frame_size = tuple(depth_frame_size)

        self.streams = [ColorStream(), DepthStream(), PointStream()]
        self.last_color_frame_ts = None
        self.last_depth_frame_ts = None
        self._recent_frame = None
        self._recent_depth_frame = None

        if not devices:
            if not self._needs_restart:
                logger.error("Camera failed to initialize. No cameras connected.")
            self.device = None
            self.update_menu()
            return

        if self.device is not None:
            self.device.stop()  # only call Device.stop() if its context

        if device_id >= len(devices):
            logger.error("Camera with id {} not found. Initializing default camera.".format(device_id))
            device_id = 0

        # use default streams to filter modes by rs_stream and rs_format
        self._available_modes = self._enumerate_formats(device_id)

        # make sure that given frame sizes and rates are available
        color_modes = self._available_modes[rs_stream.RS_STREAM_COLOR]
        if color_frame_size not in color_modes:
            # automatically select highest resolution
            color_frame_size = sorted(color_modes.keys(), reverse=True)[0]

        if color_fps not in color_modes[color_frame_size]:
            # automatically select highest frame rate
            color_fps = color_modes[color_frame_size][0]

        depth_modes = self._available_modes[rs_stream.RS_STREAM_DEPTH]
        if self.align_streams:
            depth_frame_size = color_frame_size
        else:
            if depth_frame_size not in depth_modes:
                # automatically select highest resolution
                depth_frame_size = sorted(depth_modes.keys(), reverse=True)[0]

        if depth_fps not in depth_modes[depth_frame_size]:
            # automatically select highest frame rate
            depth_fps = depth_modes[depth_frame_size][0]

        colorstream = ColorStream(width=color_frame_size[0],
                                  height=color_frame_size[1],
                                  fps=color_fps, color_format='yuv', preset=self.stream_preset)
        depthstream = DepthStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1], fps=depth_fps, preset=self.stream_preset)
        pointstream = PointStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1], fps=depth_fps)

        self.streams = [colorstream, depthstream, pointstream]
        if self.align_streams:
            dacstream = DACStream(width=depth_frame_size[0],
                                  height=depth_frame_size[1], fps=depth_fps)
            dacstream.name = 'depth'  # rename data accessor
            self.streams.append(dacstream)

        # update with correctly initialized streams
        # always initializes color + depth, adds rectified/aligned versions as necessary

        self.device = self.service.Device(device_id, streams=self.streams)
        self.controls = Realsense_Controls(self.device, device_options)
        self._intrinsics = load_intrinsics(self.g_pool.user_dir, self.name, self.frame_size)

        self.update_menu()
        self._needs_restart = False

    def _enumerate_formats(self, device_id):
        '''Enumerate formats into hierarchical structure:

        streams:
            resolutions:
                framerates
        '''
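        # Hypothetical shape of the returned mapping (values illustrative only):
        # {rs_stream.RS_STREAM_COLOR: {(1920, 1080): [30], (640, 480): [60, 30]},
        #  rs_stream.RS_STREAM_DEPTH: {(640, 480): [90, 60, 30]}}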
        formats = {}
        # only lists modes for native streams (RS_STREAM_COLOR/RS_STREAM_DEPTH)
        for mode in self.service.get_device_modes(device_id):
            if mode.stream in (rs_stream.RS_STREAM_COLOR, rs_stream.RS_STREAM_DEPTH):
                # check if frame size dict is available
                if mode.stream not in formats:
                    formats[mode.stream] = {}
                stream_obj = next((s for s in self.streams if s.stream == mode.stream))
                if mode.format == stream_obj.format:
                    size = mode.width, mode.height
                    # check if framerate list is already available
                    if size not in formats[mode.stream]:
                        formats[mode.stream][size] = []
                    formats[mode.stream][size].append(mode.fps)

        if self.align_streams:
            depth_sizes = formats[rs_stream.RS_STREAM_DEPTH].keys()
            color_sizes = formats[rs_stream.RS_STREAM_COLOR].keys()
            # common_sizes = depth_sizes & color_sizes
            discarded_sizes = depth_sizes ^ color_sizes
            for size in discarded_sizes:
                for sizes in formats.values():
                    if size in sizes:
                        del sizes[size]

        return formats

    def cleanup(self):
        if self.depth_video_writer is not None:
            self.stop_depth_recording()
        if self.device is not None:
            self.device.stop()
        self.service.stop()

    def get_init_dict(self):
        return {'device_id': self.device.device_id if self.device is not None else 0,
                'frame_size': self.frame_size,
                'frame_rate': self.frame_rate,
                'depth_frame_size': self.depth_frame_size,
                'depth_frame_rate': self.depth_frame_rate,
                'preview_depth': self.preview_depth,
                'record_depth': self.record_depth,
                'align_streams': self.align_streams,
                'device_options': self.controls.export_presets() if self.controls is not None else (),
                'stream_preset': self.stream_preset}

    def get_frames(self):
        if self.device:
            self.device.wait_for_frames()
            current_time = self.g_pool.get_timestamp()

            last_color_frame_ts = self.device.get_frame_timestamp(self.streams[0].stream)
            if self.last_color_frame_ts != last_color_frame_ts:
                self.last_color_frame_ts = last_color_frame_ts
                color = ColorFrame(self.device)
                color.timestamp = current_time
                color.index = self.color_frame_index
                self.color_frame_index += 1
            else:
                color = None

            last_depth_frame_ts = self.device.get_frame_timestamp(self.streams[1].stream)
            if self.last_depth_frame_ts != last_depth_frame_ts:
                self.last_depth_frame_ts = last_depth_frame_ts
                depth = DepthFrame(self.device)
                depth.timestamp = current_time
                depth.index = self.depth_frame_index
                self.depth_frame_index += 1
            else:
                depth = None

            return color, depth
        return None, None

    def recent_events(self, events):
        if self._needs_restart:
            self.restart_device()
            time.sleep(0.05)
        elif not self.online:
            time.sleep(.05)
            return

        try:
            color_frame, depth_frame = self.get_frames()
        except (pyrs.RealsenseError, TimeoutError) as err:
            logger.warning("Realsense failed to provide frames. Attempting to reinit.")
            self._recent_frame = None
            self._recent_depth_frame = None
            self._needs_restart = True
        else:
            if color_frame and depth_frame:
                self._recent_frame = color_frame
                events['frame'] = color_frame

            if depth_frame:
                self._recent_depth_frame = depth_frame
                events['depth_frame'] = depth_frame

                if self.depth_video_writer is not None:
                    self.depth_video_writer.write_video_frame(depth_frame)

    def deinit_ui(self):
        self.remove_menu()

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Local USB Video Source"
        self.update_menu()

    def update_menu(self):
        try:
            del self.menu[:]
        except AttributeError:
            return

        from pyglui import ui

        if self.device is None:
            self.menu.append(ui.Info_Text('Capture initialization failed.'))
            return

        def align_and_restart(val):
            self.align_streams = val
            self.restart_device()

        self.menu.append(ui.Switch('record_depth', self, label='Record Depth Stream'))
        self.menu.append(ui.Switch('preview_depth', self, label='Preview Depth'))
        self.menu.append(ui.Switch('align_streams', self, label='Align Streams',
                                   setter=align_and_restart))
        def toggle_depth_display():
            def on_depth_mouse_button(window, button, action, mods):
                if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
                    self.mouse_drag = True
                if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_RELEASE:
                    self.mouse_drag = False

            if self.depth_window is None:
                self.pitch = 0
                self.yaw = 0

                win_size = glfw.glfwGetWindowSize(self.g_pool.main_window)
                self.depth_window = glfw.glfwCreateWindow(win_size[0], win_size[1], "3D Point Cloud")
                glfw.glfwSetMouseButtonCallback(self.depth_window, on_depth_mouse_button)
                active_window = glfw.glfwGetCurrentContext()
                glfw.glfwMakeContextCurrent(self.depth_window)
                gl_utils.basic_gl_setup()
                gl_utils.make_coord_system_norm_based()

                # refresh speed settings
                glfw.glfwSwapInterval(0)

                glfw.glfwMakeContextCurrent(active_window)


        native_presets = [('None', None), ('Best Quality', rs_preset.RS_PRESET_BEST_QUALITY),
                          ('Largest image', rs_preset.RS_PRESET_LARGEST_IMAGE),
                          ('Highest framerate', rs_preset.RS_PRESET_HIGHEST_FRAMERATE)]

        def set_stream_preset(val):
            if self.stream_preset != val:
                self.stream_preset = val
                self.restart_device()
        self.menu.append(ui.Selector(
            'stream_preset', self,
            setter=set_stream_preset,
            labels = [preset[0] for preset in native_presets],
            selection=[preset[1] for preset in native_presets],
            label= 'Stream preset'
        ))
        color_sizes = sorted(self._available_modes[rs_stream.RS_STREAM_COLOR], reverse=True)
        selector = ui.Selector(
            'frame_size', self,
            # setter=,
            selection=color_sizes,
            label= 'Resolution' if self.align_streams else 'Color Resolution')
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def color_fps_getter():
            avail_fps = [fps for fps in self._available_modes[rs_stream.RS_STREAM_COLOR][self.frame_size] if self.depth_frame_rate % fps == 0]
            return avail_fps, [str(fps) for fps in avail_fps]
        selector = ui.Selector(
            'frame_rate', self,
            # setter=,
            selection_getter=color_fps_getter,
            label='Color Frame Rate',
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        if not self.align_streams:
            depth_sizes = sorted(self._available_modes[rs_stream.RS_STREAM_DEPTH], reverse=True)
            selector = ui.Selector(
                'depth_frame_size', self,
                # setter=,
                selection=depth_sizes,
                label='Depth Resolution',
            )
            selector.read_only = self.stream_preset is not None
            self.menu.append(selector)

        def depth_fps_getter():
            avail_fps = [fps for fps in self._available_modes[rs_stream.RS_STREAM_DEPTH][self.depth_frame_size] if fps % self.frame_rate == 0]
            return avail_fps, [str(fps) for fps in avail_fps]
        selector = ui.Selector(
            'depth_frame_rate', self,
            selection_getter=depth_fps_getter,
            label='Depth Frame Rate',
        )
        selector.read_only = self.stream_preset is not None
        self.menu.append(selector)

        def reset_options():
            if self.device:
                try:
                    self.device.reset_device_options_to_default(self.controls.keys())
                except pyrs.RealsenseError as err:
                    logger.info('Resetting some device options failed')
                    logger.debug('Reason: {}'.format(err))
                finally:
                    self.controls.refresh()

        self.menu.append(ui.Button('Point Cloud Window', toggle_depth_display))
        sensor_control = ui.Growing_Menu(label='Sensor Settings')
        sensor_control.append(ui.Button('Reset device options to default', reset_options))
        for ctrl in sorted(self.controls.values(), key=lambda x: x.range.option):
            # sensor_control.append(ui.Info_Text(ctrl.description))
            if ctrl.range.min == 0.0 and ctrl.range.max == 1.0 and ctrl.range.step == 1.0:
                sensor_control.append(ui.Switch('value', ctrl, label=ctrl.label,
                                                off_val=0.0, on_val=1.0))
            else:
                sensor_control.append(ui.Slider('value', ctrl,
                                                label=ctrl.label,
                                                min=ctrl.range.min,
                                                max=ctrl.range.max,
                                                step=ctrl.range.step))
        self.menu.append(sensor_control)

    def gl_display(self):
        from math import floor
        if self.depth_window is not None and glfw.glfwWindowShouldClose(self.depth_window):
            glfw.glfwDestroyWindow(self.depth_window)
            self.depth_window = None

        if self.depth_window is not None and self._recent_depth_frame is not None:
            active_window = glfw.glfwGetCurrentContext()
            glfw.glfwMakeContextCurrent(self.depth_window)

            win_size = glfw.glfwGetFramebufferSize(self.depth_window)
            gl_utils.adjust_gl_view(win_size[0], win_size[1])
            pos = glfw.glfwGetCursorPos(self.depth_window)
            if self.mouse_drag:
                self.pitch = np.clip(self.pitch + (pos[1] - self.last_pos[1]), -80, 80)
                self.yaw = np.clip(self.yaw - (pos[0] - self.last_pos[0]), -120, 120)
            self.last_pos = pos

            glClearColor(0,0,0,0)
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            glMatrixMode(GL_PROJECTION)
            glLoadIdentity()
            gluPerspective(60, win_size[0]/win_size[1] , 0.01, 20.0)
            glMatrixMode(GL_MODELVIEW)
            glLoadIdentity()
            gluLookAt(0,0,0, 0,0,1, 0,-1,0)
            glTranslatef(0,0,0.5)
            glRotated(self.pitch, 1, 0, 0)
            glRotated(self.yaw, 0, 1, 0)
            glTranslatef(0,0,-0.5)

            #glPointSize(2)
            glEnable(GL_DEPTH_TEST)
            extrinsics = self.device.get_device_extrinsics(rs_stream.RS_STREAM_DEPTH, rs_stream.RS_STREAM_COLOR)
            depth_frame = self._recent_depth_frame
            color_frame = self._recent_frame
            depth_scale = self.device.depth_scale

            glEnableClientState( GL_VERTEX_ARRAY )

            pointcloud = self.device.pointcloud
            glVertexPointer(3,GL_FLOAT,0,pointcloud)
            glEnableClientState(GL_COLOR_ARRAY)
            depth_to_color = np.zeros(depth_frame.height * depth_frame.width * 3, np.uint8)
            rsutilwrapper.project_pointcloud_to_pixel(depth_to_color, self.device.depth_intrinsics, self.device.color_intrinsics, extrinsics, pointcloud, self._recent_frame.bgr)
            glColorPointer(3, GL_UNSIGNED_BYTE,0, depth_to_color)
            glDrawArrays (GL_POINTS, 0, depth_frame.width * depth_frame.height)
            gl_utils.glFlush()
            glDisable(GL_DEPTH_TEST)
            # gl_utils.make_coord_system_norm_based()
            glfw.glfwSwapBuffers(self.depth_window)
            glfw.glfwMakeContextCurrent(active_window)

        if self.preview_depth and self._recent_depth_frame is not None:
            self.g_pool.image_tex.update_from_ndarray(self._recent_depth_frame.bgr)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()
        elif self._recent_frame is not None:
            self.g_pool.image_tex.update_from_yuv_buffer(self._recent_frame.yuv_buffer,
                                                         self._recent_frame.width,
                                                         self._recent_frame.height)
            gl_utils.glFlush()
            gl_utils.make_coord_system_norm_based()
            self.g_pool.image_tex.draw()

        if not self.online:
            super().gl_display()

        gl_utils.make_coord_system_pixel_based((self.frame_size[1], self.frame_size[0], 3))

    def restart_device(self, device_id=None, color_frame_size=None, color_fps=None,
                       depth_frame_size=None, depth_fps=None, device_options=None):
        if device_id is None:
            if self.device is not None:
                device_id = self.device.device_id
            else:
                device_id = 0
        if color_frame_size is None:
            color_frame_size = self.frame_size
        if color_fps is None:
            color_fps = self.frame_rate
        if depth_frame_size is None:
            depth_frame_size = self.depth_frame_size
        if depth_fps is None:
            depth_fps = self.depth_frame_rate
        if device_options is None:
            device_options = self.controls.export_presets()
        if self.device is not None:
            self.device.stop()
            self.device = None
        self.service.stop()
        self.service.start()
        self.notify_all({'subject': 'realsense_source.restart',
                         'device_id': device_id,
                         'color_frame_size': color_frame_size,
                         'color_fps': color_fps,
                         'depth_frame_size': depth_frame_size,
                         'depth_fps': depth_fps,
                         'device_options': device_options})

    def on_click(self, pos, button, action):
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_PRESS:
            self.mouse_drag = True
        if button == glfw.GLFW_MOUSE_BUTTON_LEFT and action == glfw.GLFW_RELEASE:
            self.mouse_drag = False

    def on_notify(self, notification):
        if notification['subject'] == 'realsense_source.restart':
            kwargs = notification.copy()
            del kwargs['subject']
            self._initialize_device(**kwargs)
        elif notification['subject'] == 'recording.started':
            self.start_depth_recording(notification['rec_path'])
        elif notification['subject'] == 'recording.stopped':
            self.stop_depth_recording()

    def start_depth_recording(self, rec_loc):
        if not self.record_depth:
            return

        if self.depth_video_writer is not None:
            logger.warning('Depth video recording has been started already')
            return

        video_path = os.path.join(rec_loc, 'depth.mp4')
        self.depth_video_writer = AV_Writer(video_path, fps=self.depth_frame_rate, use_timestamps=True)

    def stop_depth_recording(self):
        if self.depth_video_writer is None:
            logger.warning('Depth video recording was not running')
            return

        self.depth_video_writer.close()
        self.depth_video_writer = None

    @property
    def frame_size(self):
        stream = self.streams[0]
        return stream.width, stream.height

    @frame_size.setter
    def frame_size(self, new_size):
        if self.device is not None and new_size != self.frame_size:
            self.restart_device(color_frame_size=new_size)

    @property
    def frame_rate(self):
        return self.streams[0].fps

    @frame_rate.setter
    def frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.frame_rate:
            self.restart_device(color_fps=new_rate)

    @property
    def depth_frame_size(self):
        stream = self.streams[1]
        return stream.width, stream.height

    @depth_frame_size.setter
    def depth_frame_size(self, new_size):
        if self.device is not None and new_size != self.depth_frame_size:
            self.restart_device(depth_frame_size=new_size)

    @property
    def depth_frame_rate(self):
        return self.streams[1].fps

    @depth_frame_rate.setter
    def depth_frame_rate(self, new_rate):
        if self.device is not None and new_rate != self.depth_frame_rate:
            self.restart_device(depth_fps=new_rate)

    @property
    def jpeg_support(self):
        return False

    @property
    def online(self):
        return self.device and self.device.is_streaming()

    @property
    def name(self):
        # not the same as `if self.device:`!
        if self.device is not None:
            return self.device.name
        else:
            return "Ghost capture"
Example #25
0
    def start(self):
        session = os.path.join(self.rec_root_dir, self.session_name)
        try:
            os.makedirs(session, exist_ok=True)
            logger.debug("Created new recordings session dir {}".format(session))
        except OSError:
            logger.error(
                "Could not start recording. Session dir {} not writable.".format(
                    session
                )
            )
            return

        self.pldata_writers = {}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()
        start_time_synced = self.g_pool.get_timestamp()
        recording_uuid = uuid.uuid4()

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(self.rec_path))
                break
            except:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, "w", newline="", encoding="utf-8") as csvfile:
            csv_utils.write_key_value_file(
                csvfile,
                {
                    "Recording Name": self.session_name,
                    "Start Date": strftime("%d.%m.%Y", localtime(self.start_time)),
                    "Start Time": strftime("%H:%M:%S", localtime(self.start_time)),
                    "Start Time (System)": self.start_time,
                    "Start Time (Synced)": start_time_synced,
                    "Recording UUID": recording_uuid,
                },
            )

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path, self.g_pool.capture.frame_rate)
        elif hasattr(self.g_pool.capture._recent_frame, "h264_buffer"):
            self.writer = H264Writer(
                self.video_path,
                self.g_pool.capture.frame_size[0],
                self.g_pool.capture.frame_size[1],
                int(self.g_pool.capture.frame_rate),
            )
        else:
            self.writer = AV_Writer(self.video_path, fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir, "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {"subject": "calibration.calibration_data", "record": True}
            notification.update(cal_data)
            notification["topic"] = "notify." + notification["subject"]

            writer = PLData_Writer(self.rec_path, "notify")
            writer.append(notification)
            self.pldata_writers["notify"] = writer
        except FileNotFoundError:
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all(
            {
                "subject": "recording.started",
                "rec_path": self.rec_path,
                "session_name": self.session_name,
                "record_eye": self.record_eye,
                "compression": self.raw_jpeg,
            }
        )
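
The metadata above is written through csv_utils.write_key_value_file, whose exact on-disk format is not shown in this collection. As a rough, hedged sketch only: the older recorder versions further down write the same info.csv by hand as tab-separated key/value lines, and the legacy exporter parses it with line.split('\t'), so a stand-in pair of helpers under that assumption might look like this:

def write_key_value_file_sketch(csvfile, data):
    # assumption: one "key<TAB>value" pair per line, as in the legacy recorders
    for key, value in data.items():
        csvfile.write("{}\t{}\n".format(key, value))

def read_key_value_file_sketch(csvfile):
    # inverse of the sketch above; lines without a tab are skipped
    return dict(line.rstrip("\n").split("\t", 1) for line in csvfile if "\t" in line)
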
Example #26
0
File: exporter.py Project: neuroidss/pupil
def export(should_terminate, frames_to_export, current_frame, rec_dir, user_dir, min_data_confidence,
           start_frame=None, end_frame=None, plugin_initializers=(), out_file_path=None,pre_computed={}):

    vis_plugins = sorted([Vis_Circle,Vis_Cross,Vis_Polyline,Vis_Light_Points,
        Vis_Watermark,Vis_Scan_Path,Vis_Eye_Video_Overlay], key=lambda x: x.__name__)
    analysis_plugins = sorted([ Pupil_Angle_3D_Fixation_Detector,
                               Gaze_Position_2D_Fixation_Detector], key=lambda x: x.__name__)
    user_plugins = sorted(import_runtime_plugins(os.path.join(user_dir, 'plugins')), key=lambda x: x.__name__)

    available_plugins = vis_plugins + analysis_plugins + user_plugins
    name_by_index = [p.__name__ for p in available_plugins]
    plugin_by_name = dict(zip(name_by_index, available_plugins))

    logger = logging.getLogger(__name__+' with pid: '+str(os.getpid()))

    update_recording_to_recent(rec_dir)

    video_path = [f for f in glob(os.path.join(rec_dir, "world.*")) if f[-3:] in ('mp4', 'mkv', 'avi')][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")
    audio_path = os.path.join(rec_dir, "audio.mp4")

    meta_info = load_meta_info(rec_dir)

    g_pool = Global_Container()
    g_pool.app = 'exporter'
    g_pool.min_data_confidence = min_data_confidence
    timestamps = np.load(timestamps_path)
    cap = File_Source(g_pool, video_path, timestamps=timestamps)

    # Out file path verification. We do this check beforehand, but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to {}".format(out_file_path))

    # Trim mark verification
    # make sure the trim marks (start frame, end frame) make sense:
    # We define them like Python list slices, thus we can test them as such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warning("Start and end frames are set such that no video will be exported.")
        return False

    if start_frame is None:
        start_frame = 0

    # these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    exp_info = "Will export from frame {} to frame {}. This means I will export {} frames."
    logger.debug(exp_info.format(start_frame, start_frame + frames_to_export.value, frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path, fps=cap.frame_rate, audio_loc=audio_path, use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g_pool.capture = cap
    g_pool.rec_dir = rec_dir
    g_pool.user_dir = user_dir
    g_pool.meta_info = meta_info
    g_pool.timestamps = timestamps
    g_pool.delayed_notifications = {}
    g_pool.notifications = []
    # load pupil_positions, gaze_positions
    pupil_data = pre_computed.get("pupil_data") or load_object(pupil_data_path)
    g_pool.pupil_data = pupil_data
    g_pool.pupil_positions = pre_computed.get("pupil_positions") or pupil_data['pupil_positions']
    g_pool.gaze_positions = pre_computed.get("gaze_positions") or pupil_data['gaze_positions']
    g_pool.fixations = [] # populated by the fixation detector plugin

    g_pool.pupil_positions_by_frame = correlate_data(g_pool.pupil_positions,g_pool.timestamps)
    g_pool.gaze_positions_by_frame = correlate_data(g_pool.gaze_positions,g_pool.timestamps)
    g_pool.fixations_by_frame = [[] for x in g_pool.timestamps]  # populated by the fixation detector plugin

    # add plugins
    g_pool.plugins = Plugin_List(g_pool, plugin_by_name, plugin_initializers)

    while frames_to_export.value > current_frame.value:

        if should_terminate.value:
            logger.warning("User aborted export. Exported {} frames to {}.".format(current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame()
        except EndofVideoFileError:
            break

        events = {'frame':frame}
        # new positions and events
        events['gaze_positions'] = g_pool.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g_pool.pupil_positions_by_frame[frame.index]

        # publish delayed notifications when their time has come.
        for n in list(g_pool.delayed_notifications.values()):
            if n['_notify_time_'] < time():
                del n['_notify_time_']
                del g_pool.delayed_notifications[n['subject']]
                g_pool.notifications.append(n)

        # notify each plugin if there are new notifications:
        while g_pool.notifications:
            n = g_pool.notifications.pop(0)
            for p in g_pool.plugins:
                p.on_notify(n)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.recent_events(events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time()-start_time
    effective_fps = float(current_frame.value)/duration

    result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
    logger.info(result.format(current_frame.value, out_file_path, duration, effective_fps))
    return True
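
export() is written to run in a worker process: should_terminate, frames_to_export and current_frame are shared values that the launching process polls for progress and can flip to abort. A hedged usage sketch follows; the paths, the integer Value type and the import are placeholders, assuming export() is importable from the exporter module shown above:

import multiprocessing as mp
from exporter import export  # assumed import path for this sketch

if __name__ == '__main__':
    should_terminate = mp.Value('i', 0)   # set to 1 to abort the export
    frames_to_export = mp.Value('i', 0)   # filled in by export() itself
    current_frame = mp.Value('i', 0)      # progress counter for the caller

    worker = mp.Process(
        target=export,
        args=(should_terminate, frames_to_export, current_frame,
              '/path/to/recording', '/path/to/user_dir', 0.6),
        kwargs={'start_frame': 0, 'end_frame': None},
    )
    worker.start()
    worker.join()
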
Example #27
0
class Recorder(System_Plugin_Base):
    """Capture Recorder"""

    icon_chr = chr(0xE04B)
    icon_font = "pupil_icons"
    warning_low_disk_space_th = 5.0  # threshold in GB
    stop_rec_low_disk_space_th = 1.0  # threshold in GB

    def __init__(
        self,
        g_pool,
        session_name=get_auto_name(),
        rec_root_dir=None,
        user_info={"name": "", "additional_field": "change_me"},
        info_menu_conf={},
        show_info_menu=False,
        record_eye=True,
        raw_jpeg=True,
    ):
        super().__init__(g_pool)
        # update name if it was autogenerated.
        if session_name.startswith("20") and len(session_name) == 10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep, 1)[0]
        default_rec_root_dir = os.path.join(base_dir, "recordings")

        if (
            rec_root_dir
            and rec_root_dir != default_rec_root_dir
            and self.verify_path(rec_root_dir)
        ):
            self.rec_root_dir = rec_root_dir
        else:
            try:
                os.makedirs(default_rec_root_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info(
                    'Created standard Rec dir at "{}"'.format(default_rec_root_dir)
                )
            self.rec_root_dir = default_rec_root_dir

        self.raw_jpeg = raw_jpeg
        self.order = 0.9
        self.record_eye = record_eye
        self.session_name = session_name
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf

        self.low_disk_space_thumb = None
        check_timer = timer(1.0)
        self.check_space = lambda: next(check_timer)

    def get_init_dict(self):
        d = {}
        d["record_eye"] = self.record_eye
        d["session_name"] = self.session_name
        d["user_info"] = self.user_info
        d["info_menu_conf"] = self.info_menu_conf
        d["show_info_menu"] = self.show_info_menu
        d["rec_root_dir"] = self.rec_root_dir
        d["raw_jpeg"] = self.raw_jpeg
        return d

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Recorder"
        self.menu_icon.order = 0.29

        self.menu.append(
            ui.Info_Text(
                'Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'
            )
        )
        self.menu.append(
            ui.Info_Text(
                'Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'
            )
        )
        self.menu.append(
            ui.Text_Input(
                "rec_root_dir",
                self,
                setter=self.set_rec_root_dir,
                label="Path to recordings",
            )
        )
        self.menu.append(
            ui.Text_Input(
                "session_name",
                self,
                setter=self.set_session_name,
                label="Recording session name",
            )
        )
        self.menu.append(
            ui.Switch(
                "show_info_menu",
                self,
                on_val=True,
                off_val=False,
                label="Request additional user info",
            )
        )
        self.menu.append(
            ui.Selector(
                "raw_jpeg",
                self,
                selection=[True, False],
                labels=["bigger file, less CPU", "smaller file, more CPU"],
                label="Compression",
            )
        )
        self.menu.append(
            ui.Info_Text(
                "Recording the raw eye video is optional. We use it for debugging."
            )
        )
        self.menu.append(
            ui.Switch(
                "record_eye", self, on_val=True, off_val=False, label="Record eye"
            )
        )
        self.button = ui.Thumb(
            "running", self, setter=self.toggle, label="R", hotkey="r"
        )
        self.button.on_color[:] = (1, 0.0, 0.0, 0.8)
        self.g_pool.quickbar.insert(2, self.button)

        self.low_disk_space_thumb = ui.Thumb(
            "low_disk_warn", label="!", getter=lambda: True, setter=lambda x: None
        )
        self.low_disk_space_thumb.on_color[:] = (1, 0.0, 0.0, 0.8)
        self.low_disk_space_thumb.status_text = "Low disk space"

    def deinit_ui(self):
        if self.low_disk_space_thumb in self.g_pool.quickbar:
            self.g_pool.quickbar.remove(self.low_disk_space_thumb)
        self.g_pool.quickbar.remove(self.button)
        self.button = None
        self.remove_menu()

    def toggle(self, _=None):
        if self.running:
            self.notify_all({"subject": "recording.should_stop"})
            self.notify_all(
                {"subject": "recording.should_stop", "remote_notify": "all"}
            )
        else:
            self.notify_all(
                {"subject": "recording.should_start", "session_name": self.session_name}
            )
            self.notify_all(
                {
                    "subject": "recording.should_start",
                    "session_name": self.session_name,
                    "remote_notify": "all",
                }
            )

    def on_notify(self, notification):
        """Handles recorder notifications

        Reacts to notifications:
            ``recording.should_start``: Starts a new recording session.
                fields:
                - 'session_name' change session name
                    start with `/` to ignore the rec base dir and start from root instead.
                - `record_eye` boolean that indicates recording of the eyes, defaults to current setting
            ``recording.should_stop``: Stops current recording session

        Emits notifications:
            ``recording.started``: New recording session started
            ``recording.stopped``: Current recording session stopped

        Args:
            notification (dictionary): Notification dictionary
        """
        # notification wants to be recorded
        if notification.get("record", False) and self.running:
            if "timestamp" not in notification:
                logger.error("Notification without timestamp will not be saved.")
            else:
                notification["topic"] = "notify." + notification["subject"]
                try:
                    writer = self.pldata_writers["notify"]
                except KeyError:
                    writer = PLData_Writer(self.rec_path, "notify")
                    self.pldata_writers["notify"] = writer
                writer.append(notification)

        elif notification["subject"] == "recording.should_start":
            if self.running:
                logger.info("Recording already running!")
            else:
                self.record_eye = notification.get("record_eye", self.record_eye)
                if notification.get("session_name", ""):
                    self.set_session_name(notification["session_name"])
                self.start()

        elif notification["subject"] == "recording.should_stop":
            if self.running:
                self.stop()
            else:
                logger.info("Recording already stopped!")

    def get_rec_time_str(self):
        rec_time = gmtime(time() - self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self):
        session = os.path.join(self.rec_root_dir, self.session_name)
        try:
            os.makedirs(session, exist_ok=True)
            logger.debug("Created new recordings session dir {}".format(session))
        except OSError:
            logger.error(
                "Could not start recording. Session dir {} not writable.".format(
                    session
                )
            )
            return

        self.pldata_writers = {}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()
        start_time_synced = self.g_pool.get_timestamp()
        recording_uuid = uuid.uuid4()

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(self.rec_path))
                break
            except:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, "w", newline="", encoding="utf-8") as csvfile:
            csv_utils.write_key_value_file(
                csvfile,
                {
                    "Recording Name": self.session_name,
                    "Start Date": strftime("%d.%m.%Y", localtime(self.start_time)),
                    "Start Time": strftime("%H:%M:%S", localtime(self.start_time)),
                    "Start Time (System)": self.start_time,
                    "Start Time (Synced)": start_time_synced,
                    "Recording UUID": recording_uuid,
                },
            )

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path, self.g_pool.capture.frame_rate)
        elif hasattr(self.g_pool.capture._recent_frame, "h264_buffer"):
            self.writer = H264Writer(
                self.video_path,
                self.g_pool.capture.frame_size[0],
                self.g_pool.capture.frame_size[1],
                int(self.g_pool.capture.frame_rate),
            )
        else:
            self.writer = AV_Writer(self.video_path, fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir, "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {"subject": "calibration.calibration_data", "record": True}
            notification.update(cal_data)
            notification["topic"] = "notify." + notification["subject"]

            writer = PLData_Writer(self.rec_path, "notify")
            writer.append(notification)
            self.pldata_writers["notify"] = writer
        except FileNotFoundError:
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all(
            {
                "subject": "recording.started",
                "rec_path": self.rec_path,
                "session_name": self.session_name,
                "record_eye": self.record_eye,
                "compression": self.raw_jpeg,
            }
        )

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu(
            "additional Recording Info", size=(300, 300), pos=(300, 300)
        )
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.keys():
                self.info_menu.insert(0, ui.Text_Input(name, self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(
            ui.Info_Text(
                'Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'
            )
        )
        self.info_menu.append(
            ui.Text_Input("user_info", self, setter=set_user_info, label="User info")
        )
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def recent_events(self, events):

        if self.check_space():
            disk_space = available_gb(self.rec_root_dir)
            if (
                disk_space < self.warning_low_disk_space_th
                and self.low_disk_space_thumb not in self.g_pool.quickbar
            ):
                self.g_pool.quickbar.append(self.low_disk_space_thumb)
            elif (
                disk_space >= self.warning_low_disk_space_th
                and self.low_disk_space_thumb in self.g_pool.quickbar
            ):
                self.g_pool.quickbar.remove(self.low_disk_space_thumb)

            if self.running and disk_space <= self.stop_rec_low_disk_space_th:
                self.stop()
                logger.error("Recording was stopped due to low disk space!")

        if self.running:
            for key, data in events.items():
                if key not in ("dt", "depth_frame") and not key.startswith("frame"):
                    try:
                        writer = self.pldata_writers[key]
                    except KeyError:
                        writer = PLData_Writer(self.rec_path, key)
                        self.pldata_writers[key] = writer
                    writer.extend(data)
            if "frame" in events:
                frame = events["frame"]
                self.writer.write_video_frame(frame)
                self.frame_count += 1

            # # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))

            self.button.status_text = self.get_rec_time_str()

    def stop(self):
        # explicit release of VideoWriter
        try:
            self.writer.release()
        except RuntimeError:
            logger.error("No world video recorded")
        else:
            logger.debug("Closed media container")
            self.g_pool.capture.intrinsics.save(self.rec_path, custom_name="world")
        finally:
            self.writer = None

        # save_object(self.data, os.path.join(self.rec_path, "pupil_data"))
        for writer in self.pldata_writers.values():
            writer.close()

        del self.pldata_writers

        surface_definition_file_paths = glob.glob(os.path.join(self.g_pool.user_dir, "surface_definitions*"))

        if len(surface_definition_file_paths) > 0:
            for source_path in surface_definition_file_paths:
                _, filename = os.path.split(source_path)
                target_path = os.path.join(self.rec_path, filename)
                copy2(source_path, target_path)
        else:
            logger.info(
                "No surface_definitions data found. You may want this if you do marker tracking."
            )

        try:
            with open(self.meta_info_path, "a", newline="") as csvfile:
                csv_utils.write_key_value_file(
                    csvfile,
                    {
                        "Duration Time": self.get_rec_time_str(),
                        "World Camera Frames": self.frame_count,
                        "World Camera Resolution": str(
                            self.g_pool.capture.frame_size[0]
                        )
                        + "x"
                        + str(self.g_pool.capture.frame_size[1]),
                        "Capture Software Version": self.g_pool.version,
                        "Data Format Version": self.g_pool.version,
                        "System Info": get_system_info(),
                    },
                    append=True,
                )
        except Exception:
            logger.exception("Could not save metadata. Please report this bug!")

        try:
            with open(
                os.path.join(self.rec_path, "user_info.csv"), "w", newline=""
            ) as csvfile:
                csv_utils.write_key_value_file(csvfile, self.user_info)
        except Exception:
            logger.exception("Could not save userdata. Please report this bug!")

        self.close_info_menu()

        self.running = False
        if self.menu:
            self.menu.read_only = False
            self.button.status_text = ""

        logger.info("Saved Recording.")
        self.notify_all({"subject": "recording.stopped", "rec_path": self.rec_path})

    def cleanup(self):
        """gets called when the plugin get terminated.
           either volunatily or forced.
        """
        if self.running:
            self.stop()

    def verify_path(self, val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '{}'.".format(n_path))
            return False
        else:
            return n_path

    def set_rec_root_dir(self, val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_root_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if os.path.sep in val:
                logger.warning(
                    "You session name will create one or more subdirectories"
                )
            self.session_name = val
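
Two small helpers carry the disk-space guard in recent_events() above: timer(1.0) throttles the check to roughly once per second, and available_gb() reports free space to compare against the two thresholds on the class. Neither is defined in this collection; the stand-ins below are only plausible sketches built on the standard library:

import shutil
from time import monotonic

def available_gb_sketch(path):
    # free space of the filesystem containing `path`, in gigabytes
    return shutil.disk_usage(path).free / 1e9

def timer_sketch(interval):
    # generator that yields True at most once per `interval` seconds
    deadline = monotonic() + interval
    while True:
        if monotonic() >= deadline:
            deadline = monotonic() + interval
            yield True
        else:
            yield False
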
Example #28
0
class Recorder(Plugin):
    """Capture Recorder"""
    def __init__(self,g_pool,session_name = get_auto_name(),rec_dir=None, user_info={},info_menu_conf={},show_info_menu=False, record_eye = True, audio_src = 'No Audio',raw_jpeg=True):
        super(Recorder, self).__init__(g_pool)
        #update name if it was autogenerated.
        if session_name.startswith('20') and len(session_name)==10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep,1)[0]
        default_rec_dir = os.path.join(base_dir,'recordings')

        if rec_dir and rec_dir != default_rec_dir and self.verify_path(rec_dir):
            self.rec_dir = rec_dir
        else:
            try:
                os.makedirs(default_rec_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info('Created standard Rec dir at "%s"'%default_rec_dir)
            self.rec_dir = default_rec_dir

        self.raw_jpeg = raw_jpeg
        self.order = .9
        self.record_eye = record_eye
        self.session_name = session_name
        self.audio_devices_dict = Audio_Input_Dict()
        if audio_src in self.audio_devices_dict.keys():
            self.audio_src = audio_src
        else:
            self.audio_src = 'No Audio'
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf

        self.calibration_start_at = None
        self.calibration_end_at = None

    def get_init_dict(self):
        d = {}
        d['record_eye'] = self.record_eye
        d['audio_src'] = self.audio_src
        d['session_name'] = self.session_name
        d['user_info'] = self.user_info
        d['info_menu_conf'] = self.info_menu_conf
        d['show_info_menu'] = self.show_info_menu
        d['rec_dir'] = self.rec_dir
        d['raw_jpeg'] = self.raw_jpeg
        return d


    def init_gui(self):
        self.menu = ui.Growing_Menu('Recorder')
        self.g_pool.sidebar.insert(3,self.menu)
        self.menu.append(ui.Info_Text('Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'))
        self.menu.append(ui.Info_Text('Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'))
        self.menu.append(ui.Text_Input('rec_dir',self,setter=self.set_rec_dir,label='Path to recordings'))
        self.menu.append(ui.Text_Input('session_name',self,setter=self.set_session_name,label='Recording session name'))
        self.menu.append(ui.Switch('show_info_menu',self,on_val=True,off_val=False,label='Request additional user info'))
        self.menu.append(ui.Selector('raw_jpeg',self,selection = [True,False], labels=["bigger file, less CPU", "smaller file, more CPU"],label='Compression'))
        self.menu.append(ui.Info_Text('Recording the raw eye video is optional. We use it for debugging.'))
        self.menu.append(ui.Switch('record_eye',self,on_val=True,off_val=False,label='Record eye'))
        self.menu.append(ui.Selector('audio_src',self, selection=self.audio_devices_dict.keys(),label='Audio Source'))

        # record on enter click from the remote shutter
        self.button = ui.Thumb('running',self,setter=self.toggle,label='Record',hotkey=GLFW_KEY_ENTER)

        self.button.on_color[:] = (1,.0,.0,.8)
        self.g_pool.quickbar.insert(1,self.button)


    def deinit_gui(self):
        if self.menu:
            self.g_pool.sidebar.remove(self.menu)
            self.menu = None
        if self.button:
            self.g_pool.quickbar.remove(self.button)
            self.button = None



    def toggle(self, _=None):
        if self.running:
            if self.calibration_start_at and not self.calibration_end_at: # Mark the end of calibration
                self.calibration_end_at = self.timestamps[-1]
            else:
                self.notify_all( {'subject':'should_stop_recording','network_propagate':True} )
        else:
            self.notify_all( {'subject':'should_start_recording','session_name':self.session_name,'network_propagate':True} )


    def on_notify(self,notification):

        # notification wants to be recorded
        if notification.get('record',False) and self.running:
            self.data['notifications'].append(notification)


        # Notification to start recording
        elif notification['subject'] == 'should_start_recording':
            if self.running:
                logger.info('Recording already running!')
            else:
                if notification.get("session_name",""):
                    self.set_session_name(notification["session_name"])
                self.start()
        # Remote has stopped recording, we should stop as well.
        elif notification['subject'] == 'should_stop_recording':
            if self.running:
                self.stop(shutdown_os=True)
            else:
                logger.info('Recording already stopped!')


    def get_rec_time_str(self):
        rec_time = gmtime(time()-self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self):
        self.timestamps = []
        self.data = {'pupil_positions':[],'gaze_positions':[],'notifications':[]}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir %s"%session)

        except:
            logger.debug("Recordings session dir %s already exists, using it." %session)

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "%03d/" % counter)
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir %s"%self.rec_path)
                break
            except:
                logger.debug("We dont want to overwrite data, incrementing counter & trying to make new data folder")
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w') as f:
            f.write("Recording Name\t"+self.session_name+ "\n")
            f.write("Start Date\t"+ strftime("%d.%m.%Y", localtime(self.start_time))+ "\n")
            f.write("Start Time\t"+ strftime("%H:%M:%S", localtime(self.start_time))+ "\n")


        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(audio_path,self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path,self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path,fps=self.g_pool.capture.frame_rate)

        # positions path to eye process
        if self.record_eye:
            for alive, pipe in zip(self.g_pool.eyes_are_alive,self.g_pool.eye_pipes):
                if alive.value:
                    pipe.send( ('Rec_Start',(self.rec_path,self.raw_jpeg) ) )

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all( {'subject':'rec_started','rec_path':self.rec_path,'session_name':self.session_name,'network_propagate':True} )

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu('additional Recording Info',size=(300,300),pos=(300,300))
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.iterkeys():
                self.info_menu.insert(0,ui.Text_Input(name,self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(ui.Info_Text('Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'))
        self.info_menu.append(ui.Text_Input('user_info',self,setter=set_user_info,label="User info"))
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def update(self,frame,events):
        ''' listen to commands from eye window '''
        if self.g_pool.eye_pipes[0].poll():
            cmd = self.g_pool.eye_pipes[0].recv()
            # toggle recording if enter key clicked
            if cmd == GLFW_KEY_ENTER:
                self.toggle()
        if self.running:
            # for key,data in events.iteritems():
            #     if key not in ('dt'):
            #         try:
            #             self.data[key] += data
            #         except KeyError:
            #             self.data[key] = []
            #             self.data[key] += data

            ''' Record when it first started, as this will be the start of calibration. When the user clicks Enter again this
             should end calibration. Click once more to end recording.'''
            if not self.calibration_start_at:
                self.calibration_start_at = frame.timestamp

            self.timestamps.append(frame.timestamp)
            self.writer.write_video_frame(frame)
            self.frame_count += 1

            # # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))

            self.button.status_text = self.get_rec_time_str()

    def stop(self, shutdown_os=False):
        #explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        if self.record_eye:
            for alive, pipe in zip(self.g_pool.eyes_are_alive,self.g_pool.eye_pipes):
                if alive.value:
                    pipe.send(('Rec_Stop',None))

        # Mohammad: This will now save a mostly empty data dict
        save_object(self.data,os.path.join(self.rec_path, "pupil_data"))

        timestamps_path = os.path.join(self.rec_path, "world_timestamps.npy")
        # ts = sanitize_timestamps(np.array(self.timestamps))
        ts = np.array(self.timestamps)
        np.save(timestamps_path,ts)

        try:
            copy2(os.path.join(self.g_pool.user_dir,"surface_definitions"),os.path.join(self.rec_path,"surface_definitions"))
        except:
            logger.info("No surface_definitions data found. You may want this if you do marker tracking.")
        try:
            copy2(os.path.join(self.g_pool.user_dir,"user_calibration_data"),os.path.join(self.rec_path,"user_calibration_data"))
        except:
            logger.warning("No user calibration data found. Please calibrate first.")

        # camera_calibration = load_camera_calibration(self.g_pool)
        # if camera_calibration is not None:
        #     save_object(camera_calibration,os.path.join(self.rec_path, "camera_calibration"))
        # else:
        #     logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a') as f:
                f.write("Duration Time\t"+ self.get_rec_time_str()+ "\n")
                f.write("World Camera Frames\t"+ str(self.frame_count)+ "\n")
                f.write("World Camera Resolution\t"+ str(self.g_pool.capture.frame_size[0])+"x"+str(self.g_pool.capture.frame_size[1])+"\n")
                f.write("Capture Software Version\t%s\n"%self.g_pool.version)
                f.write("System Info\t%s"%get_system_info())
        except Exception:
            logger.exception("Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"), 'w') as f:
                self.user_info.update({'cal_start':self.calibration_start_at, 'cal_end':self.calibration_end_at})
                for name,val in self.user_info.iteritems():
                    f.write("%s\t%s\n"%(name,val))
        except Exception:
            logger.exception("Could not save userdata. Please report this bug!")

        self.calibration_start_at = None
        self.calibration_end_at = None


        self.close_info_menu()

        if self.audio_writer:
            self.audio_writer = None

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.timestamps = []
        self.data = {'pupil_positions':[],'gaze_positions':[]}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        logger.info("Saved Recording.")
        self.notify_all( {'subject':'rec_stopped','rec_path':self.rec_path,'network_propagate':True} )
        # Mohammad - shut down the OS when recording stops
        if shutdown_os:
            os.system('shutdown now')

    def cleanup(self):
        """gets called when the plugin get terminated.
           either volunatily or forced.
        """
        if self.running:
            self.stop()
        self.deinit_gui()

    def verify_path(self,val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '%s'."%n_path)
            return False
        else:
            return n_path

    def set_rec_dir(self,val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if '/' in val:
                logger.warning('Your session name will create one or more subdirectories')
            self.session_name = val
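
This variant hands recording control to the eye processes over multiprocessing pipes, sending ('Rec_Start', (rec_path, raw_jpeg)) and ('Rec_Stop', None) tuples. The receiving side is not part of this collection; the loop below is only a hypothetical sketch of how such commands could be consumed on the eye end:

def eye_command_loop_sketch(pipe):
    # hypothetical consumer for the ('Rec_Start', ...) / ('Rec_Stop', None)
    # tuples sent by Recorder.start() and Recorder.stop() above
    rec_path = None
    while True:
        cmd, payload = pipe.recv()
        if cmd == 'Rec_Start':
            rec_path, raw_jpeg = payload
            print("eye process would start writing to", rec_path, "raw jpeg:", raw_jpeg)
        elif cmd == 'Rec_Stop':
            print("eye process would stop writing to", rec_path)
            rec_path = None
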
Example #29
0
def export(should_terminate,
           frames_to_export,
           current_frame,
           rec_dir,
           start_frame=None,
           end_frame=None,
           plugin_initializers=[],
           out_file_path=None):

    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))

    #parse info.csv file
    with open(rec_dir + "/info.csv") as info:
        meta_info = dict(
            ((line.strip().split('\t')) for line in info.readlines()))
    rec_version = read_rec_version(meta_info)
    logger.debug("Exporting a video from recording with version: %s" %
                 rec_version)

    if rec_version < VersionFormat('0.4'):
        video_path = rec_dir + "/world.avi"
        timestamps_path = rec_dir + "/timestamps.npy"
    else:
        video_path = rec_dir + "/world.mkv"
        timestamps_path = rec_dir + "/world_timestamps.npy"

    gaze_positions_path = rec_dir + "/gaze_positions.npy"
    #load gaze information
    gaze_list = np.load(gaze_positions_path)
    timestamps = np.load(timestamps_path)

    #correlate data
    if rec_version < VersionFormat('0.4'):
        positions_by_frame = correlate_gaze_legacy(gaze_list, timestamps)
    else:
        positions_by_frame = correlate_gaze(gaze_list, timestamps)

    cap = autoCreateCapture(video_path, timestamps=timestamps_path)
    width, height = cap.get_size()

    #Out file path verification. We do this check beforehand, but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to %s" % out_file_path)

    #Trim mark verification
    #make sure the trim marks (start frame, end frame) make sense: we define them like Python list slices, thus we can test them as such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warning(
            "Start and end frames are set such that no video will be exported."
        )
        return False

    if start_frame is None:
        start_frame = 0

    #these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    logger.debug(
        "Will export from frame %s to frame %s. This means I will export %s frames."
        % (start_frame, start_frame + frames_to_export.value,
           frames_to_export.value))

    #setup of writer
    writer = AV_Writer(out_file_path)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g = Global_Container()
    g.app = 'exporter'
    g.rec_dir = rec_dir
    g.rec_version = rec_version
    g.timestamps = timestamps
    g.gaze_list = gaze_list
    g.positions_by_frame = positions_by_frame
    g.plugins = Plugin_List(g, plugin_by_name, plugin_initializers)

    while frames_to_export.value - current_frame.value > 0:

        if should_terminate.value:
            logger.warning("User aborted export. Exported %s frames to %s." %
                           (current_frame.value, out_file_path))

            #explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame()
        except EndofVideoFileError:
            break

        events = {}
        #new positions and events
        events['pupil_positions'] = positions_by_frame[frame.index]
        # allow each Plugin to do its work.
        for p in g.plugins:
            p.update(frame, events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time() - start_time
    effective_fps = float(current_frame.value) / duration

    logger.info(
        "Export done: Exported %s frames to %s. This took %s seconds. Exporter ran at %s frames per second"
        % (current_frame.value, out_file_path, duration, effective_fps))
    return True
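
Both exporter variants depend on a correlation step (correlate_data / correlate_gaze) that buckets pupil and gaze data per world frame so it can be looked up by frame.index. The project's own implementation is not reproduced in this collection; the sketch below only illustrates the idea by assigning each datum to the frame with the nearest timestamp, assuming both sequences are sorted by time:

from bisect import bisect_left

def correlate_by_nearest_frame(data, frame_timestamps):
    # data: iterable of dicts with a 'timestamp' key
    # frame_timestamps: sorted sequence of world frame timestamps
    by_frame = [[] for _ in frame_timestamps]
    if len(by_frame) == 0:
        return by_frame
    for datum in data:
        ts = datum['timestamp']
        i = bisect_left(frame_timestamps, ts)
        if i == 0:
            idx = 0
        elif i == len(frame_timestamps):
            idx = len(frame_timestamps) - 1
        else:
            # choose the closer of the two neighbouring frames
            idx = i if frame_timestamps[i] - ts < ts - frame_timestamps[i - 1] else i - 1
        by_frame[idx].append(datum)
    return by_frame
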
Example #30
0
class Recorder(Plugin):
    """Capture Recorder"""
    def __init__(self,
                 g_pool,
                 session_name=get_auto_name(),
                 rec_dir=None,
                 user_info={
                     'name': '',
                     'additional_field': 'change_me'
                 },
                 info_menu_conf={},
                 show_info_menu=False,
                 record_eye=False,
                 audio_src='No Audio',
                 raw_jpeg=False):
        super(Recorder, self).__init__(g_pool)
        #update name if it was autogenerated.
        if session_name.startswith('20') and len(session_name) == 10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep, 1)[0]
        default_rec_dir = os.path.join(base_dir, 'recordings')

        if rec_dir and rec_dir != default_rec_dir and self.verify_path(
                rec_dir):
            self.rec_dir = rec_dir
        else:
            try:
                os.makedirs(default_rec_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info('Created standard Rec dir at "%s"' %
                            default_rec_dir)
            self.rec_dir = default_rec_dir

        self.raw_jpeg = raw_jpeg
        self.order = .9
        self.record_eye = record_eye
        self.session_name = session_name
        self.audio_devices_dict = Audio_Input_Dict()
        if audio_src in self.audio_devices_dict.keys():
            self.audio_src = audio_src
        else:
            self.audio_src = 'No Audio'
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf

    def get_init_dict(self):
        d = {}
        d['record_eye'] = self.record_eye
        d['audio_src'] = self.audio_src
        d['session_name'] = self.session_name
        d['user_info'] = self.user_info
        d['info_menu_conf'] = self.info_menu_conf
        d['show_info_menu'] = self.show_info_menu
        d['rec_dir'] = self.rec_dir
        d['raw_jpeg'] = self.raw_jpeg
        return d

    def init_gui(self):
        self.menu = ui.Growing_Menu('Recorder')
        self.g_pool.sidebar.insert(3, self.menu)
        self.menu.append(
            ui.Info_Text(
                'Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'
            ))
        self.menu.append(
            ui.Info_Text(
                'Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'
            ))
        self.menu.append(
            ui.Text_Input('rec_dir',
                          self,
                          setter=self.set_rec_dir,
                          label='Path to recordings'))
        self.menu.append(
            ui.Text_Input('session_name',
                          self,
                          setter=self.set_session_name,
                          label='Recording session name'))
        self.menu.append(
            ui.Switch('show_info_menu',
                      self,
                      on_val=True,
                      off_val=False,
                      label='Request additional user info'))
        self.menu.append(
            ui.Selector(
                'raw_jpeg',
                self,
                selection=[True, False],
                labels=["bigger file, less CPU", "smaller file, more CPU"],
                label='compression'))
        self.menu.append(
            ui.Info_Text(
                'Recording the raw eye video is optional. We use it for debugging.'
            ))
        self.menu.append(
            ui.Switch('record_eye',
                      self,
                      on_val=True,
                      off_val=False,
                      label='Record eye'))
        self.menu.append(
            ui.Selector('audio_src',
                        self,
                        selection=self.audio_devices_dict.keys()))

        self.button = ui.Thumb('running',
                               self,
                               setter=self.toggle,
                               label='Record',
                               hotkey='r')
        self.button.on_color[:] = (1, .0, .0, .8)
        self.g_pool.quickbar.insert(1, self.button)

    def deinit_gui(self):
        if self.menu:
            self.g_pool.sidebar.remove(self.menu)
            self.menu = None
        if self.button:
            self.g_pool.quickbar.remove(self.button)
            self.button = None

    def toggle(self, _=None):
        if self.running:
            self.stop()
        else:
            self.start()

    def on_notify(self, notification):

        # notification wants to be recorded
        if notification.get('record', False) and self.running:
            self.data['notifications'].append(notification)

        # Remote has started recording, we should start as well.
        elif notification['subject'] == 'rec_started' and notification.get(
                'source', 'local') != 'local':
            if self.running:
                logger.warning('Recording is already running!')
            else:
                self.set_session_name(notification["session_name"])
                self.start(network_propagate=False)
        # Remote has stopped recording, we should stop as well.
        elif notification['subject'] == 'rec_stopped' and notification.get(
                'source', 'local') != 'local':
            if self.running:
                self.stop(network_propagate=False)
            else:
                logger.warning('Recording is already stopped!')

    def get_rec_time_str(self):
        rec_time = gmtime(time() - self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self, network_propagate=True):
        self.timestamps = []
        self.data = {
            'pupil_positions': [],
            'gaze_positions': [],
            'notifications': []
        }
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir %s" % session)

        except:
            logger.debug(
                "Recordings session dir %s already exists, using it." %
                session)

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "%03d/" % counter)
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir %s" % self.rec_path)
                break
            except:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w') as f:
            f.write("Recording Name\t" + self.session_name + "\n")
            f.write("Start Date\t" +
                    strftime("%d.%m.%Y", localtime(self.start_time)) + "\n")
            f.write("Start Time\t" +
                    strftime("%H:%M:%S", localtime(self.start_time)) + "\n")

        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(
                audio_path, self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path,
                                      self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path,
                                    fps=self.g_pool.capture.frame_rate)
        # positions path to eye process
        if self.record_eye:
            for tx in self.g_pool.eye_tx:
                tx.send((self.rec_path, self.raw_jpeg))

        if self.show_info_menu:
            self.open_info_menu()

        self.notify_all({
            'subject': 'rec_started',
            'rec_path': self.rec_path,
            'session_name': self.session_name,
            'network_propagate': network_propagate
        })

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu('additional Recording Info',
                                         size=(300, 300),
                                         pos=(300, 300))
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.iterkeys():
                self.info_menu.insert(0, ui.Text_Input(name, self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(
            ui.Info_Text(
                'Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'
            ))
        self.info_menu.append(
            ui.Text_Input('user_info',
                          self,
                          setter=set_user_info,
                          label="User info"))
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def update(self, frame, events):
        if self.running:
            self.data['pupil_positions'] += events['pupil_positions']
            self.data['gaze_positions'] += events.get('gaze_positions', [])
            self.timestamps.append(frame.timestamp)
            self.writer.write_video_frame(frame)
            self.frame_count += 1

            # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))
            for p in events['pupil_positions']:
                pupil_pos = p['timestamp'], p['confidence'], p['id'], p[
                    'norm_pos'][0], p['norm_pos'][1], p['diameter']
                self.pupil_pos_list.append(pupil_pos)

            for g in events.get('gaze_positions', []):
                gaze_pos = g['timestamp'], g['confidence'], g['norm_pos'][
                    0], g['norm_pos'][1]
                self.gaze_pos_list.append(gaze_pos)

            self.button.status_text = self.get_rec_time_str()

    def stop(self, network_propagate=True):
        #explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        if self.record_eye:
            for tx in self.g_pool.eye_tx:
                try:
                    tx.send((None, None))
                except:
                    logger.warning(
                        "Could not stop eye-recording. Please report this bug!"
                    )

        save_object(self.data, os.path.join(self.rec_path, "pupil_data"))

        gaze_list_path = os.path.join(self.rec_path, "gaze_positions.npy")
        np.save(gaze_list_path, np.asarray(self.gaze_pos_list))

        pupil_list_path = os.path.join(self.rec_path, "pupil_positions.npy")
        np.save(pupil_list_path, np.asarray(self.pupil_pos_list))

        timestamps_path = os.path.join(self.rec_path, "world_timestamps.npy")
        # ts = sanitize_timestamps(np.array(self.timestamps))
        ts = np.array(self.timestamps)
        np.save(timestamps_path, ts)

        try:
            copy2(os.path.join(self.g_pool.user_dir, "surface_definitions"),
                  os.path.join(self.rec_path, "surface_definitions"))
        except:
            logger.info(
                "No surface_definitions data found. You may want this if you do marker tracking."
            )

        try:
            copy2(os.path.join(self.g_pool.user_dir, "cal_pt_cloud.npy"),
                  os.path.join(self.rec_path, "cal_pt_cloud.npy"))
        except:
            logger.warning(
                "No calibration data found. Please calibrate first.")

        try:
            copy2(os.path.join(self.g_pool.user_dir, "camera_calibration"),
                  os.path.join(self.rec_path, "camera_calibration"))
        except:
            logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a') as f:
                f.write("Duration Time\t" + self.get_rec_time_str() + "\n")
                if self.g_pool.binocular:
                    f.write("Eye Mode\tbinocular\n")
                else:
                    f.write("Eye Mode\tmonocular\n")
                f.write("Duration Time\t" + self.get_rec_time_str() + "\n")
                f.write("World Camera Frames\t" + str(self.frame_count) + "\n")
                f.write("World Camera Resolution\t" +
                        str(self.g_pool.capture.frame_size[0]) + "x" +
                        str(self.g_pool.capture.frame_size[1]) + "\n")
                f.write("Capture Software Version\t%s\n" % self.g_pool.version)
                if platform.system() == "Windows":
                    username = os.environ["USERNAME"]
                    sysname, nodename, release, version, machine, _ = platform.uname(
                    )
                else:
                    username = getpass.getuser()
                    try:
                        sysname, nodename, release, version, machine = os.uname(
                        )
                    except:
                        sysname, nodename, release, version, machine = sys.platform, None, None, None, None
                f.write("User\t" + username + "\n")
                f.write("Platform\t" + sysname + "\n")
                f.write("Machine\t" + nodename + "\n")
                f.write("Release\t" + release + "\n")
                f.write("Version\t" + version + "\n")
        except Exception:
            logger.exception(
                "Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"), 'w') as f:
                for name, val in self.user_info.iteritems():
                    f.write("%s\t%s\n" % (name, val))
        except Exception:
            logger.exception(
                "Could not save userdata. Please report this bug!")

        self.close_info_menu()

        if self.audio_writer:
            self.audio_writer = None

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.timestamps = []
        self.data = {'pupil_positions': [], 'gaze_positions': []}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        self.notify_all({
            'subject': 'rec_stopped',
            'rec_path': self.rec_path,
            'network_propagate': network_propagate
        })

    def cleanup(self):
        """gets called when the plugin get terminated.
           either volunatily or forced.
        """
        if self.running:
            self.stop()
        self.deinit_gui()

    def verify_path(self, val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '%s'." % n_path)
            return False
        else:
            return n_path

    def set_rec_dir(self, val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if '/' in val:
                logger.warning(
                    'Your session name will create one or more subdirectories')
            self.session_name = val
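
Side note (not project source): the info.csv written by start() above is a plain tab-separated key/value file, so it can be read back with a few lines of standard-library Python. A minimal sketch; read_info_csv is a hypothetical helper name used only for illustration.

def read_info_csv(path):
    """Parse a Pupil-style info.csv ("key<TAB>value" per line) into a dict."""
    info = {}
    with open(path) as f:
        for line in f:
            line = line.rstrip("\n")
            if not line:
                continue
            key, _, value = line.partition("\t")
            info[key] = value
    return info

# Example usage (the path is a placeholder):
# meta = read_info_csv("recordings/my_session/000/info.csv")
# print(meta.get("Recording Name"), meta.get("Duration Time"))
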
Example #31
0
    def start(self):
        session = os.path.join(self.rec_root_dir, self.session_name)
        try:
            os.makedirs(session, exist_ok=True)
            logger.debug(
                "Created new recordings session dir {}".format(session))
        except OSError:
            logger.error(
                "Could not start recording. Session dir {} not writable.".
                format(session))
            return

        self.pldata_writers = {}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()
        start_time_synced = self.g_pool.get_timestamp()

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(
                    self.rec_path))
                break
            except:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, "w", newline="",
                  encoding="utf-8") as csvfile:
            csv_utils.write_key_value_file(
                csvfile,
                {
                    "Recording Name": self.session_name,
                    "Start Date": strftime("%d.%m.%Y",
                                           localtime(self.start_time)),
                    "Start Time": strftime("%H:%M:%S",
                                           localtime(self.start_time)),
                    "Start Time (System)": self.start_time,
                    "Start Time (Synced)": start_time_synced,
                },
            )

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path,
                                      self.g_pool.capture.frame_rate)
        elif hasattr(self.g_pool.capture._recent_frame, "h264_buffer"):
            self.writer = H264Writer(
                self.video_path,
                self.g_pool.capture.frame_size[0],
                self.g_pool.capture.frame_size[1],
                int(self.g_pool.capture.frame_rate),
            )
        else:
            self.writer = AV_Writer(self.video_path,
                                    fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir,
                                       "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {
                "subject": "calibration.calibration_data",
                "record": True
            }
            notification.update(cal_data)
            notification["topic"] = "notify." + notification["subject"]

            writer = PLData_Writer(self.rec_path, "notify")
            writer.append(notification)
            self.pldata_writers["notify"] = writer
        except FileNotFoundError:
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all({
            "subject": "recording.started",
            "rec_path": self.rec_path,
            "session_name": self.session_name,
            "record_eye": self.record_eye,
            "compression": self.raw_jpeg,
        })
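
The "self incrementing folder" loop in start() above keeps trying 000/, 001/, ... until os.mkdir succeeds, so an existing recording is never overwritten. A standalone sketch of the same pattern, assuming Python 3; next_recording_dir is a hypothetical name, not part of the project API.

import os

def next_recording_dir(session_dir):
    """Create and return the first free numbered subfolder (000, 001, ...)."""
    counter = 0
    while True:
        rec_path = os.path.join(session_dir, "{:03d}".format(counter))
        try:
            os.mkdir(rec_path)
            return rec_path
        except FileExistsError:
            # We don't want to overwrite data; try the next number.
            counter += 1

# rec_path = next_recording_dir("recordings/my_session")  # placeholder path
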
Example #32
0
class Recorder(System_Plugin_Base):
    """Capture Recorder"""

    icon_chr = chr(0xE04B)
    icon_font = "pupil_icons"
    warning_low_disk_space_th = 5.0  # threshold in GB
    stop_rec_low_disk_space_th = 1.0  # threshold in GB

    def __init__(
        self,
        g_pool,
        session_name=get_auto_name(),
        rec_root_dir=None,
        user_info={"name": "", "additional_field": "change_me"},
        info_menu_conf={},
        show_info_menu=False,
        record_eye=True,
        raw_jpeg=True,
    ):
        super().__init__(g_pool)
        # update name if it was autogenerated.
        if session_name.startswith("20") and len(session_name) == 10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep, 1)[0]
        default_rec_root_dir = os.path.join(base_dir, "recordings")

        if (
            rec_root_dir
            and rec_root_dir != default_rec_root_dir
            and self.verify_path(rec_root_dir)
        ):
            self.rec_root_dir = rec_root_dir
        else:
            try:
                os.makedirs(default_rec_root_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info(
                    'Created standard Rec dir at "{}"'.format(default_rec_root_dir)
                )
            self.rec_root_dir = default_rec_root_dir

        self.raw_jpeg = raw_jpeg
        self.order = 0.9
        self.record_eye = record_eye
        self.session_name = session_name
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf

        self.low_disk_space_thumb = None
        check_timer = timer(1.0)
        self.check_space = lambda: next(check_timer)

    def get_init_dict(self):
        d = {}
        d["record_eye"] = self.record_eye
        d["session_name"] = self.session_name
        d["user_info"] = self.user_info
        d["info_menu_conf"] = self.info_menu_conf
        d["show_info_menu"] = self.show_info_menu
        d["rec_root_dir"] = self.rec_root_dir
        d["raw_jpeg"] = self.raw_jpeg
        return d

    def init_ui(self):
        self.add_menu()
        self.menu.label = "Recorder"
        self.menu_icon.order = 0.29

        self.menu.append(
            ui.Info_Text(
                'Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'
            )
        )
        self.menu.append(
            ui.Info_Text(
                'Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'
            )
        )
        self.menu.append(
            ui.Text_Input(
                "rec_root_dir",
                self,
                setter=self.set_rec_root_dir,
                label="Path to recordings",
            )
        )
        self.menu.append(
            ui.Text_Input(
                "session_name",
                self,
                setter=self.set_session_name,
                label="Recording session name",
            )
        )
        self.menu.append(
            ui.Switch(
                "show_info_menu",
                self,
                on_val=True,
                off_val=False,
                label="Request additional user info",
            )
        )
        self.menu.append(
            ui.Selector(
                "raw_jpeg",
                self,
                selection=[True, False],
                labels=["bigger file, less CPU", "smaller file, more CPU"],
                label="Compression",
            )
        )
        self.menu.append(
            ui.Info_Text(
                "Recording the raw eye video is optional. We use it for debugging."
            )
        )
        self.menu.append(
            ui.Switch(
                "record_eye", self, on_val=True, off_val=False, label="Record eye"
            )
        )
        self.button = ui.Thumb(
            "running", self, setter=self.toggle, label="R", hotkey="r"
        )
        self.button.on_color[:] = (1, 0.0, 0.0, 0.8)
        self.g_pool.quickbar.insert(2, self.button)

        self.low_disk_space_thumb = ui.Thumb(
            "low_disk_warn", label="!", getter=lambda: True, setter=lambda x: None
        )
        self.low_disk_space_thumb.on_color[:] = (1, 0.0, 0.0, 0.8)
        self.low_disk_space_thumb.status_text = "Low disk space"

    def deinit_ui(self):
        if self.low_disk_space_thumb in self.g_pool.quickbar:
            self.g_pool.quickbar.remove(self.low_disk_space_thumb)
        self.g_pool.quickbar.remove(self.button)
        self.button = None
        self.remove_menu()

    def toggle(self, _=None):
        if self.running:
            self.notify_all({"subject": "recording.should_stop"})
            self.notify_all(
                {"subject": "recording.should_stop", "remote_notify": "all"}
            )
        else:
            self.notify_all(
                {"subject": "recording.should_start", "session_name": self.session_name}
            )
            self.notify_all(
                {
                    "subject": "recording.should_start",
                    "session_name": self.session_name,
                    "remote_notify": "all",
                }
            )

    def on_notify(self, notification):
        """Handles recorder notifications

        Reacts to notifications:
            ``recording.should_start``: Starts a new recording session.
                fields:
                - 'session_name': changes the session name;
                    start with `/` to ignore the rec base dir and start from root instead.
                - `record_eye`: boolean that indicates recording of the eyes, defaults to the current setting
            ``recording.should_stop``: Stops current recording session

        Emits notifications:
            ``recording.started``: New recording session started
            ``recording.stopped``: Current recording session stopped

        Args:
            notification (dictionary): Notification dictionary
        """
        # notification wants to be recorded
        if notification.get("record", False) and self.running:
            if "timestamp" not in notification:
                logger.error("Notification without timestamp will not be saved.")
            else:
                notification["topic"] = "notify." + notification["subject"]
                try:
                    writer = self.pldata_writers["notify"]
                except KeyError:
                    writer = PLData_Writer(self.rec_path, "notify")
                    self.pldata_writers["notify"] = writer
                writer.append(notification)

        elif notification["subject"] == "recording.should_start":
            if self.running:
                logger.info("Recording already running!")
            else:
                self.record_eye = notification.get("record_eye", self.record_eye)
                if notification.get("session_name", ""):
                    self.set_session_name(notification["session_name"])
                self.start()

        elif notification["subject"] == "recording.should_stop":
            if self.running:
                self.stop()
            else:
                logger.info("Recording already stopped!")

    def get_rec_time_str(self):
        rec_time = gmtime(time() - self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self):
        session = os.path.join(self.rec_root_dir, self.session_name)
        try:
            os.makedirs(session, exist_ok=True)
            logger.debug("Created new recordings session dir {}".format(session))
        except OSError:
            logger.error(
                "Could not start recording. Session dir {} not writable.".format(
                    session
                )
            )
            return

        self.pldata_writers = {}
        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()
        start_time_synced = self.g_pool.get_timestamp()
        recording_uuid = uuid.uuid4()

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "{:03d}/".format(counter))
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir {}".format(self.rec_path))
                break
            except:
                logger.debug(
                    "We dont want to overwrite data, incrementing counter & trying to make new data folder"
                )
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, "w", newline="", encoding="utf-8") as csvfile:
            csv_utils.write_key_value_file(
                csvfile,
                {
                    "Recording Name": self.session_name,
                    "Start Date": strftime("%d.%m.%Y", localtime(self.start_time)),
                    "Start Time": strftime("%H:%M:%S", localtime(self.start_time)),
                    "Start Time (System)": self.start_time,
                    "Start Time (Synced)": start_time_synced,
                    "Recording UUID": recording_uuid,
                },
            )

        self.video_path = os.path.join(self.rec_path, "world.mp4")
        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.writer = JPEG_Writer(self.video_path, self.g_pool.capture.frame_rate)
        elif hasattr(self.g_pool.capture._recent_frame, "h264_buffer"):
            self.writer = H264Writer(
                self.video_path,
                self.g_pool.capture.frame_size[0],
                self.g_pool.capture.frame_size[1],
                int(self.g_pool.capture.frame_rate),
            )
        else:
            self.writer = AV_Writer(self.video_path, fps=self.g_pool.capture.frame_rate)

        try:
            cal_pt_path = os.path.join(self.g_pool.user_dir, "user_calibration_data")
            cal_data = load_object(cal_pt_path)
            notification = {"subject": "calibration.calibration_data", "record": True}
            notification.update(cal_data)
            notification["topic"] = "notify." + notification["subject"]

            writer = PLData_Writer(self.rec_path, "notify")
            writer.append(notification)
            self.pldata_writers["notify"] = writer
        except FileNotFoundError:
            pass

        if self.show_info_menu:
            self.open_info_menu()
        logger.info("Started Recording.")
        self.notify_all(
            {
                "subject": "recording.started",
                "rec_path": self.rec_path,
                "session_name": self.session_name,
                "record_eye": self.record_eye,
                "compression": self.raw_jpeg,
            }
        )

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu(
            "additional Recording Info", size=(300, 300), pos=(300, 300)
        )
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.keys():
                self.info_menu.insert(0, ui.Text_Input(name, self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(
            ui.Info_Text(
                'Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'
            )
        )
        self.info_menu.append(
            ui.Text_Input("user_info", self, setter=set_user_info, label="User info")
        )
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def recent_events(self, events):

        if self.check_space():
            disk_space = available_gb(self.rec_root_dir)
            if (
                disk_space < self.warning_low_disk_space_th
                and self.low_disk_space_thumb not in self.g_pool.quickbar
            ):
                self.g_pool.quickbar.append(self.low_disk_space_thumb)
            elif (
                disk_space >= self.warning_low_disk_space_th
                and self.low_disk_space_thumb in self.g_pool.quickbar
            ):
                self.g_pool.quickbar.remove(self.low_disk_space_thumb)

            if self.running and disk_space <= self.stop_rec_low_disk_space_th:
                self.stop()
                logger.error("Recording was stopped due to low disk space!")

        if self.running:
            for key, data in events.items():
                if key not in ("dt", "depth_frame") and not key.startswith("frame"):
                    try:
                        writer = self.pldata_writers[key]
                    except KeyError:
                        writer = PLData_Writer(self.rec_path, key)
                        self.pldata_writers[key] = writer
                    writer.extend(data)
            if "frame" in events:
                frame = events["frame"]
                self.writer.write_video_frame(frame)
                self.frame_count += 1

            # # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))

            self.button.status_text = self.get_rec_time_str()

    def stop(self):
        # explicit release of VideoWriter
        try:
            self.writer.release()
        except RuntimeError:
            logger.error("No world video recorded")
        else:
            logger.debug("Closed media container")
            self.g_pool.capture.intrinsics.save(self.rec_path, custom_name="world")
        finally:
            self.writer = None

        # save_object(self.data, os.path.join(self.rec_path, "pupil_data"))
        for writer in self.pldata_writers.values():
            writer.close()

        del self.pldata_writers

        try:
            copy2(
                os.path.join(self.g_pool.user_dir, "surface_definitions"),
                os.path.join(self.rec_path, "surface_definitions"),
            )
        except:
            logger.info(
                "No surface_definitions data found. You may want this if you do marker tracking."
            )

        try:
            with open(self.meta_info_path, "a", newline="") as csvfile:
                csv_utils.write_key_value_file(
                    csvfile,
                    {
                        "Duration Time": self.get_rec_time_str(),
                        "World Camera Frames": self.frame_count,
                        "World Camera Resolution": str(
                            self.g_pool.capture.frame_size[0]
                        )
                        + "x"
                        + str(self.g_pool.capture.frame_size[1]),
                        "Capture Software Version": self.g_pool.version,
                        "Data Format Version": self.g_pool.version,
                        "System Info": get_system_info(),
                    },
                    append=True,
                )
        except Exception:
            logger.exception("Could not save metadata. Please report this bug!")

        try:
            with open(
                os.path.join(self.rec_path, "user_info.csv"), "w", newline=""
            ) as csvfile:
                csv_utils.write_key_value_file(csvfile, self.user_info)
        except Exception:
            logger.exception("Could not save userdata. Please report this bug!")

        self.close_info_menu()

        self.running = False
        if self.menu:
            self.menu.read_only = False
            self.button.status_text = ""

        logger.info("Saved Recording.")
        self.notify_all({"subject": "recording.stopped", "rec_path": self.rec_path})

    def cleanup(self):
        """gets called when the plugin get terminated.
           either volunatily or forced.
        """
        if self.running:
            self.stop()

    def verify_path(self, val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '{}'.".format(n_path))
            return False
        else:
            return n_path

    def set_rec_root_dir(self, val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_root_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if os.path.sep in val:
                logger.warning(
                    "You session name will create one or more subdirectories"
                )
            self.session_name = val
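
The on_notify docstring above documents the recording.should_start / recording.should_stop protocol. As a rough illustration only (assuming the Plugin base class and its notify_all method used throughout these examples), another plugin could drive the recorder like this; Remote_Recording_Trigger is a made-up name for illustration.

from plugin import Plugin

class Remote_Recording_Trigger(Plugin):
    """Hypothetical plugin that starts/stops recordings via notifications."""

    def start_recording(self, session_name="triggered_session"):
        # Picked up by Recorder.on_notify, which then calls self.start()
        self.notify_all(
            {"subject": "recording.should_start", "session_name": session_name}
        )

    def stop_recording(self):
        # Picked up by Recorder.on_notify, which then calls self.stop()
        self.notify_all({"subject": "recording.should_stop"})
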
Example #33
0
def export(should_terminate, frames_to_export, current_frame, rec_dir, user_dir, min_data_confidence,
           start_frame=None, end_frame=None, plugin_initializers=(), out_file_path=None,pupil_data=None):

    vis_plugins = sorted([Vis_Circle,Vis_Cross,Vis_Polyline,Vis_Light_Points,
        Vis_Watermark,Vis_Scan_Path,Vis_Eye_Video_Overlay], key=lambda x: x.__name__)
    analysis_plugins = sorted([Manual_Gaze_Correction, Pupil_Angle_3D_Fixation_Detector,
                               Gaze_Position_2D_Fixation_Detector], key=lambda x: x.__name__)
    user_plugins = sorted(import_runtime_plugins(os.path.join(user_dir, 'plugins')), key=lambda x: x.__name__)

    available_plugins = vis_plugins + analysis_plugins + user_plugins
    name_by_index = [p.__name__ for p in available_plugins]
    plugin_by_name = dict(zip(name_by_index, available_plugins))

    logger = logging.getLogger(__name__+' with pid: '+str(os.getpid()))

    update_recording_to_recent(rec_dir)

    video_path = [f for f in glob(os.path.join(rec_dir, "world.*")) if f[-3:] in ('mp4', 'mkv', 'avi')][0]
    timestamps_path = os.path.join(rec_dir, "world_timestamps.npy")
    pupil_data_path = os.path.join(rec_dir, "pupil_data")

    meta_info = load_meta_info(rec_dir)
    rec_version = read_rec_version(meta_info)

    g_pool = Global_Container()
    g_pool.app = 'exporter'
    g_pool.min_data_confidence = min_data_confidence
    timestamps = np.load(timestamps_path)
    cap = File_Source(g_pool, video_path, timestamps=timestamps)

    # Out file path verification; we do this earlier as well, but if one uses a separate tool, this will kick in.
    if out_file_path is None:
        out_file_path = os.path.join(rec_dir, "world_viz.mp4")
    else:
        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        if not dir_name:
            dir_name = rec_dir
        if not file_name:
            file_name = 'world_viz.mp4'
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

    if os.path.isfile(out_file_path):
        logger.warning("Video out file already exsists. I will overwrite!")
        os.remove(out_file_path)
    logger.debug("Saving Video to {}".format(out_file_path))

    # Trim mark verification
    # make sure the trim marks (start frame, endframe) make sense:
    # We define them like python list slices, thus we can test them like such.
    trimmed_timestamps = timestamps[start_frame:end_frame]
    if len(trimmed_timestamps) == 0:
        logger.warn("Start and end frames are set such that no video will be exported.")
        return False

    if start_frame is None:
        start_frame = 0

    # these two vars are shared with the launching process and give a job length and progress report.
    frames_to_export.value = len(trimmed_timestamps)
    current_frame.value = 0
    exp_info = "Will export from frame {} to frame {}. This means I will export {} frames."
    logger.debug(exp_info.format(start_frame, start_frame + frames_to_export.value, frames_to_export.value))

    # setup of writer
    writer = AV_Writer(out_file_path, fps=cap.frame_rate, use_timestamps=True)

    cap.seek_to_frame(start_frame)

    start_time = time()

    g_pool.capture = cap
    g_pool.rec_dir = rec_dir
    g_pool.user_dir = user_dir
    g_pool.rec_version = rec_version
    g_pool.timestamps = timestamps
    g_pool.delayed_notifications = {}
    g_pool.notifications = []
    # load pupil_positions, gaze_positions
    pupil_data = pupil_data or load_object(pupil_data_path)
    pupil_list = pupil_data['pupil_positions']
    gaze_list = pupil_data['gaze_positions']

    g_pool.pupil_positions_by_frame = correlate_data(pupil_list, g_pool.timestamps)
    g_pool.gaze_positions_by_frame = correlate_data(gaze_list, g_pool.timestamps)
    g_pool.fixations_by_frame = [[] for x in g_pool.timestamps]  # populated by the fixation detector plugin

    # add plugins
    g_pool.plugins = Plugin_List(g_pool, plugin_by_name, plugin_initializers)

    while frames_to_export.value > current_frame.value:

        if should_terminate.value:
            logger.warning("User aborted export. Exported {} frames to {}.".format(current_frame.value, out_file_path))

            # explicit release of VideoWriter
            writer.close()
            writer = None
            return False

        try:
            frame = cap.get_frame()
        except EndofVideoFileError:
            break

        events = {'frame':frame}
        # new positions and events
        events['gaze_positions'] = g_pool.gaze_positions_by_frame[frame.index]
        events['pupil_positions'] = g_pool.pupil_positions_by_frame[frame.index]

        # publish delayed notifications when their time has come.
        for n in list(g_pool.delayed_notifications.values()):
            if n['_notify_time_'] < time():
                del n['_notify_time_']
                del g_pool.delayed_notifications[n['subject']]
                g_pool.notifications.append(n)

        # notify each plugin if there are new notifications:
        while g_pool.notifications:
            n = g_pool.notifications.pop(0)
            for p in g_pool.plugins:
                p.on_notify(n)

        # allow each Plugin to do its work.
        for p in g_pool.plugins:
            p.recent_events(events)

        writer.write_video_frame(frame)
        current_frame.value += 1

    writer.close()
    writer = None

    duration = time()-start_time
    effective_fps = float(current_frame.value)/duration

    result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
    logger.info(result.format(current_frame.value, out_file_path, duration, effective_fps))
    return True
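
The export() function above reads and writes should_terminate, frames_to_export and current_frame through their .value attributes, so the launching process is expected to hand in shared values. A hedged sketch of such a launch with multiprocessing; all paths and the confidence value are placeholders.

import multiprocessing as mp
from ctypes import c_bool, c_int

should_terminate = mp.Value(c_bool, False)   # set to True to abort the export
frames_to_export = mp.Value(c_int, 0)        # job length, filled in by export()
current_frame = mp.Value(c_int, 0)           # progress, updated by export()

proc = mp.Process(
    target=export,
    args=(
        should_terminate,
        frames_to_export,
        current_frame,
        "/path/to/recording",   # rec_dir (placeholder)
        "/path/to/user_dir",    # user_dir (placeholder)
        0.6,                    # min_data_confidence (placeholder)
    ),
)
proc.start()
# ... poll current_frame.value / frames_to_export.value for progress ...
proc.join()
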
Example #34
0
File: recorder.py Project: lloves/pupil
class Recorder(Plugin):
    """Capture Recorder"""
    def __init__(self,g_pool,session_name = get_auto_name(),rec_dir=None, user_info={'name':'','additional_field':'change_me'},info_menu_conf={},show_info_menu=False, record_eye = False, audio_src = 'No Audio',raw_jpeg=False):
        super(Recorder, self).__init__(g_pool)
        #update name if it was autogenerated.
        if session_name.startswith('20') and len(session_name)==10:
            session_name = get_auto_name()

        base_dir = self.g_pool.user_dir.rsplit(os.path.sep,1)[0]
        default_rec_dir = os.path.join(base_dir,'recordings')

        if rec_dir and rec_dir != default_rec_dir and self.verify_path(rec_dir):
            self.rec_dir = rec_dir
        else:
            try:
                os.makedirs(default_rec_dir)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    logger.error("Could not create Rec dir")
                    raise e
            else:
                logger.info('Created standard Rec dir at "%s"'%default_rec_dir)
            self.rec_dir = default_rec_dir

        self.raw_jpeg = raw_jpeg
        self.order = .9
        self.record_eye = record_eye
        self.session_name = session_name
        self.audio_devices_dict = Audio_Input_Dict()
        if audio_src in self.audio_devices_dict.keys():
            self.audio_src = audio_src
        else:
            self.audio_src = 'No Audio'
        self.running = False
        self.menu = None
        self.button = None

        self.user_info = user_info
        self.show_info_menu = show_info_menu
        self.info_menu = None
        self.info_menu_conf = info_menu_conf


    def get_init_dict(self):
        d = {}
        d['record_eye'] = self.record_eye
        d['audio_src'] = self.audio_src
        d['session_name'] = self.session_name
        d['user_info'] = self.user_info
        d['info_menu_conf'] = self.info_menu_conf
        d['show_info_menu'] = self.show_info_menu
        d['rec_dir'] = self.rec_dir
        d['raw_jpeg'] = self.raw_jpeg
        return d


    def init_gui(self):
        self.menu = ui.Growing_Menu('Recorder')
        self.g_pool.sidebar.insert(3,self.menu)
        self.menu.append(ui.Info_Text('Pupil recordings are saved like this: "path_to_recordings/recording_session_name/nnn" where "nnn" is an increasing number to avoid overwrites. You can use "/" in your session name to create subdirectories.'))
        self.menu.append(ui.Info_Text('Recordings are saved to "~/pupil_recordings". You can change the path here but note that invalid input will be ignored.'))
        self.menu.append(ui.Text_Input('rec_dir',self,setter=self.set_rec_dir,label='Path to recordings'))
        self.menu.append(ui.Text_Input('session_name',self,setter=self.set_session_name,label='Recording session name'))
        self.menu.append(ui.Switch('show_info_menu',self,on_val=True,off_val=False,label='Request additional user info'))
        self.menu.append(ui.Selector('raw_jpeg',self,selection = [True,False], labels=["bigger file, less CPU", "smaller file, more CPU"],label='compression'))
        self.menu.append(ui.Info_Text('Recording the raw eye video is optional. We use it for debugging.'))
        self.menu.append(ui.Switch('record_eye',self,on_val=True,off_val=False,label='Record eye'))
        self.menu.append(ui.Selector('audio_src',self, selection=self.audio_devices_dict.keys()))

        self.button = ui.Thumb('running',self,setter=self.toggle,label='Record',hotkey='r')
        self.button.on_color[:] = (1,.0,.0,.8)
        self.g_pool.quickbar.insert(1,self.button)


    def deinit_gui(self):
        if self.menu:
            self.g_pool.sidebar.remove(self.menu)
            self.menu = None
        if self.button:
            self.g_pool.quickbar.remove(self.button)
            self.button = None



    def toggle(self, _=None):
        if self.running:
            self.stop()
        else:
            self.start()

    def on_notify(self,notification):
        if notification['subject'] == 'rec_should_start':
            if self.running:
                logger.warning('Recording is already running!')
            else:
                self.set_session_name(notification["session_name"])
                self.start(network_propagate=notification.get('network_propagate',True))
        elif notification['subject'] == 'rec_should_stop':
            if self.running:
                self.stop(network_propagate=notification.get('network_propagate',True))
            else:
                logger.warning('Recording is already stopped!')


    def get_rec_time_str(self):
        rec_time = gmtime(time()-self.start_time)
        return strftime("%H:%M:%S", rec_time)

    def start(self,network_propagate=True):
        self.timestamps = []
        self.data = {'pupil_positions':[],'gaze_positions':[]}
        self.pupil_pos_list = []
        self.gaze_pos_list = []

        self.frame_count = 0
        self.running = True
        self.menu.read_only = True
        self.start_time = time()

        session = os.path.join(self.rec_dir, self.session_name)
        try:
            os.makedirs(session)
            logger.debug("Created new recordings session dir %s"%session)

        except:
            logger.debug("Recordings session dir %s already exists, using it." %session)

        # set up self incrementing folder within session folder
        counter = 0
        while True:
            self.rec_path = os.path.join(session, "%03d/" % counter)
            try:
                os.mkdir(self.rec_path)
                logger.debug("Created new recording dir %s"%self.rec_path)
                break
            except:
                logger.debug("We dont want to overwrite data, incrementing counter & trying to make new data folder")
                counter += 1

        self.meta_info_path = os.path.join(self.rec_path, "info.csv")

        with open(self.meta_info_path, 'w') as f:
            f.write("Recording Name\t"+self.session_name+ "\n")
            f.write("Start Date\t"+ strftime("%d.%m.%Y", localtime(self.start_time))+ "\n")
            f.write("Start Time\t"+ strftime("%H:%M:%S", localtime(self.start_time))+ "\n")


        if self.audio_src != 'No Audio':
            audio_path = os.path.join(self.rec_path, "world.wav")
            self.audio_writer = Audio_Capture(audio_path,self.audio_devices_dict[self.audio_src])
        else:
            self.audio_writer = None

        if self.raw_jpeg and self.g_pool.capture.jpeg_support:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = JPEG_Writer(self.video_path,self.g_pool.capture.frame_rate)
        else:
            self.video_path = os.path.join(self.rec_path, "world.mp4")
            self.writer = AV_Writer(self.video_path,fps=self.g_pool.capture.frame_rate)
        # positions path to eye process
        if self.record_eye:
            for tx in self.g_pool.eye_tx:
                tx.send((self.rec_path,self.raw_jpeg))

        if self.show_info_menu:
            self.open_info_menu()

        self.notify_all( {'subject':'rec_started','rec_path':self.rec_path,'session_name':self.session_name,'network_propagate':network_propagate} )

    def open_info_menu(self):
        self.info_menu = ui.Growing_Menu('additional Recording Info',size=(300,300),pos=(300,300))
        self.info_menu.configuration = self.info_menu_conf

        def populate_info_menu():
            self.info_menu.elements[:-2] = []
            for name in self.user_info.iterkeys():
                self.info_menu.insert(0,ui.Text_Input(name,self.user_info))

        def set_user_info(new_string):
            self.user_info = new_string
            populate_info_menu()

        populate_info_menu()
        self.info_menu.append(ui.Info_Text('Use the *user info* field to add/remove additional fields and their values. The format must be a valid Python dictionary. For example -- {"key":"value"}. You can add as many fields as you require. Your custom fields will be saved for your next session.'))
        self.info_menu.append(ui.Text_Input('user_info',self,setter=set_user_info,label="User info"))
        self.g_pool.gui.append(self.info_menu)

    def close_info_menu(self):
        if self.info_menu:
            self.info_menu_conf = self.info_menu.configuration
            self.g_pool.gui.remove(self.info_menu)
            self.info_menu = None

    def update(self,frame,events):
        if self.running:
            self.data['pupil_positions'] += events['pupil_positions']
            self.data['gaze_positions'] += events['gaze_positions']
            self.timestamps.append(frame.timestamp)
            self.writer.write_video_frame(frame)
            self.frame_count += 1

            # cv2.putText(frame.img, "Frame %s"%self.frame_count,(200,200), cv2.FONT_HERSHEY_SIMPLEX,1,(255,100,100))
            for p in events['pupil_positions']:
                pupil_pos = p['timestamp'],p['confidence'],p['id'],p['norm_pos'][0],p['norm_pos'][1],p['diameter']
                self.pupil_pos_list.append(pupil_pos)

            for g in events.get('gaze_positions',[]):
                gaze_pos = g['timestamp'],g['confidence'],g['norm_pos'][0],g['norm_pos'][1]
                self.gaze_pos_list.append(gaze_pos)

            self.button.status_text = self.get_rec_time_str()

    def stop(self,network_propagate=True):
        #explicit release of VideoWriter
        self.writer.release()
        self.writer = None

        if self.record_eye:
            for tx in self.g_pool.eye_tx:
                try:
                    tx.send((None,None))
                except:
                    logger.warning("Could not stop eye-recording. Please report this bug!")

        save_object(self.data,os.path.join(self.rec_path, "pupil_data"))

        gaze_list_path = os.path.join(self.rec_path, "gaze_positions.npy")
        np.save(gaze_list_path,np.asarray(self.gaze_pos_list))

        pupil_list_path = os.path.join(self.rec_path, "pupil_positions.npy")
        np.save(pupil_list_path,np.asarray(self.pupil_pos_list))

        timestamps_path = os.path.join(self.rec_path, "world_timestamps.npy")
        # ts = sanitize_timestamps(np.array(self.timestamps))
        ts = np.array(self.timestamps)
        np.save(timestamps_path,ts)

        try:
            copy2(os.path.join(self.g_pool.user_dir,"surface_definitions"),os.path.join(self.rec_path,"surface_definitions"))
        except:
            logger.info("No surface_definitions data found. You may want this if you do marker tracking.")

        try:
            copy2(os.path.join(self.g_pool.user_dir,"cal_pt_cloud.npy"),os.path.join(self.rec_path,"cal_pt_cloud.npy"))
        except:
            logger.warning("No calibration data found. Please calibrate first.")

        try:
            copy2(os.path.join(self.g_pool.user_dir,"camera_calibration"),os.path.join(self.rec_path,"camera_calibration"))
        except:
            logger.info("No camera calibration found.")

        try:
            with open(self.meta_info_path, 'a') as f:
                f.write("Duration Time\t"+ self.get_rec_time_str()+ "\n")
                if self.g_pool.binocular:
                    f.write("Eye Mode\tbinocular\n")
                else:
                    f.write("Eye Mode\tmonocular\n")
                f.write("Duration Time\t"+ self.get_rec_time_str()+ "\n")
                f.write("World Camera Frames\t"+ str(self.frame_count)+ "\n")
                f.write("World Camera Resolution\t"+ str(self.g_pool.capture.frame_size[0])+"x"+str(self.g_pool.capture.frame_size[1])+"\n")
                f.write("Capture Software Version\t%s\n"%self.g_pool.version)
                if platform.system() == "Windows":
                    username = os.environ["USERNAME"]
                    sysname, nodename, release, version, machine, _ = platform.uname()
                else:
                    username = getpass.getuser()
                    try:
                        sysname, nodename, release, version, machine = os.uname()
                    except:
                        sysname, nodename, release, version, machine = sys.platform,None,None,None,None
                f.write("User\t"+username+"\n")
                f.write("Platform\t"+sysname+"\n")
                f.write("Machine\t"+nodename+"\n")
                f.write("Release\t"+release+"\n")
                f.write("Version\t"+version+"\n")
        except Exception:
            logger.exception("Could not save metadata. Please report this bug!")

        try:
            with open(os.path.join(self.rec_path, "user_info.csv"), 'w') as f:
                for name,val in self.user_info.iteritems():
                    f.write("%s\t%s\n"%(name,val))
        except Exception:
            logger.exception("Could not save userdata. Please report this bug!")


        self.close_info_menu()

        if self.audio_writer:
            self.audio_writer = None

        self.running = False
        self.menu.read_only = False
        self.button.status_text = ''

        self.timestamps = []
        self.data = {'pupil_positions':[],'gaze_positions':[]}
        self.pupil_pos_list = []
        self.gaze_pos_list = []


        self.notify_all( {'subject':'rec_stopped','rec_path':self.rec_path,'network_propagate':network_propagate} )



    def cleanup(self):
        """gets called when the plugin get terminated.
           either volunatily or forced.
        """
        if self.running:
            self.stop()
        self.deinit_gui()

    def verify_path(self,val):
        try:
            n_path = os.path.expanduser(val)
            logger.debug("Expanded user path.")
        except:
            n_path = val
        if not n_path:
            logger.warning("Please specify a path.")
            return False
        elif not os.path.isdir(n_path):
            logger.warning("This is not a valid path.")
            return False
        # elif not os.access(n_path, os.W_OK):
        elif not writable_dir(n_path):
            logger.warning("Do not have write access to '%s'."%n_path)
            return False
        else:
            return n_path

    def set_rec_dir(self,val):
        n_path = self.verify_path(val)
        if n_path:
            self.rec_dir = n_path

    def set_session_name(self, val):
        if not val:
            self.session_name = get_auto_name()
        else:
            if '/' in val:
                logger.warning('Your session name will create one or more subdirectories')
            self.session_name = val
Example #35
0
def _export_world_video(
    rec_dir,
    user_dir,
    min_data_confidence,
    start_frame,
    end_frame,
    plugin_initializers,
    out_file_path,
    pre_computed_eye_data,
):
    """
    Simulates the generation of the world video and saves a certain time range as a video.
    It simulates a whole g_pool such that all plugins run as normal.
    """
    from glob import glob
    from time import time

    import file_methods as fm
    import player_methods as pm
    from av_writer import AV_Writer

    # We are not importing manual gaze correction: in Player, corrections have already been applied,
    # and in the batch exporter this plugin makes little sense.
    from fixation_detector import Offline_Fixation_Detector
    from eye_movement import Offline_Eye_Movement_Detector

    # Plug-ins
    from plugin import Plugin_List, import_runtime_plugins
    from video_capture import EndofVideoError, File_Source
    from video_overlay.plugins import Video_Overlay, Eye_Overlay
    from vis_circle import Vis_Circle
    from vis_cross import Vis_Cross
    from vis_light_points import Vis_Light_Points
    from vis_polyline import Vis_Polyline
    from vis_scan_path import Vis_Scan_Path
    from vis_watermark import Vis_Watermark

    PID = str(os.getpid())
    logger = logging.getLogger(__name__ + " with pid: " + PID)
    start_status = "Starting video export with pid: {}".format(PID)
    logger.info(start_status)
    yield start_status, 0

    try:
        vis_plugins = sorted(
            [
                Vis_Circle,
                Vis_Cross,
                Vis_Polyline,
                Vis_Light_Points,
                Vis_Watermark,
                Vis_Scan_Path,
                Eye_Overlay,
                Video_Overlay,
            ],
            key=lambda x: x.__name__,
        )
        analysis_plugins = [Offline_Fixation_Detector, Offline_Eye_Movement_Detector]
        user_plugins = sorted(
            import_runtime_plugins(os.path.join(user_dir, "plugins")),
            key=lambda x: x.__name__,
        )

        available_plugins = vis_plugins + analysis_plugins + user_plugins
        name_by_index = [p.__name__ for p in available_plugins]
        plugin_by_name = dict(zip(name_by_index, available_plugins))

        meta_info = pm.load_meta_info(rec_dir)

        g_pool = GlobalContainer()
        g_pool.app = "exporter"
        g_pool.min_data_confidence = min_data_confidence

        valid_ext = (".mp4", ".mkv", ".avi", ".h264", ".mjpeg", ".fake")
        try:
            video_path = next(
                f
                for f in glob(os.path.join(rec_dir, "world.*"))
                if os.path.splitext(f)[1] in valid_ext
            )
        except StopIteration:
            raise FileNotFoundError("No world video found")
        cap = File_Source(g_pool, source_path=video_path, fill_gaps=True, timing=None)

        timestamps = cap.timestamps

        file_name = os.path.basename(out_file_path)
        dir_name = os.path.dirname(out_file_path)
        out_file_path = os.path.expanduser(os.path.join(dir_name, file_name))

        if os.path.isfile(out_file_path):
            logger.warning("Video out file already exsists. I will overwrite!")
            os.remove(out_file_path)
        logger.debug("Saving Video to {}".format(out_file_path))

        # Trim mark verification
        # make sure the trim marks (start frame, end frame) make sense:
        # We define them like python list slices, thus we can test them like such.
        trimmed_timestamps = timestamps[start_frame:end_frame]
        if len(trimmed_timestamps) == 0:
            warn = "Start and end frames are set such that no video will be exported."
            logger.warning(warn)
            yield warn, 0.0
            return

        if start_frame is None:
            start_frame = 0

        # these two vars are shared with the launching process and give a job length and progress report.
        frames_to_export = len(trimmed_timestamps)
        current_frame = 0
        exp_info = (
            "Will export from frame {} to frame {}. This means I will export {} frames."
        )
        logger.debug(
            exp_info.format(
                start_frame, start_frame + frames_to_export, frames_to_export
            )
        )

        # setup of writer
        writer = AV_Writer(
            out_file_path, fps=cap.frame_rate, audio_dir=rec_dir, use_timestamps=True
        )

        cap.seek_to_frame(start_frame)

        start_time = time()

        g_pool.plugin_by_name = plugin_by_name
        g_pool.capture = cap
        g_pool.rec_dir = rec_dir
        g_pool.user_dir = user_dir
        g_pool.meta_info = meta_info
        g_pool.timestamps = timestamps
        g_pool.delayed_notifications = {}
        g_pool.notifications = []

        for initializers in pre_computed_eye_data.values():
            initializers["data"] = [
                fm.Serialized_Dict(msgpack_bytes=serialized)
                for serialized in initializers["data"]
            ]

        g_pool.pupil_positions = pm.Bisector(**pre_computed_eye_data["pupil"])
        g_pool.pupil_positions_by_id = (
            pm.Bisector(**pre_computed_eye_data["pupil_by_id_0"]),
            pm.Bisector(**pre_computed_eye_data["pupil_by_id_1"]),
        )
        g_pool.gaze_positions = pm.Bisector(**pre_computed_eye_data["gaze"])
        g_pool.fixations = pm.Affiliator(**pre_computed_eye_data["fixations"])

        # add plugins
        g_pool.plugins = Plugin_List(g_pool, plugin_initializers)

        while frames_to_export > current_frame:
            try:
                frame = cap.get_frame()
            except EndofVideoError:
                break

            events = {"frame": frame}
            # new positions and events
            frame_window = pm.enclosing_window(g_pool.timestamps, frame.index)
            events["gaze"] = g_pool.gaze_positions.by_ts_window(frame_window)
            events["pupil"] = g_pool.pupil_positions.by_ts_window(frame_window)

            # publish delayed notifications when their time has come.
            for n in list(g_pool.delayed_notifications.values()):
                if n["_notify_time_"] < time():
                    del n["_notify_time_"]
                    del g_pool.delayed_notifications[n["subject"]]
                    g_pool.notifications.append(n)

            # notify each plugin if there are new notifications:
            while g_pool.notifications:
                n = g_pool.notifications.pop(0)
                for p in g_pool.plugins:
                    p.on_notify(n)

            # allow each Plugin to do its work.
            for p in g_pool.plugins:
                p.recent_events(events)

            writer.write_video_frame(frame)
            current_frame += 1
            yield "Exporting with pid {}".format(PID), current_frame

        writer.close(timestamp_export_format="all")

        duration = time() - start_time
        effective_fps = float(current_frame) / duration

        result = "Export done: Exported {} frames to {}. This took {} seconds. Exporter ran at {} frames per second."
        logger.info(
            result.format(current_frame, out_file_path, duration, effective_fps)
        )
        yield "Export done. This took {:.0f} seconds.".format(duration), current_frame

    except GeneratorExit:
        logger.warning("Video export with pid {} was canceled.".format(os.getpid()))