Code Example #1
class FrameFetcher:
    __slots__ = ("source", "current_frame")

    def __init__(self, video_path):
        self.source = File_Source(SimpleNamespace(),
                                  source_path=video_path,
                                  timing=None,
                                  fill_gaps=True)
        if not self.source.initialised:
            raise FileNotFoundError(video_path)
        self.current_frame = self.source.get_frame()

    def closest_frame_to_ts(self, ts):
        closest_idx = pm.find_closest(self.source.timestamps, ts)
        return self.frame_for_idx(closest_idx)

    def frame_for_idx(self, requested_frame_idx):
        if requested_frame_idx != self.current_frame.index:
            if requested_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame,
                # it's faster to just read one and throw it away.
                self.source.get_frame()
            if requested_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(int(requested_frame_idx))

            try:
                self.current_frame = self.source.get_frame()
            except EndofVideoError:
                logger.info("End of video {}.".format(self.source.source_path))
        return self.current_frame
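
A minimal usage sketch for the FrameFetcher above (hypothetical: it assumes a Pupil recording video at the given path and the module-level imports the snippet relies on, i.e. File_Source and EndofVideoError from video_capture, player_methods as pm, and a module logger):

fetcher = FrameFetcher("recording/world.mp4")
# Fetch the frame whose timestamp is closest to one second after the first frame.
target_ts = fetcher.source.timestamps[0] + 1.0
frame = fetcher.closest_frame_to_ts(target_ts)
print(frame.index, frame.timestamp)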
Code Example #2
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )

    #  NOTE: The start time of the exported recording will be synced with the
    #  world recording export! This means that if the recording to export started
    #  later than the world video, the first frame of the exported recording will
    #  not be at timestamp 0, but later. Some video players (e.g. VLC on Windows)
    #  might display the video oddly in this case, but we prefer to keep the
    #  exported videos synchronized.
    start_time = export_window[0]
    writer = MPEG_Writer(output_file, start_time)

    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index
            )
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Code Example #3
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    yield "Export video", 0.0
    input_source = File_Source(SimpleNamespace(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps, (export_start, export_stop - 1))
    (export_from_index, export_to_index) = pm.find_closest(
        input_source.timestamps, export_window
    )
    writer = AV_Writer(
        output_file, fps=input_source.frame_rate, audio_dir=None, use_timestamps=True
    )
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index
            )
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Code Example #4
def _convert_video_file(
    input_file,
    output_file,
    export_range,
    world_timestamps,
    process_frame,
    timestamp_export_format,
):
    yield "Export video", 0.0
    input_source = File_Source(EmptyGPool(), input_file, fill_gaps=True)
    if not input_source.initialised:
        yield "Exporting video failed", 0.0
        return

    # yield progress results two times per second
    update_rate = int(input_source.frame_rate / 2)

    export_start, export_stop = export_range  # export_stop is exclusive
    export_window = pm.exact_window(world_timestamps,
                                    (export_start, export_stop - 1))
    (export_from_index,
     export_to_index) = pm.find_closest(input_source.timestamps, export_window)
    writer = AV_Writer(output_file,
                       fps=input_source.frame_rate,
                       audio_dir=None,
                       use_timestamps=True)
    input_source.seek_to_frame(export_from_index)
    next_update_idx = export_from_index + update_rate
    while True:
        try:
            input_frame = input_source.get_frame()
        except EndofVideoError:
            break
        if input_frame.index >= export_to_index:
            break

        output_img = process_frame(input_source, input_frame)
        output_frame = input_frame
        output_frame._img = output_img  # it's ._img because .img has no setter
        writer.write_video_frame(output_frame)

        if input_source.get_frame_index() >= next_update_idx:
            progress = (input_source.get_frame_index() - export_from_index) / (
                export_to_index - export_from_index)
            yield "Exporting video", progress * 100.0
            next_update_idx += update_rate

    writer.close(timestamp_export_format)
    input_source.cleanup()
    yield "Exporting video completed", 100.0
Code Example #5
class Eye_Wrapper(object):
    def __init__(self, g_pool, eyeid, pos, hdpi_fac=1., hflip=False, vflip=False):
        super().__init__()
        self.g_pool = g_pool
        self.eyeid = eyeid
        self.pos = pos
        self.hflip = hflip
        self.vflip = vflip
        self.source = None
        self.eye_world_frame_map = None
        self.current_eye_frame = None
        self.drag_offset = None
        self.menu = None
        self.hdpi_fac = hdpi_fac

    def initliaze_video(self, rec_dir, world_timestamps):
        eye_loc = os.path.join(rec_dir, 'eye{}.*'.format(self.eyeid))
        try:
            self.source = File_Source(Empty(), source_path=glob(eye_loc)[0])
            self.current_eye_frame = self.source.get_frame()
        except (FileNotFoundError, IndexError, FileCaptureError):
            logger.warning('Video for eye{} was not found or could not be opened.'.format(self.eyeid))
        else:
            self.eye_world_frame_map = correlate_eye_world(self.source.timestamps, world_timestamps)
            if self.menu is not None:
                self.menu.read_only = False

    def add_eye_menu(self, parent):
        self.menu = ui.Growing_Menu('Eye {}'.format(self.eyeid))
        parent.append(self.menu)
        self.menu.append(ui.Switch('hflip', self, label='Horizontal flip'))
        self.menu.append(ui.Switch('vflip', self, label='Vertical flip'))
        self.menu.read_only = not self.initialized

    def remove_eye_menu(self, parent):
        parent.remove(self.menu)
        self.menu = None

    def deinitliaze_video(self):
        self.source = None
        self.eye_world_frame_map = None
        self.current_eye_frame = None
        if self.menu is not None:
            self.menu.read_only = True

    @property
    def initialized(self):
        return self.source is not None

    @property
    def config(self):
        return {'pos': self.pos, 'hflip': self.hflip, 'vflip': self.vflip}

    def visualize(self, frame, alpha, scale, show_ellipses, pupil_positions):
        if not self.initialized:
            return

        requested_eye_frame_idx = self.eye_world_frame_map[frame.index]
        # 1. do we need a new frame?
        if requested_eye_frame_idx != self.current_eye_frame.index:
            if requested_eye_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame, it's faster to just read one and throw it away.
                self.source.get_frame()
            if requested_eye_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(requested_eye_frame_idx)

            try:
                self.current_eye_frame = self.source.get_frame()
            except EndofVideoError:
                logger.info("Reached the end of the eye video for eye video {}.".format(self.eyeid))

        # 2. dragging image
        if self.drag_offset is not None:
            x, y = glfwGetCursorPos(glfwGetCurrentContext())
            pos = x * self.hdpi_fac, y * self.hdpi_fac
            pos = normalize(pos, self.g_pool.camera_render_size)
            # Position in img pixels
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))
            self.pos = int(pos[0] + self.drag_offset[0]), int(pos[1] + self.drag_offset[1])

        # 3. keep in image bounds, do this even when not dragging because the video size could change.
        video_size = round(self.current_eye_frame.width * scale), round(self.current_eye_frame.height * scale)

        # frame.img.shape[0] is height, frame.img.shape[1] is width of screen
        self.pos = (min(frame.img.shape[1] - video_size[0], max(self.pos[0], 0)),
                    min(frame.img.shape[0] - video_size[1], max(self.pos[1], 0)))


        # 4. flipping images, converting to greyscale
        eyeimage = self.current_eye_frame.gray
        eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)

        if show_ellipses:
            try:
                pp = next((pp for pp in pupil_positions if pp['id'] == self.eyeid and pp['timestamp'] == self.current_eye_frame.timestamp))
            except StopIteration:
                pass
            else:
                el = pp['ellipse']
                conf = int(pp.get('model_confidence', pp.get('confidence', 0.1)) * 255)
                el_points = getEllipsePts((el['center'], el["axes"], el['angle']))
                cv2.polylines(eyeimage, [np.asarray(el_points,dtype='i')], True, (0, 0, 255, conf), thickness=1)
                cv2.circle(eyeimage,(int(el['center'][0]),int(el['center'][1])), 5, (0, 0, 255, conf), thickness=-1)


        #flip and scale
        eyeimage = cv2.resize(eyeimage, (0, 0), fx=scale, fy=scale)
        if self.hflip:
            eyeimage = np.fliplr(eyeimage)
        if self.vflip:
            eyeimage = np.flipud(eyeimage)

        transparent_image_overlay(self.pos, eyeimage, frame.img, alpha)

    def on_click(self, pos, button, action, hdpi_fac, eye_scale):
        self.hdpi_fac = hdpi_fac
        if not self.initialized:
            return False  # click event has not been consumed

        video_size = round(self.current_eye_frame.width * eye_scale), round(self.current_eye_frame.height * eye_scale)

        if (self.pos[0] < pos[0] < self.pos[0] + video_size[0] and
                self.pos[1] < pos[1] < self.pos[1] + video_size[1]):
            self.drag_offset = self.pos[0] - pos[0], self.pos[1] - pos[1]
            return True
        else:
            self.drag_offset = None
            return False
Code Example #6
def fill_cache(visited_list,video_file_path,timestamps,q,seek_idx,run,min_marker_perimeter):
    '''
    This function is part of marker_detector. It is run as a separate process
    and must be kept in a separate file for namespace sanitisation.
    '''
    import os
    #import logging
    #logger = logging.getLogger(__name__+' with pid: '+str(os.getpid()) )
    #logger.debug('Started cacher process for Marker Detector')
    import cv2
    from video_capture import File_Source, EndofVideoFileError,FileSeekError
    from screen_detector_methods import detect_screens
    #aperture = 9
    markers = []
    cap = File_Source(Global_Container(),video_file_path,timestamps=timestamps)

    def next_unvisited_idx(frame_idx):
        try:
            visited = visited_list[frame_idx]
        except IndexError:
            visited = True # trigger search

        if not visited:
            next_unvisited = frame_idx
        else:
            # find next unvisited site in the future
            try:
                next_unvisited = visited_list.index(False,frame_idx)
            except ValueError:
                # anything in the past?
                try:
                    next_unvisited = visited_list.index(False,0,frame_idx)
                except ValueError:
                    #no unvisited sites left. Done!
                    #logger.debug("Caching completed.")
                    next_unvisited = None
        return next_unvisited

    def handle_frame(next):
        if next != cap.get_frame_index():
            #we need to seek:
            #logger.debug("Seeking to Frame %s" %next)
            try:
                cap.seek_to_frame(next)
            except FileSeekError:
                #could not seek to requested position
                #logger.warning("Could not evaluate frame: %s."%next)
                visited_list[next] = True # this frame is now visited.
                q.put((next,[])) # we cannot look at the frame, report no detection
                return
            #seeking invalidates prev markers for the detector
            markers[:] = []

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            #logger.debug("Video File's last frame(s) not accesible")
             #could not read frame
            #logger.warning("Could not evaluate frame: %s."%next)
            visited_list[next] = True # this frame is now visited.
            q.put((next,[])) # we cannot look at the frame, report no detection
            return

        markers[:] = detect_screens(frame.gray)

        visited_list[frame.index] = True
        q.put((frame.index,markers[:])) #object passed will only be pickled when collected from the other process! need to make a copy to avoid overwrite!!!

    while run.value:
        next = cap.get_frame_index()
        if seek_idx.value != -1:
            next = seek_idx.value
            seek_idx.value = -1
            #logger.debug("User required seek. Marker caching at Frame: %s"%next)


        #check the visited list
        next = next_unvisited_idx(next)
        if next is None:
            #we are done here:
            break
        else:
            handle_frame(next)


    #logger.debug("Closing Cacher Process")
    cap.cleanup()
    q.close()
    run.value = False
    return
Code Example #7
def fill_cache(visited_list, video_file_path, timestamps, q, seek_idx, run,
               min_marker_perimeter):
    '''
    This function is part of marker_detector. It is run as a separate process
    and must be kept in a separate file for namespace sanitisation.
    '''
    import os
    import logging
    logger = logging.getLogger(__name__ + ' with pid: ' + str(os.getpid()))
    logger.debug('Started cacher process for Marker Detector')
    import cv2
    from video_capture import File_Source, EndofVideoFileError, FileSeekError
    from square_marker_detect import detect_markers_robust
    aperture = 9
    markers = []
    cap = File_Source(Global_Container(),
                      video_file_path,
                      timestamps=timestamps)

    def next_unvisited_idx(frame_idx):
        try:
            visited = visited_list[frame_idx]
        except IndexError:
            visited = True  # trigger search

        if not visited:
            next_unvisited = frame_idx
        else:
            # find next unvisited site in the future
            try:
                next_unvisited = visited_list.index(False, frame_idx)
            except ValueError:
                # anything in the past?
                try:
                    next_unvisited = visited_list.index(False, 0, frame_idx)
                except ValueError:
                    #no unvisited sites left. Done!
                    logger.debug("Caching completed.")
                    next_unvisited = None
        return next_unvisited

    def handle_frame(next):
        if next != cap.get_frame_index():
            #we need to seek:
            logger.debug("Seeking to Frame %s" % next)
            try:
                cap.seek_to_frame(next)
            except FileSeekError:
                #could not seek to requested position
                logger.warning("Could not evaluate frame: %s." % next)
                visited_list[next] = True  # this frame is now visited.
                q.put((next,
                       []))  # we cannot look at the frame, report no detection
                return
            #seeking invalidates prev markers for the detector
            markers[:] = []

        try:
            frame = cap.get_frame_nowait()
        except EndofVideoFileError:
            logger.debug("Video File's last frame(s) not accesible")
            #could not read frame
            logger.warning("Could not evaluate frame: %s." % next)
            visited_list[next] = True  # this frame is now visited.
            q.put(
                (next, []))  # we cannot look at the frame, report no detection
            return

        markers[:] = detect_markers_robust(
            frame.gray,
            grid_size=5,
            prev_markers=markers,
            min_marker_perimeter=min_marker_perimeter,
            aperture=aperture,
            visualize=0,
            true_detect_every_frame=1)

        visited_list[frame.index] = True
        q.put(
            (frame.index, markers[:])
        )  # object passed will only be pickled when collected from the other process! need to make a copy to avoid overwrite!!!

    while run.value:
        next = cap.get_frame_index()
        if seek_idx.value != -1:
            next = seek_idx.value
            seek_idx.value = -1
            logger.debug("User required seek. Marker caching at Frame: %s" %
                         next)

        #check the visited list
        next = next_unvisited_idx(next)
        if next is None:
            #we are done here:
            break
        else:
            handle_frame(next)

    logger.debug("Closing Cacher Process")
    cap.cleanup()
    q.close()
    run.value = False
    return
Code Example #8
class Eye_Wrapper(object):
    def __init__(self,
                 g_pool,
                 eyeid,
                 pos,
                 hdpi_fac=1.0,
                 hflip=False,
                 vflip=False):
        super().__init__()
        self.g_pool = g_pool
        self.eyeid = eyeid
        self.pos = pos
        self.hflip = hflip
        self.vflip = vflip
        self.source = None
        self.eye_world_frame_map = None
        self.current_eye_frame = None
        self.drag_offset = None
        self.menu = None
        self.hdpi_fac = hdpi_fac

    def initliaze_video(self, rec_dir, world_timestamps):
        eye_loc = os.path.join(rec_dir, "eye{}.*".format(self.eyeid))
        try:
            self.source = File_Source(SimpleNamespace(),
                                      source_path=glob(eye_loc)[0],
                                      timing=None)
            self.current_eye_frame = self.source.get_frame()
        except (FileNotFoundError, IndexError):
            logger.warning(
                "Video for eye{} was not found or could not be opened.".format(
                    self.eyeid))
        else:
            self.eye_world_frame_map = correlate_eye_world(
                self.source.timestamps, world_timestamps)
            if self.menu is not None:
                self.menu.read_only = False

    def add_eye_menu(self, parent):
        self.menu = ui.Growing_Menu("Eye {}".format(self.eyeid))
        parent.append(self.menu)
        self.menu.append(ui.Switch("hflip", self, label="Horizontal flip"))
        self.menu.append(ui.Switch("vflip", self, label="Vertical flip"))
        self.menu.read_only = not self.initialized

    def remove_eye_menu(self, parent):
        parent.remove(self.menu)
        self.menu = None

    def deinitliaze_video(self):
        self.source = None
        self.eye_world_frame_map = None
        self.current_eye_frame = None
        if self.menu is not None:
            self.menu.read_only = True

    @property
    def initialized(self):
        return self.source is not None

    @property
    def config(self):
        return {"pos": self.pos, "hflip": self.hflip, "vflip": self.vflip}

    def visualize(self, frame, alpha, scale, show_ellipses, pupil_positions):
        if not self.initialized:
            return

        requested_eye_frame_idx = self.eye_world_frame_map[frame.index]
        # 1. do we need a new frame?
        if requested_eye_frame_idx != self.current_eye_frame.index:
            if requested_eye_frame_idx == self.source.get_frame_index() + 2:
                # if we just need to seek by one frame, it's faster to just read one and throw it away.
                self.source.get_frame()
            if requested_eye_frame_idx != self.source.get_frame_index() + 1:
                self.source.seek_to_frame(int(requested_eye_frame_idx))

            try:
                self.current_eye_frame = self.source.get_frame()
            except EndofVideoError:
                logger.info(
                    "Reached the end of the eye video for eye {}.".format(
                        self.eyeid))

        # 2. dragging image
        if self.drag_offset is not None:
            x, y = glfwGetCursorPos(glfwGetCurrentContext())
            pos = x * self.hdpi_fac, y * self.hdpi_fac
            pos = normalize(pos, self.g_pool.camera_render_size)
            # Position in img pixels
            pos = denormalize(pos, (frame.img.shape[1], frame.img.shape[0]))
            self.pos = (
                int(pos[0] + self.drag_offset[0]),
                int(pos[1] + self.drag_offset[1]),
            )

        # 3. keep in image bounds, do this even when not dragging because the video size could change.
        video_size = (
            round(self.current_eye_frame.width * scale),
            round(self.current_eye_frame.height * scale),
        )

        # frame.img.shape[0] is height, frame.img.shape[1] is width of screen
        self.pos = (
            min(frame.img.shape[1] - video_size[0], max(self.pos[0], 0)),
            min(frame.img.shape[0] - video_size[1], max(self.pos[1], 0)),
        )

        # 4. flipping images, converting to greyscale
        eyeimage = self.current_eye_frame.gray
        eyeimage = cv2.cvtColor(eyeimage, cv2.COLOR_GRAY2BGR)

        if show_ellipses:
            try:
                pp = next(
                    (pp for pp in pupil_positions if pp["id"] == self.eyeid
                     and pp["timestamp"] == self.current_eye_frame.timestamp))
            except StopIteration:
                pass
            else:
                draw_pupil_on_image(eyeimage, pp)

        # flip and scale
        eyeimage = cv2.resize(eyeimage, (0, 0), fx=scale, fy=scale)
        if self.hflip:
            eyeimage = np.fliplr(eyeimage)
        if self.vflip:
            eyeimage = np.flipud(eyeimage)

        transparent_image_overlay(self.pos, eyeimage, frame.img, alpha)

    def on_click(self, pos, button, action, hdpi_fac, eye_scale):
        self.hdpi_fac = hdpi_fac
        if not self.initialized:
            return False  # click event has not been consumed

        video_size = (
            round(self.current_eye_frame.width * eye_scale),
            round(self.current_eye_frame.height * eye_scale),
        )

        if (self.pos[0] < pos[0] < self.pos[0] + video_size[0]
                and self.pos[1] < pos[1] < self.pos[1] + video_size[1]):
            self.drag_offset = self.pos[0] - pos[0], self.pos[1] - pos[1]
            return True
        else:
            self.drag_offset = None
            return False
Code Example #9
    def update_cache_hack(self):
        from video_capture import File_Source, EndofVideoError, FileSeekError
        from screen_detector_methods import detect_screens

        def put_in_cache(frame_idx, detected_screen):
            print(frame_idx)
            visited_list[frame_idx] = True
            self.cache.update(frame_idx, detected_screen)
            for s in self.surfaces:
                s.update_cache(self.cache,
                               min_marker_perimeter=self.min_marker_perimeter,
                               min_id_confidence=self.min_id_confidence,
                               idx=frame_idx)

        def next_unvisited_idx(frame_idx):
            try:
                visited = visited_list[frame_idx]
            except IndexError:
                visited = True  # trigger search

            if not visited:
                next_unvisited = frame_idx
            else:
                # find next unvisited site in the future
                try:
                    next_unvisited = visited_list.index(False, frame_idx)
                except ValueError:
                    # any thing in the past?
                    try:
                        next_unvisited = visited_list.index(
                            False, 0, frame_idx)
                    except ValueError:
                        #no unvisited sites left. Done!
                        #logger.debug("Caching completed.")
                        next_unvisited = None
            return next_unvisited

        def handle_frame(next_frame):
            if next_frame != cap.get_frame_index():
                #we need to seek:
                #logger.debug("Seeking to Frame %s" %next_frame)
                try:
                    cap.seek_to_frame(next_frame)
                except FileSeekError:
                    put_in_cache(
                        next_frame,
                        [])  # we cannot look at the frame, report no detection
                    return
                #seeking invalidates prev markers for the detector
                # markers[:] = []

            try:
                frame = cap.get_frame()
            except EndofVideoError:
                put_in_cache(
                    next_frame,
                    [])  # we cannot look at the frame, report no detection
                return

            put_in_cache(frame.index, detect_screens(frame.gray))

        self.cacher_seek_idx = 0
        visited_list = [False for x in self.cache]
        # markers = []
        cap = File_Source(Global_Container(), self.g_pool.capture.source_path)

        for _ in self.cache:
            next_frame = cap.get_frame_index()
            if next_frame is None or next_frame >= len(self.cache):
                #we are done here:
                break
            else:
                handle_frame(next_frame)
Code Example #10
def fill_cache(visited_list, video_file_path, q, seek_idx, run,
               min_marker_perimeter, invert_image):
    """
    this function is part of marker_detector it is run as a seperate process.
    it must be kept in a seperate file for namespace sanatisation
    """
    import os
    import logging

    logger = logging.getLogger(__name__ + " with pid: " + str(os.getpid()))
    logger.debug("Started cacher process for Marker Detector")
    from video_capture import File_Source, EndofVideoError, FileSeekError
    from square_marker_detect import detect_markers_robust

    aperture = 9
    markers = []
    cap = File_Source(SimpleNamespace(),
                      source_path=video_file_path,
                      timing=None,
                      fill_gaps=True)

    def next_unvisited_idx(frame_idx):
        try:
            visited = visited_list[frame_idx]
        except IndexError:
            visited = True  # trigger search

        if not visited:
            next_unvisited = frame_idx
        else:
            # find next unvisited site in the future
            try:
                next_unvisited = visited_list.index(False, frame_idx)
            except ValueError:
                # any thing in the past?
                try:
                    next_unvisited = visited_list.index(False, 0, frame_idx)
                except ValueError:
                    # no unvisited sites left. Done!
                    logger.debug("Caching completed.")
                    next_unvisited = None
        return next_unvisited

    def handle_frame(next_frame):
        if next_frame != cap.get_frame_index() + 1:
            # we need to seek:
            logger.debug("Seeking to Frame {}".format(next_frame))
            try:
                cap.seek_to_frame(next_frame)
            except FileSeekError:
                # could not seek to requested position
                logger.warning(
                    "Could not evaluate frame: {}.".format(next_frame))
                visited_list[next_frame] = True  # this frame is now visited.
                q.put((next_frame,
                       []))  # we cannot look at the frame, report no detection
                return
            # seeking invalidates prev markers for the detector
            markers[:] = []

        try:
            frame = cap.get_frame()
        except EndofVideoError:
            logger.debug("Video File's last frame(s) not accesible")

            # could not read frame
            logger.warning("Could not evaluate frame: {}.".format(next_frame))
            visited_list[next_frame] = True  # this frame is now visited.
            q.put((next_frame,
                   []))  # we cannot look at the frame, report no detection
            return

        markers[:] = detect_markers_robust(
            frame.gray,
            grid_size=5,
            prev_markers=markers,
            min_marker_perimeter=min_marker_perimeter,
            aperture=aperture,
            visualize=0,
            true_detect_every_frame=1,
            invert_image=invert_image,
        )

        visited_list[frame.index] = True
        q.put(
            (frame.index, markers[:])
        )  # object passed will only be pickled when collected from the other process! need to make a copy to avoid overwrite!!!

    while run.value:
        next_frame = cap.get_frame_index()
        if seek_idx.value != -1:
            next_frame = seek_idx.value
            seek_idx.value = -1
            logger.debug(
                "User required seek. Marker caching at Frame: {}".format(
                    next_frame))
        # Uncomment the following lines to skip gap frames instead of processing them
        # target_entry = cap.videoset.lookup[next_frame]
        # if target_entry.container_idx == -1:
        #     continue

        # check the visited list
        next_frame = next_unvisited_idx(next_frame)
        if next_frame is None:
            # we are done here:
            break
        else:
            handle_frame(next_frame)

    logger.debug("Closing Cacher Process")
    cap.cleanup()
    q.close()
    run.value = False
    return