# Example #1
# 0
class TestCalibrationDataFiler(unittest.TestCase):
    '''Round-trip tests for CalibrationDataFiler yml save/load.

    Each test starts from a clean slate: setUp removes any data dir and
    top-level dir left over from a previous run.
    '''

    TOP_LEVEL_PATH = Path('.') / 'data/test_data/test_calibration_data'
    DATA_DIR_NAME = 'test_session_calibration_data'
    DATA_DIR_PATH = TOP_LEVEL_PATH / DATA_DIR_NAME
    DATA_FILENAME = CalibrationDataFiler.PAN_MOTOR_TIMEBASE_ALIGNMENT
    DATA_FILE_PATH = DATA_DIR_PATH / DATA_FILENAME

    TEST_OBJ = {'string_a': 'a string', 'list_a': [1, 2, 3, 4], 'int': 1}

    def setUp(self):
        self.cdf = CalibrationDataFiler(self.__class__.TOP_LEVEL_PATH)
        self.remove_data_dir_and_top_level_dir()

    def test_ensure_data_dir(self):
        # Consistently use pathlib queries (was a mix of os.path and pathlib).
        cls = self.__class__
        self.assertFalse(cls.DATA_DIR_PATH.is_dir())
        self.assertFalse(cls.TOP_LEVEL_PATH.is_dir())
        self.cdf._ensure_data_dir(cls.DATA_DIR_NAME)  # pylint: disable=W0212
        self.assertTrue(cls.DATA_DIR_PATH.is_dir())
        self.assertTrue(cls.TOP_LEVEL_PATH.is_dir())

    def test_save_as_yml(self):
        cls = self.__class__
        self.assertFalse(cls.DATA_FILE_PATH.is_file())
        self.save_as_yml()
        self.assertTrue(cls.DATA_FILE_PATH.is_file())

    def test_load(self):
        self.save_as_yml()
        obj = self.load()
        self.assertDictEqual(obj, self.__class__.TEST_OBJ)

    def test_load_with_no_file_returns_none(self):
        self.assertIsNone(self.load())

    def remove_data_dir_and_top_level_dir(self):
        '''Delete any test data files, then the data dir and top-level dir.'''
        cls = self.__class__
        if cls.DATA_DIR_PATH.is_dir():
            for file_path in cls.DATA_DIR_PATH.iterdir():
                if file_path.is_file():
                    file_path.unlink()

        if cls.DATA_DIR_PATH.is_dir():
            cls.DATA_DIR_PATH.rmdir()
        if cls.TOP_LEVEL_PATH.is_dir():
            cls.TOP_LEVEL_PATH.rmdir()

    def save_as_yml(self):
        '''Save TEST_OBJ through the filer under the test dir and filename.'''
        self.cdf.save_as_yml(
            self.__class__.TEST_OBJ,
            self.__class__.DATA_DIR_NAME,
            self.__class__.DATA_FILENAME,
        )

    def load(self):
        '''Load the test file through the filer (None when file is absent).'''
        return self.cdf.load(
            self.__class__.DATA_DIR_NAME,
            self.__class__.DATA_FILENAME,
        )
    def __init__(self,
                 results_head_path=Path('./data/calibration_data'),
                 results_data_dir_name='frame_rate_calculation'):
        '''Set up filer, image grabber, and lazy-init result slots.

        Args:
            results_head_path: root under which result yml files are written.
            results_data_dir_name: sub-directory name for this calculation.
        '''
        # Bug fix: .resolve() must be *called* in the message; previously the
        # bound method object itself was interpolated into the f-string.
        assert self.__class__.CLOCK_VIDEO_PATH.resolve().is_file(), \
               f'No video found at {self.__class__.CLOCK_VIDEO_PATH.resolve()}'

        self._results_head_path = results_head_path
        self._results_data_dir_name = results_data_dir_name

        self._ifvg = ImageFromVideoGrabber(self.__class__.CLOCK_VIDEO_PATH)
        self._cdf = CalibrationDataFiler(self._results_head_path)

        # Skip the first few frames; some encoders emit odd leading frames.
        self._start_frame_num = 10

        # Lazy init
        self._results = None
        self._end_frame_num = None
    def __init__(
            self,
            session_dir: str,
            input_data_head_path=Path('/Volumes/WD'),
            results_head_path=Path('.') / 'data/calibration_data',
            cache_dir_path=Path('/Volumes/WD/image_cache'),
            num_points=1,
    ):
        '''Store paths and settings; all data loading happens lazily.

        Args:
            session_dir: session directory name under input_data_head_path.
            input_data_head_path: root of the raw input data tree.
            results_head_path: root under which result yml files are written.
            cache_dir_path: directory used by the video image cache.
            num_points: number of motor extrema to align per min/max pass.
        '''
        self._session_dir = session_dir
        self._input_data_head_path = input_data_head_path
        self._results_head_path = results_head_path
        self._cache_dir_path = cache_dir_path
        self._num_points = num_points

        # Helpers
        self._ldfh = LDFH(self._input_data_head_path)
        self._calibration_data_filer = CalibrationDataFiler(
            self._results_head_path)

        # Plot annotation settings for the extrema visualization.
        self.maxima_text_color = 'red'
        self.minima_text_color = 'green'
        self.fontsize = 8

        # Lazy initializers
        self._pan_motor_read_data_series = None
        self._base_time_series = None
        self._motor_maxima = None
        self._motor_minima = None
        self._motor_maxima_from_series = None
        self._motor_minima_from_series = None
        self._motor_maxima_from_archive = None
        self._motor_minima_from_archive = None
        self._archived_results = None
        self._selected_frames_from_archive = None

        # State (current extremum being scrubbed by the user)
        self._base_time = None
        self._motor_idx = None
        self._min_or_max = None
        self._alignment_results = []
    def __init__(self,
                 session_dir: str,
                 input_data_head_path=Path('/Volumes/WD'),
                 results_head_path=Path('.')  / 'data/calibration_data',
                 cache_dir_path=Path('/Volumes/WD/image_cache'),
                ):
        '''Store paths and build helpers; series data is loaded lazily.

        Args:
            session_dir: session directory name under input_data_head_path.
            input_data_head_path: root of the raw input data tree.
            results_head_path: root under which result yml files are written.
            cache_dir_path: directory used by the video image cache.
        '''
        self._session_dir = session_dir
        self._input_data_head_path = input_data_head_path
        self._results_head_path = results_head_path
        self._cache_dir_path = cache_dir_path

        # helpers
        self._ldfh = LDFH(self._input_data_head_path)
        self._calibration_data_filer = CalibrationDataFiler(self._results_head_path)

        # state
        self._alignment_data = []

        # lazy inits
        self._latitude_series = None
        self._longitude_series = None
        self._time_series = None
    def __init__(
            self,
            session_dir: str,
            input_data_head_path=Path('/Volumes/WD'),
            results_head_path=Path('.') / 'data/calibration_data',
            cache_dir_path=Path('/Volumes/WD/image_cache'),
    ):
        '''Store paths and settings; fov series data is loaded lazily.

        Args:
            session_dir: session directory name under input_data_head_path.
            input_data_head_path: root of the raw input data tree.
            results_head_path: root under which result yml files are written.
            cache_dir_path: directory used by the video image cache.
        '''
        self._session_dir = session_dir
        self._input_data_head_path = input_data_head_path
        self._results_head_path = results_head_path
        self._cache_dir_path = cache_dir_path

        # settings
        self._anotation_color = 'green'
        self._anotation_fontsize = '8'

        self._ldfh = LDFH(self._input_data_head_path)
        self._calibration_data_filer = CalibrationDataFiler(
            self._results_head_path)

        # lazy inits
        self._fov_time_series = None
        self._fov_data_series = None
        self._unique_fovs = None
        self._transition_indexes = None
        self._transition_levels = None
        self._unique_transitions = None

        # state (accumulated while the user scrubs each transition)
        self._alignment_results = {}
        self._current_transition = None
        self._alignment_stats_delays = []
        self._alignment_stats_durations = []
        self._alignment_stats_zoom_out_delays = []
        self._alignment_stats_zoom_out_durations = []
        self._alignment_stats_zoom_in_delays = []
        self._alignment_stats_zoom_in_durations = []
class FrameRateCalculator:
    '''Calculates the frame rate based on a video of a clock.

    Workflow (run()): a results yml template is written, the first and last
    usable frames of the clock video are shown so the user can transcribe the
    clock readings into the yml by hand, and the frame rate is then computed
    from (frame count delta) / (clock time delta) and saved back to the yml.
    '''

    CLOCK_VIDEO_PATH = Path(
        '/Volumes/WD/clock_video/SS3_VIDEO_2018_10_19_105439-1.MP4')

    # Keys for the hand-entered section of the results yml.
    INPUT_DATA = 'input_data'
    START = 'start'
    END = 'end'
    HOURS = 'hours'
    MINUTES = 'minutes'
    SECONDS = 'seconds'

    CALCULATED_RESULTS = 'calculated_results'

    # Skeleton written on first run for the user to fill in.
    RESULTS_TEMPLATE = {
        INPUT_DATA: {
            START: {
                HOURS: None,
                MINUTES: None,
                SECONDS: None,
            },
            END: {
                HOURS: None,
                MINUTES: None,
                SECONDS: None,
            },
        },
        CALCULATED_RESULTS: None,
    }

    # Keys for the calculated section of the results yml.
    START_FRAME_NUM = 'start_frame_num'
    END_FRAME_NUM = 'end_frame_num'
    NUM_FRAMES = 'num_frames'
    START_TIME_MS = 'start_time_ms'
    END_TIME_MS = 'end_time_ms'
    TIME_DIFF = 'time_diff'
    FRAME_RATE = 'frame_rate'
    HEADER_FPS_CV2 = 'header_fps_cv2'
    VIDEO_ID = 'video_id'
    VIDEO_PATH = 'video_path'

    def __init__(self,
                 results_head_path=Path('./data/calibration_data'),
                 results_data_dir_name='frame_rate_calculation'):
        '''Set up filer, image grabber, and lazy-init result slots.

        Args:
            results_head_path: root under which result yml files are written.
            results_data_dir_name: sub-directory name for this calculation.
        '''
        # Bug fix: .resolve() must be *called* in the message; previously the
        # bound method object itself was interpolated into the f-string.
        assert self.__class__.CLOCK_VIDEO_PATH.resolve().is_file(), \
               f'No video found at {self.__class__.CLOCK_VIDEO_PATH.resolve()}'

        self._results_head_path = results_head_path
        self._results_data_dir_name = results_data_dir_name

        self._ifvg = ImageFromVideoGrabber(self.__class__.CLOCK_VIDEO_PATH)
        self._cdf = CalibrationDataFiler(self._results_head_path)

        # Skip the first few frames; avoids odd leading frames.
        self._start_frame_num = 10

        # Lazy init
        self._results = None
        self._end_frame_num = None

    def _get_ifvs(self):
        '''Return the [start, end] ImageFromVideo pair shown to the user.'''
        r = [
            self._ifvg.get_image_at_frame_num(self._start_frame_num),
            self._ifvg.get_image_at_frame_num(self._get_end_frame_num())
        ]
        assert r[1].get_frame_num() == self._get_end_frame_num()
        return r

    def _get_end_frame_num(self):
        '''Last usable frame number (lazy); stays clear of the video's tail.'''
        if self._end_frame_num is None:
            self._end_frame_num = self._ifvg.get_frame_count() - 10
        return self._end_frame_num

    def _setup_results_file(self):
        '''Write the results template yml once, if no results exist yet.'''
        if self._get_results() is None:
            self._cdf.save_as_yml(
                self.__class__.RESULTS_TEMPLATE,
                self._results_data_dir_name,
                CalibrationDataFiler.FRAME_RATE_CALCULATION,
            )
        return self

    def _get_results(self):
        '''Load the results yml (lazy); None when the file does not exist.'''
        if self._results is None:
            self._results = self._cdf.load(
                self._results_data_dir_name,
                CalibrationDataFiler.FRAME_RATE_CALCULATION,
            )
        return self._results

    def _get_calculated_results(self):
        '''Derive frame-rate results from the hand-entered clock readings.

        Returns the full results object with CALCULATED_RESULTS filled in.
        '''
        cls = self.__class__
        r = {}
        r[cls.START_FRAME_NUM] = self._start_frame_num
        r[cls.END_FRAME_NUM] = self._get_end_frame_num()
        r[cls.NUM_FRAMES] = self._get_end_frame_num() - self._start_frame_num
        r[cls.START_TIME_MS] = self._get_ms_from_hms(
            self._get_input_data(cls.START, cls.HOURS),
            self._get_input_data(cls.START, cls.MINUTES),
            self._get_input_data(cls.START, cls.SECONDS),
        )
        r[cls.END_TIME_MS] = self._get_ms_from_hms(
            self._get_input_data(cls.END, cls.HOURS),
            self._get_input_data(cls.END, cls.MINUTES),
            self._get_input_data(cls.END, cls.SECONDS),
        )
        r[cls.TIME_DIFF] = r[cls.END_TIME_MS] - r[cls.START_TIME_MS]
        # frames per millisecond scaled to frames per second
        r[cls.FRAME_RATE] = 1000 * r[cls.NUM_FRAMES] / r[cls.TIME_DIFF]
        r[cls.HEADER_FPS_CV2] = self._ifvg.get_frame_rate()
        r[cls.VIDEO_ID] = self._ifvg.get_video_id()
        r[cls.VIDEO_PATH] = str(cls.CLOCK_VIDEO_PATH.resolve())

        results = self._get_results()
        results[cls.CALCULATED_RESULTS] = r
        return results

    def _save_calculated_results(self):
        '''Persist the calculated results back into the results yml.'''
        self._cdf.save_as_yml(
            self._get_calculated_results(),
            self._results_data_dir_name,
            CalibrationDataFiler.FRAME_RATE_CALCULATION,
        )
        return self

    def _get_ms_from_hms(self, hours: int, minutes: int, seconds: float):
        '''Convert a clock reading to integer milliseconds.'''
        return int(hours * 60 * 60 * 1000 +
                   minutes * 60 * 1000 +
                   seconds * 1000)

    def _get_input_data(self, start_end, hms):
        '''Read one hand-entered value (START/END x HOURS/MINUTES/SECONDS).'''
        return self._get_results()[self.__class__.INPUT_DATA][start_end][hms]

    def _show_ifvs(self):
        '''Display the start/end clock frames for the user to read.'''
        VerticalImageList(self._get_ifvs()).run()

    def run(self):
        '''Entry point: template, user inspection, then save results.'''
        self._setup_results_file()
        self._show_ifvs()
        self._save_calculated_results()
class FovTimebaseAligner:
    '''Determines with user assistance via scrubber picker statistics for offset in
       time between fov timebase and video time'''

    AGGREGATE_RESULTS = 'aggregate_results'
    UNIQUE_FOVS = 'unique_fovs'
    UNIQUE_TRANSITIONS = 'unique_transitions'

    INDIVIDUAL_RESULTS = 'individual_results'

    FOV_DATA = 'fov_data'
    TRANSITION_LEVELS = 'transition_levels'
    TRANSITION_INDEX = 'transition_index'
    TRANSITION_TIME = 'transition_time'

    BEGIN = 'begin'
    END = 'end'

    ALL = 'all'
    ZOOM_IN = 'zoom_in'
    ZOOM_OUT = 'zoom_out'

    VIDEO_DATA = 'video_data'
    FRAME_NUM = 'frame_num'
    TIME_MS = 'time_ms'
    DURATION = 'duration'
    FRAMES = 'frames'

    ALIGNMENT = 'alignment'
    DELAY_FOV_TO_VIDEO = 'delay_fov_to_video'

    ALIGNMENT_STATS = 'alignment_stats'
    DURATION = 'duration'
    DELAY = 'delay'
    MIN = 'min'
    MAX = 'max'
    MEAN = 'mean'

    def __init__(
            self,
            session_dir: str,
            input_data_head_path=Path('/Volumes/WD'),
            results_head_path=Path('.') / 'data/calibration_data',
            cache_dir_path=Path('/Volumes/WD/image_cache'),
    ):
        self._session_dir = session_dir
        self._input_data_head_path = input_data_head_path
        self._results_head_path = results_head_path
        self._cache_dir_path = cache_dir_path

        # settings
        self._anotation_color = 'green'
        self._anotation_fontsize = '8'

        self._ldfh = LDFH(self._input_data_head_path)
        self._calibration_data_filer = CalibrationDataFiler(
            self._results_head_path)

        # lazy inits
        self._fov_time_series = None
        self._fov_data_series = None
        self._unique_fovs = None
        self._transition_indexes = None
        self._transition_levels = None
        self._unique_transitions = None

        # state
        self._alignment_results = {}
        self._current_transition = None
        self._alignment_stats_delays = []
        self._alignment_stats_durations = []
        self._alignment_stats_zoom_out_delays = []
        self._alignment_stats_zoom_out_durations = []
        self._alignment_stats_zoom_in_delays = []
        self._alignment_stats_zoom_in_durations = []

    def visualize(self):
        fig, axis = plt.subplots()  # pylint: disable=W0612
        axis.plot(self.get_normalized_fov_time(), self.get_fov_data_series())

        for key, hsh in self.get_unique_transitions().items():
            axis.text(
                self.get_normalized_fov_time()[hsh[
                    self.__class__.TRANSITION_INDEX]],
                hsh[self.__class__.TRANSITION_LEVELS][0],
                key,
                color=self._anotation_color,
                fontsize=self._anotation_fontsize,
            )

        plt.show()

    def get_fov_time_series(self):
        if self._fov_time_series is None:
            self._fov_time_series =\
                self._ldfh.get_field_from_npz_file(self._session_dir,
                                                   LDFH.LENS_NPZ_FILE,
                                                   LDFH.LENS_TIME_FIELD,
                                                  )
        return self._fov_time_series

    def get_fov_data_series(self):
        if self._fov_data_series is None:
            self._fov_data_series =\
                self._ldfh.get_field_from_npz_file(self._session_dir,
                                                   LDFH.LENS_NPZ_FILE,
                                                   LDFH.LENS_FOV_FIELD,
                                                  )
        return self._fov_data_series

    def get_normalized_fov_time(self):
        return self.get_fov_time_series() - self.get_fov_data_series()[0]

    def get_unique_fovs(self):
        if self._unique_fovs is None:
            self._unique_fovs = np.unique(self.get_fov_data_series())
        return self._unique_fovs

    def get_fov_transition_indexes(self):
        if self._transition_indexes is None:
            self._transition_indexes = self.get_fov_data_series().transitions()
        return self._transition_indexes

    def get_fov_transition_levels(self):
        if self._transition_levels is None:
            self._transition_levels =\
                self.get_fov_data_series().levels_around_transitions(self.get_fov_transition_indexes())
        return self._transition_levels

    def get_unique_transitions(self):
        if self._unique_transitions is None:
            r = {}
            for idx, transition_index in enumerate(
                    self.get_fov_transition_indexes()):
                transition_levels = self.get_fov_transition_levels()[idx]

                if str(transition_levels) in r:
                    continue

                r[str(transition_levels)] = {
                    self.__class__.TRANSITION_LEVELS:
                    transition_levels,
                    self.__class__.TRANSITION_INDEX:
                    transition_index,
                    self.__class__.TRANSITION_TIME:
                    self.get_fov_time_series()[transition_index],
                }

            self._unique_transitions = r
        return self._unique_transitions

    def show_scrub_picker_for_transitions(self):
        for _, transition in self.get_unique_transitions().items():
            self._current_transition = transition
            self.show_scrub_picker_for_time(
                transition[self.__class__.TRANSITION_INDEX],
                transition[self.__class__.TRANSITION_TIME])

    def show_scrub_picker_for_time(self, idx, time):
        ScrubPicker(
            images_from_video=self.get_images_around_time(time),
            selector_type=ScrubPicker.SELECT_RANGE,
            callback=self._scrubber_callback,
            selected_start_frame_num=None,
        ).run()

    def get_images_around_time(self, time):
        return self.get_ifv_grabber().get_images_around_time_ms(time,
                                                                before=50,
                                                                after=80)

    def get_ifv_grabber(self) -> ImageFromVideoGrabber:
        return ImageFromVideoGrabber(self.get_video_path(),
                                     cache_dir_path=self._cache_dir_path)

    def get_video_path(self):
        return self._ldfh.get_video_path(self._session_dir)

    def _get_aggregate_data(self):
        r = {
            self.__class__.UNIQUE_FOVS:
            self.get_unique_fovs().tolist(),
            self.__class__.UNIQUE_TRANSITIONS: [
                self._get_name_for_transition(transition)
                for _, transition in self.get_unique_transitions().items()
            ],
            self.__class__.ALIGNMENT_STATS:
            self._get_alignment_stats(),
        }
        return r

    def _get_alignment_stats(self):
        return {
            self.__class__.ALL: {
                self.__class__.DELAY: {
                    self.__class__.MIN:
                    int(np.min(np.array(self._alignment_stats_delays))),
                    self.__class__.MAX:
                    int(np.max(np.array(self._alignment_stats_delays))),
                    self.__class__.MEAN:
                    int(np.mean(np.array(self._alignment_stats_delays))),
                },
                self.__class__.DURATION: {
                    self.__class__.MIN:
                    int(np.min(np.array(self._alignment_stats_durations))),
                    self.__class__.MAX:
                    int(np.max(np.array(self._alignment_stats_durations))),
                    self.__class__.MEAN:
                    int(np.mean(np.array(self._alignment_stats_durations))),
                },
            },
            self.__class__.ZOOM_IN: {
                self.__class__.DELAY: {
                    self.__class__.MIN:
                    int(np.min(np.array(
                        self._alignment_stats_zoom_in_delays))),
                    self.__class__.MAX:
                    int(np.max(np.array(
                        self._alignment_stats_zoom_in_delays))),
                    self.__class__.MEAN:
                    int(np.mean(np.array(
                        self._alignment_stats_zoom_in_delays))),
                },
                self.__class__.DURATION: {
                    self.__class__.MIN:
                    int(
                        np.min(
                            np.array(
                                self._alignment_stats_zoom_in_durations))),
                    self.__class__.MAX:
                    int(
                        np.max(
                            np.array(
                                self._alignment_stats_zoom_in_durations))),
                    self.__class__.MEAN:
                    int(
                        np.mean(
                            np.array(
                                self._alignment_stats_zoom_in_durations))),
                },
            },
            self.__class__.ZOOM_OUT: {
                self.__class__.DELAY: {
                    self.__class__.MIN:
                    int(np.min(np.array(
                        self._alignment_stats_zoom_out_delays))),
                    self.__class__.MAX:
                    int(np.max(np.array(
                        self._alignment_stats_zoom_out_delays))),
                    self.__class__.MEAN:
                    int(
                        np.mean(np.array(
                            self._alignment_stats_zoom_out_delays))),
                },
                self.__class__.DURATION: {
                    self.__class__.MIN:
                    int(
                        np.min(
                            np.array(
                                self._alignment_stats_zoom_out_durations))),
                    self.__class__.MAX:
                    int(
                        np.max(
                            np.array(
                                self._alignment_stats_zoom_out_durations))),
                    self.__class__.MEAN:
                    int(
                        np.mean(
                            np.array(
                                self._alignment_stats_zoom_out_durations))),
                },
            },
        }

    def _get_results_object(self):
        r = {}
        r[self.__class__.AGGREGATE_RESULTS] = self._get_aggregate_data()
        r[self.__class__.INDIVIDUAL_RESULTS] = self._alignment_results
        return r

    def save_results(self):
        self._calibration_data_filer.save_as_yml(
            self._get_results_object(),
            self._session_dir,
            CalibrationDataFiler.FOV_TIMEBASE_ALIGNMENT,
        )

    def _scrubber_callback(self, selection: list):
        delay_fov_to_video = \
            int(selection[0].get_time_ms() - self._current_transition[self.__class__.TRANSITION_TIME])

        duration = self._get_duration_ms(selection[0], selection[1])

        r = {
            self.__class__.VIDEO_DATA: {
                self.__class__.BEGIN: self._get_ifv_info(selection[0]),
                self.__class__.END: self._get_ifv_info(selection[1]),
                self.__class__.DURATION: {
                    self.__class__.TIME_MS:
                    duration,
                    self.__class__.FRAMES:
                    self._get_duration_frames(selection[0], selection[1]),
                },
            },
            self.__class__.FOV_DATA: {
                self.__class__.TRANSITION_INDEX:
                int(self._current_transition[self.__class__.TRANSITION_INDEX]),
                self.__class__.TRANSITION_TIME:
                int(self._current_transition[self.__class__.TRANSITION_TIME]),
            },
            self.__class__.ALIGNMENT: {
                self.__class__.DELAY_FOV_TO_VIDEO: delay_fov_to_video,
            },
        }
        self._alignment_results[self._get_name_for_transition(
            self._current_transition)] = r
        self._alignment_stats_delays.append(delay_fov_to_video)
        self._alignment_stats_durations.append(duration)

        if self._get_zoom_type_for_transition(
                self._current_transition) == self.__class__.ZOOM_IN:
            self._alignment_stats_zoom_in_delays.append(delay_fov_to_video)
            self._alignment_stats_zoom_in_durations.append(duration)
        else:
            self._alignment_stats_zoom_out_delays.append(delay_fov_to_video)
            self._alignment_stats_zoom_out_durations.append(duration)

    def _get_ifv_info(self, ivf):
        return {
            self.__class__.FRAME_NUM: ivf.get_frame_num(),
            self.__class__.TIME_MS: ivf.get_time_ms(),
        }

    def _get_duration_ms(self, begin_ifv: ImageFromVideo,
                         end_ifv: ImageFromVideo):
        return end_ifv.get_time_ms() - begin_ifv.get_time_ms()

    def _get_duration_frames(self, begin_ifv: ImageFromVideo,
                             end_ifv: ImageFromVideo):
        return end_ifv.get_frame_num() - begin_ifv.get_frame_num()

    def _get_name_for_transition(self, transition):
        return (f'{self._get_zoom_type_for_transition(transition)}: '
                f'{str(transition[self.__class__.TRANSITION_LEVELS])}')

    def _get_zoom_type_for_transition(self, transition: dict):
        if transition[self.__class__.TRANSITION_LEVELS][0] < transition[
                self.__class__.TRANSITION_LEVELS][1]:
            return self.__class__.ZOOM_OUT
        else:
            return self.__class__.ZOOM_IN

    def run(self):
        self.show_scrub_picker_for_transitions()
        self.save_results()
class PanMotorTimeBaseAligner:
    '''Determines with user assistance via scrubber picker statistics for offset in
       time between pan_motor_read timebase and video time'''

    # Top-level keys of the saved results yml.
    RAW_RESULTS = 'raw_results'
    AGGREGATE_RESULTS = 'aggregate_results'

    # Keys within an individual alignment result.
    MIN_OR_MAX = 'min_or_max'
    MIN = 'min'
    MAX = 'max'
    MEAN = 'mean'
    N = 'n'
    DATA = 'data'

    DELAY_MOTOR_TO_VIDEO = 'delay_motor_to_video'
    BASE_TIME = 'base_time'
    MOTOR_IDX = 'motor_idx'

    # Keys for the aggregate stats buckets.
    PAN_LOCAL_MINIMA = 'pan_local_minima'
    PAN_LOCAL_MAXIMA = 'pan_local_maxima'
    PAN_COMBINED = 'pan_combined'

    def __init__(
            self,
            session_dir: str,
            input_data_head_path=Path('/Volumes/WD'),
            results_head_path=Path('.') / 'data/calibration_data',
            cache_dir_path=Path('/Volumes/WD/image_cache'),
            num_points=1,
    ):
        '''Store paths and settings; all data loading happens lazily.

        Args:
            session_dir: session directory name under input_data_head_path.
            input_data_head_path: root of the raw input data tree.
            results_head_path: root under which result yml files are written.
            cache_dir_path: directory used by the video image cache.
            num_points: number of motor extrema to align per min/max pass.
        '''
        self._session_dir = session_dir
        self._input_data_head_path = input_data_head_path
        self._results_head_path = results_head_path
        self._cache_dir_path = cache_dir_path
        self._num_points = num_points

        # Helpers
        self._ldfh = LDFH(self._input_data_head_path)
        self._calibration_data_filer = CalibrationDataFiler(
            self._results_head_path)

        # Plot annotation settings for the extrema visualization.
        self.maxima_text_color = 'red'
        self.minima_text_color = 'green'
        self.fontsize = 8

        # Lazy initializers
        self._pan_motor_read_data_series = None
        self._base_time_series = None
        self._motor_maxima = None
        self._motor_minima = None
        self._motor_maxima_from_series = None
        self._motor_minima_from_series = None
        self._motor_maxima_from_archive = None
        self._motor_minima_from_archive = None
        self._archived_results = None
        self._selected_frames_from_archive = None

        # State (current extremum being scrubbed by the user)
        self._base_time = None
        self._motor_idx = None
        self._min_or_max = None
        self._alignment_results = []

    def visualize_motor_local_maxima_and_minima(self):
        '''Plot motor data vs normalized time and label detected extrema.

        Maxima labels use maxima_text_color, minima use minima_text_color;
        each is labeled with its ordinal so the user can relate plot features
        to scrub-picker prompts. (Dead commented-out steepness code removed.)
        '''
        fig, axis = plt.subplots()  # pylint: disable=W0612
        axis.plot(self.get_normalized_base_time(), self.get_motor_data())

        for idx, val in enumerate(self._get_motor_values_at_maxima()):
            axis.text(
                self.get_normalized_base_time()[self.get_motor_maxima()[idx]],
                val,
                str(idx),
                color=self.maxima_text_color,
                fontsize=self.fontsize,
            )

        for idx, val in enumerate(self._get_motor_values_at_minima()):
            axis.text(
                self.get_normalized_base_time()[self.get_motor_minima()[idx]],
                val,
                str(idx),
                color=self.minima_text_color,
                fontsize=self.fontsize,
            )

        plt.show()

    def get_motor_maxima(self):
        '''Indices of local maxima in the motor data (lazy).

        Prefers previously archived selections when their count matches the
        requested number of points; otherwise derives them from the series.
        '''
        if self._motor_maxima is None:
            archived = self._get_motor_maxima_from_archived_results()
            if archived is not None and archived.size == self._num_points:
                self._motor_maxima = archived
            else:
                self._motor_maxima = self._get_motor_maxima_from_series()
        return self._motor_maxima

    def _get_motor_maxima_from_series(self):
        '''First num_points maxima indices computed from the data series (lazy).'''
        if self._motor_maxima_from_series is None:
            self._motor_maxima_from_series = self.get_motor_data().\
                                           local_maxima_sorted_by_peakyness_and_monotonic_with_steep_shoulders(8, 4, 2) # pylint: disable=C0301
        return self._motor_maxima_from_series[:self._num_points]

    def _get_motor_values_at_maxima(self):
        '''Motor data values at each detected maximum index.'''
        data = self.get_motor_data()
        return np.array([data[i] for i in self.get_motor_maxima()])

    def _get_time_at_maxima(self):
        '''(index, normalized base time) tuple per detected maximum.'''
        times = self.get_normalized_base_time()
        return [(idx, times[idx]) for idx in self.get_motor_maxima()]

    def get_motor_minima(self):
        '''Indices of local minima in the motor data (lazy).

        Prefers previously archived selections when their count matches the
        requested number of points; otherwise derives them from the series.
        '''
        if self._motor_minima is None:
            if self._get_motor_minima_from_archived_results() is not None and \
               self._get_motor_minima_from_archived_results().size == self._num_points:
                self._motor_minima = self._get_motor_minima_from_archived_results(
                )
            else:
                self._motor_minima = self._get_motor_minima_from_series()
        return self._motor_minima

    def _get_motor_minima_from_series(self):
        '''First num_points minima indices computed from the data series (lazy).'''
        if self._motor_minima_from_series is None:
            self._motor_minima_from_series = self.get_motor_data().\
                                           local_minima_sorted_by_peakyness_and_monotonic_with_steep_shoulders(8, 4, 2) # pylint: disable=C0301
        return self._motor_minima_from_series[:self._num_points]

    def _get_motor_values_at_minima(self):
        '''Motor data values at each detected minimum index.'''
        return np.array(
            [self.get_motor_data()[idx] for idx in self.get_motor_minima()])

    def _get_time_at_minima(self):
        '''(index, normalized base time) tuple per detected minimum.'''
        return [(minimum, self.get_normalized_base_time()[minimum])
                for minimum in self.get_motor_minima()]

    def show_scrub_picker_for_maxima(self):
        '''Run the scrub picker once per maximum, recording MAX state.

        Sets _min_or_max/_motor_idx/_base_time before each run so the
        _scrubber_callback can tag the user's selection.
        '''
        self._min_or_max = self.__class__.MAX
        for idx, time in self._get_time_at_maxima():
            self._motor_idx = idx
            self._base_time = time
            self.show_scrub_picker_for_time(idx, time)

    def show_scrub_picker_for_minima(self):
        '''Run the scrub picker once per minimum, recording MIN state.

        Sets _min_or_max/_motor_idx/_base_time before each run so the
        _scrubber_callback can tag the user's selection.
        '''
        self._min_or_max = self.__class__.MIN
        for idx, time in self._get_time_at_minima():
            self._motor_idx = idx
            self._base_time = time
            self.show_scrub_picker_for_time(idx, time)

    def show_scrub_picker_for_min_and_max(self):
        '''Scrub all minima, then all maxima.'''
        self.show_scrub_picker_for_minima()
        self.show_scrub_picker_for_maxima()

    def show_scrub_picker_for_time(self, motor_idx, time):
        '''Let the user pick the single video frame matching one extremum.

        Any frame previously archived for this motor index is preselected.
        '''
        preselected = self._get_selected_frame_num_for_motor_idx(motor_idx)
        picker = ScrubPicker(
            images_from_video=self.get_images_around_time(time),
            selector_type=ScrubPicker.SELECT_SINGLE_IMAGE,
            callback=self._scrubber_callback,
            selected_start_frame_num=preselected,
        )
        picker.run()

    def get_images_around_time(self, time):
        '''Video frames bracketing *time* (40 before, 40 after).'''
        grabber = self.get_ifv_grabber()
        return grabber.get_images_around_time_ms(time, before=40, after=40)

    def get_motor_data(self) -> np.ndarray:
        '''Pan motor read series from the base npz file (lazy).'''
        if self._pan_motor_read_data_series is None:
            self._pan_motor_read_data_series = \
                self._ldfh.get_field_from_npz_file(self._session_dir,
                                                   LDFH.BASE_NPZ_FILE,
                                                   LDFH.PAN_MOTOR_READ_FIELD,
                                                  )
        return self._pan_motor_read_data_series

    def get_base_time(self):
        """Lazily load and cache the base time series from the base npz."""
        if self._base_time_series is None:
            series = self._ldfh.get_field_from_npz_file(
                self._session_dir,
                LDFH.BASE_NPZ_FILE,
                LDFH.BASE_TIME_FIELD,
            )
            self._base_time_series = series
        return self._base_time_series

    def get_normalized_base_time(self):
        """Return the base time series shifted so its first sample is zero."""
        base = self.get_base_time()
        return base - base[0]

    def get_motor_data_time_tuples(self):
        # Pair each normalized base-time sample with the corresponding motor
        # reading; dstack((t, m))[0] yields rows of (time, motor_value).
        # Assumes both series are 1-D of equal length — TODO confirm against
        # the npz fields loaded in get_base_time()/get_motor_data().
        return np.dstack(
            (self.get_normalized_base_time(), self.get_motor_data()))[0]

    def get_video_path(self):
        """Return the path of this session's video via the data-file helper."""
        return self._ldfh.get_video_path(self._session_dir)

    def get_ifv_grabber(self) -> ImageFromVideoGrabber:
        """Build an ImageFromVideoGrabber for this session's video."""
        video_path = self.get_video_path()
        return ImageFromVideoGrabber(video_path,
                                     cache_dir_path=self._cache_dir_path)

    def _scrubber_callback(self, images_from_video: List[ImageFromVideo]):
        """Record the user's chosen frame plus the motor/time context.

        Called by ScrubPicker with the single selected image; appends one
        result dict to self._alignment_results.
        """
        cls = self.__class__
        ifv = images_from_video[0]
        base_time = int(self._base_time)
        result = ifv.get_as_dict()
        result[cls.MIN_OR_MAX] = self._min_or_max
        result[cls.MOTOR_IDX] = int(self._motor_idx)
        result[cls.BASE_TIME] = base_time
        result[cls.DELAY_MOTOR_TO_VIDEO] = ifv.get_time_ms() - base_time
        self._alignment_results.append(result)

    def _get_aggregate_data(self):
        """Aggregate per-pick delays into stats for minima, maxima, combined.

        Returns a dict keyed by the PAN_LOCAL_MINIMA / PAN_LOCAL_MAXIMA /
        PAN_COMBINED class constants; each value is the stats dict produced
        by _get_stats_from_data().
        """
        cls = self.__class__
        # Collect delays into plain lists first: np.append in a loop copies
        # the whole array on every call (accidental O(n^2)).
        min_delays = []
        max_delays = []
        for result in self._alignment_results:
            delay = result[cls.DELAY_MOTOR_TO_VIDEO]
            if result[cls.MIN_OR_MAX] == cls.MIN:
                min_delays.append(delay)
            else:
                max_delays.append(delay)
        # dtype=float matches the original behavior of appending into an
        # empty float64 array.
        local_min_data = np.array(min_delays, dtype=float)
        local_max_data = np.array(max_delays, dtype=float)
        combined_data = np.concatenate((local_min_data, local_max_data))

        return {
            cls.PAN_LOCAL_MINIMA: self._get_stats_from_data(local_min_data),
            cls.PAN_LOCAL_MAXIMA: self._get_stats_from_data(local_max_data),
            cls.PAN_COMBINED: self._get_stats_from_data(combined_data),
        }

    def _get_stats_from_data(self, data: np.ndarray):
        """Return n / data / min / max / mean stats for a 1-D delay array.

        np.min/np.max raise ValueError and int(np.mean(...)) fails on nan
        for an empty array, so an empty input (no picks of that kind) now
        yields None for the min/max/mean fields instead of crashing.
        """
        cls = self.__class__
        r = {}
        r[cls.N] = int(data.size)
        r[cls.DATA] = data.tolist()
        if data.size == 0:
            r[cls.MIN] = None
            r[cls.MAX] = None
            r[cls.MEAN] = None
        else:
            r[cls.MIN] = int(np.min(data))
            r[cls.MAX] = int(np.max(data))
            r[cls.MEAN] = int(np.mean(data))
        return r

    def _get_results_object(self):
        """Bundle raw picks and aggregate stats into one serializable dict."""
        return {
            self.__class__.RAW_RESULTS: self._alignment_results,
            self.__class__.AGGREGATE_RESULTS: self._get_aggregate_data(),
        }

    def save_results(self):
        """Persist the alignment results as YAML via the calibration filer."""
        results = self._get_results_object()
        self._calibration_data_filer.save_as_yml(
            results,
            self._session_dir,
            CalibrationDataFiler.PAN_MOTOR_TIMEBASE_ALIGNMENT,
        )

    def _get_archived_results(self):
        """Lazily load previously saved alignment results (None if absent)."""
        if self._archived_results is None:
            loaded = self._calibration_data_filer.load(
                self._session_dir,
                CalibrationDataFiler.PAN_MOTOR_TIMEBASE_ALIGNMENT,
            )
            self._archived_results = loaded
        return self._archived_results

    def _get_motor_idxs_from_archived_results(self, min_or_max: str):
        """Return archived motor indices of the given kind, or None.

        :param min_or_max: one of the MIN / MAX class constants
        """
        archived = self._get_archived_results()
        if archived is None:
            return None

        cls = self.__class__
        idxs = []
        for result in archived[cls.RAW_RESULTS]:
            if result[cls.MIN_OR_MAX] == min_or_max:
                idxs.append(result[cls.MOTOR_IDX])
        return idxs

    def _get_motor_maxima_from_archived_results(self):
        """Lazily pull up to _num_points archived maxima indices (or None)."""
        if self._motor_maxima_from_archive is None:
            idxs = self._get_motor_idxs_from_archived_results(
                self.__class__.MAX)
            if idxs is not None:
                idxs = np.array(idxs)[:self._num_points]
            self._motor_maxima_from_archive = idxs
        return self._motor_maxima_from_archive

    def _get_motor_minima_from_archived_results(self):
        """Lazily pull up to _num_points archived minima indices (or None)."""
        if self._motor_minima_from_archive is None:
            idxs = self._get_motor_idxs_from_archived_results(
                self.__class__.MIN)
            if idxs is not None:
                idxs = np.array(idxs)[:self._num_points]
            self._motor_minima_from_archive = idxs
        return self._motor_minima_from_archive

    def _get_selected_frames_from_archived_results(self):
        """Lazily build a {motor_idx: frame_num} map from archived results.

        Returns an empty dict when no archive exists.
        """
        if self._selected_frames_from_archive is None:
            mapping = {}
            archived = self._get_archived_results()
            if archived is not None:
                for result in archived[self.__class__.RAW_RESULTS]:
                    motor_idx = result[self.__class__.MOTOR_IDX]
                    mapping[motor_idx] = result[ImageFromVideo.FRAME_NUM]
            self._selected_frames_from_archive = mapping
        return self._selected_frames_from_archive

    def _get_selected_frame_num_for_motor_idx(self, motor_idx):
        """Return the archived frame number for motor_idx, or None if absent.

        dict.get replaces the membership-test-then-index double lookup; its
        default of None matches the original's explicit `return None`.
        """
        return self._get_selected_frames_from_archived_results().get(motor_idx)

    def run(self):
        """Drive the full interactive alignment session, then save results."""
        self.show_scrub_picker_for_min_and_max()
        self.save_results()
class TagGpsTimebaseAligner:
    '''Determines with user assistance statistics for offset in
       time between tag_gps timebase and video time'''

    # Keys for the per-alignment capture dicts in self._alignment_data.
    # (Originally TAG_IDX and TAG_TIME were defined a second time further
    # down with identical values; the duplicates have been removed.)
    IFV = 'ifv'
    TAG_IDX = 'tag_idx'
    TAG_TIME = 'tag_time'

    # Keys for the serialized results document.
    AGGREGATE_RESULTS = 'aggregate_results'
    ALIGNMENT_STATS = 'alignment_stats'
    DELAY_VIDEO_TO_TAG_GPS = 'delay_video_to_tag_gps'

    MAX = 'max'
    MEAN = 'mean'
    MIN = 'min'
    NUM = 'num'

    INDIVIDUAL_RESULTS = 'individual_results'

    TAG_DATA = 'tag_data'

    VIDEO_DATA = 'video_data'
    FRAME_NUM = 'frame_num'
    TIME_MS = 'time_ms'
    VIDEO_ID = 'video_id'
    VIDEO_URL = 'video_url'

    def __init__(self,
                 session_dir: str,
                 input_data_head_path=Path('/Volumes/WD'),
                 results_head_path=Path('.') / 'data/calibration_data',
                 cache_dir_path=Path('/Volumes/WD/image_cache'),
                ):
        """
        :param session_dir: session directory name under the data head path
        :param input_data_head_path: root of the raw session npz/video data
        :param results_head_path: root under which result yml files are written
        :param cache_dir_path: directory used to cache extracted video frames
        """
        self._session_dir = session_dir
        self._input_data_head_path = input_data_head_path
        self._results_head_path = results_head_path
        self._cache_dir_path = cache_dir_path

        # helpers
        self._ldfh = LDFH(self._input_data_head_path)
        self._calibration_data_filer = CalibrationDataFiler(self._results_head_path)

        # state: one dict per saved alignment (IFV / TAG_IDX / TAG_TIME keys)
        self._alignment_data = []

        # lazy inits
        self._latitude_series = None
        self._longitude_series = None
        self._time_series = None

    def _display_ui(self):
        """Launch the alignment UI with a map scrubber over the tag track."""
        geo_map_scrubber = GeoMapScrubber(
            latitude_series=self._get_latitude_series(),
            longitude_series=self._get_longitude_series(),
            time_series=self._get_time_series(),
        )
        TagGpsTimebaseAlignerUi(
            geo_map_scrubber=geo_map_scrubber,
            video_path=self._get_video_path(),
            save_alignment_callback=self._save_alignment_callback,
            done_callback=self._done_callback,
        ).run()

    def _save_alignment_callback(self, image_from_video, tag_idx, tag_time):
        """UI callback: record one (frame, tag index, tag time) alignment."""
        self._alignment_data.append(
            {
                self.__class__.IFV: image_from_video,
                self.__class__.TAG_IDX: tag_idx,
                self.__class__.TAG_TIME: tag_time,
            }
        )
        print('Saved alignment delay: ', self._get_delay_video_to_tag(self._alignment_data[-1]))

    def _done_callback(self):
        """UI callback: serialize the collected stats to the session yml."""
        self._calibration_data_filer.save_as_yml(
            obj=self._get_stats(),
            data_dir_name=self._session_dir,
            calibration_file=CalibrationDataFiler.TAG_GPS_TIMEBASE_ALIGNMENT,
        )

    def _get_stats(self):
        """Return the full results document: aggregate + individual stats."""
        return {
            self.__class__.AGGREGATE_RESULTS: self._get_aggregate_stats(),
            self.__class__.INDIVIDUAL_RESULTS: self._get_individual_stats_list(),
        }

    def _get_aggregate_stats(self):
        """Return mean/max/min of video-to-tag delays over all alignments.

        With no saved alignments np.mean/np.max/np.min would raise (or
        produce nan), so the empty case now yields None for each field
        instead of crashing when the user finishes without aligning.
        """
        delays = np.array([stat[self.__class__.DELAY_VIDEO_TO_TAG_GPS]
                           for stat in self._get_individual_stats_list()])
        if delays.size == 0:
            stats = {
                self.__class__.MEAN: None,
                self.__class__.MAX: None,
                self.__class__.MIN: None,
            }
        else:
            stats = {
                self.__class__.MEAN: int(round(np.mean(delays))),
                self.__class__.MAX: int(round(np.max(delays))),
                self.__class__.MIN: int(round(np.min(delays))),
            }
        return {self.__class__.DELAY_VIDEO_TO_TAG_GPS: stats}

    def _get_individual_stats_list(self):
        """Return one stats dict (tag data, video data, delay) per alignment."""
        r = []
        for datum in self._alignment_data:
            r.append(
                {
                    self.__class__.TAG_DATA:
                        {
                            self.__class__.TAG_IDX: int(datum[self.__class__.TAG_IDX]),
                            self.__class__.TAG_TIME: int(datum[self.__class__.TAG_TIME]),
                        },
                    self.__class__.VIDEO_DATA: datum[self.__class__.IFV].get_as_dict(),
                    self.__class__.DELAY_VIDEO_TO_TAG_GPS: self._get_delay_video_to_tag(datum),
                }
            )
        return r

    def _get_delay_video_to_tag(self, datum):
        """Return tag time minus video time (ms) for one alignment datum."""
        video_time = datum[self.__class__.IFV].get_time_ms()
        return int(datum[self.__class__.TAG_TIME] - video_time)

    def _get_latitude_series(self):
        """Lazily load and cache the tag latitude series from the tag npz."""
        if self._latitude_series is None:
            self._latitude_series = \
                self._ldfh.get_field_from_npz_file(self._session_dir,
                                                   LDFH.TAG_NPZ_FILE,
                                                   LDFH.TAG_LATITUDE_FIELD,
                                                  )
        return self._latitude_series

    def _get_longitude_series(self):
        """Lazily load and cache the tag longitude series from the tag npz."""
        if self._longitude_series is None:
            self._longitude_series = \
                self._ldfh.get_field_from_npz_file(self._session_dir,
                                                   LDFH.TAG_NPZ_FILE,
                                                   LDFH.TAG_LONGITUDE_FIELD,
                                                  )
        return self._longitude_series

    def _get_time_series(self):
        """Lazily load and cache the tag time series from the tag npz."""
        if self._time_series is None:
            self._time_series = \
                self._ldfh.get_field_from_npz_file(self._session_dir,
                                                   LDFH.TAG_NPZ_FILE,
                                                   LDFH.TAG_TIME_FIELD,
                                                  )
        return self._time_series

    def _get_video_path(self):
        """Return the path of this session's video via the data-file helper."""
        return self._ldfh.get_video_path(self._session_dir)
Example #10
0
 def setUp(self):
     self.cdf = CalibrationDataFiler(self.__class__.TOP_LEVEL_PATH)
     self.remove_data_dir_and_top_level_dir()