Пример #1
0
 def test_label_from_path(self):
     """Test video.label_from_path for a full path, a URL, a bare name and a non-video file."""
     # A full session video path should resolve to the 'body' label
     raw_video = (self.one.path_from_eid(self.eid)
                  / 'raw_video_data' / '_iblrig_bodyCamera.raw.mp4')
     self.assertEqual('body', video.label_from_path(raw_video))
     # A remote URL should resolve to the 'left' label
     self.assertEqual('left', video.label_from_path(self.url))
     # A bare file name should resolve to the 'right' label
     self.assertEqual('right', video.label_from_path('_iblrig_rightCamera.raw.mp4'))
     # A non-video file yields no label at all
     self.assertIsNone(video.label_from_path('_iblrig_taskSettings.raw.json'))
Пример #2
0
    def test_video_checks(self, display=False):
        """Run each video QC check over the test videos and verify the outcomes.

        :param display: when True, additionally plot one sample frame per video with its
         outcome title for manual inspection
        """
        # A tuple of QC checks and the expected outcome for each 10 second video
        video_checks = (
            (self.qc.check_position, (1, 2, 3, 3, 3, 3, 3, 1, 1, 3, 1, 3, 3, 3, 3, 3, 1, 3)),
            (self.qc.check_focus, (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 1, 1, 3, 1, 1)),
            (self.qc.check_brightness, (1, 1, 1, 1, 1, 3, 3, 3, 1, 3, 3, 3, 1, 1, 1, 3, 1, 1)),
            (self.qc.check_file_headers, [1] * 18),
            (self.qc.check_resolution, (1, 1, 1, 1, 1, 3, 3, 1, 1, 3, 1, 3, 1, 1, 1, 1, 1, 3))
        )

        # For each check get the outcome and determine whether it matches our expected outcome
        # for each video
        for (check, expected) in video_checks:
            check_name = check.__name__
            outcomes = []
            frame_samples = []
            for path, data in self.data.items():
                # Point the QC object at this video's pre-loaded data before running the check
                self.qc.data = data
                self.qc.label = vidio.label_from_path(path)
                outcomes.append(check())
                frame_samples.append(data.frame_samples[0])

            # If display is True, plot 1 frame per video along with its outcome
            # This is purely for manual inspection
            if display:  # Check outcomes look reasonable by eye
                fig, axes = plt.subplots(int(len(self.data) / 4), 4)
                # Plain loop instead of a side-effect list comprehension
                for frm, ax, o in zip(frame_samples, axes.flatten(), outcomes):
                    self.qc.imshow(frm, ax=ax, title=o)
                fig.suptitle(check_name)
                plt.show()

            # Verify the outcome for each video matches what we expect
            for i, (x, y) in enumerate(zip(expected, outcomes)):
                self.assertTrue(x == CRITERIA[y], f'Unexpected outcome for {check_name} video {i}')
Пример #3
0
    def _run(self, **kwargs):
        """Compress the raw videos, extract camera timestamps and run the camera QC.

        :return: the list of output files (compressed videos plus timestamp files), or
         None when no compressed videos were produced
        """
        # avi to mp4 compression
        command = (
            'ffmpeg -i {file_in} -y -nostdin -codec:v libx264 -preset slow -crf 17 '
            '-loglevel 0 -codec:a copy {file_out}')
        output_files = ffmpeg.iblrig_video_compression(self.session_path,
                                                       command)

        # Nothing to do if compression yielded no videos
        if not output_files:
            _logger.info(
                'No compressed videos found; skipping timestamp extraction')
            return

        labels = [label_from_path(x) for x in output_files]
        # Video timestamps extraction
        data, files = camera.extract_all(self.session_path,
                                         save=True,
                                         labels=labels)
        output_files.extend(files)

        # Video QC
        run_camera_qc(self.session_path,
                      update=True,
                      one=self.one,
                      cameras=labels)

        return output_files
Пример #4
0
 def load_data(self, download=False):
     """
     Load the wheel, trials and camera timestamp data into self.data.

     :param download: when True, fetch the datasets remotely via ONE; otherwise load
      them from the local session alf folder
     :return: None; the loaded objects are stored on self.data
     """
     if download:
         self.data.wheel = self.one.load_object(self.eid, 'wheel')
         self.data.trials = self.one.load_object(self.eid, 'trials')
         cam = self.one.load(self.eid, ['camera.times'], dclass_output=True)
         # Key each timestamp array by its camera label, derived from the dataset URL
         self.data.camera_times = dict()
         for ts, url in zip(cam.data, cam.url):
             self.data.camera_times[vidio.label_from_path(url)] = ts
     else:
         alf_path = self.session_path / 'alf'
         self.data.wheel = alfio.load_object(alf_path, 'wheel')
         self.data.trials = alfio.load_object(alf_path, 'trials')
         # Key each timestamp file's content by the camera label in its file name
         self.data.camera_times = dict()
         for times_file in alf_path.glob('*Camera.times*'):
             self.data.camera_times[vidio.label_from_path(times_file)] = \
                 alfio.load_file_content(times_file)
     assert all(x is not None for x in self.data.values())
Пример #5
0
 def __init__(self,
              eid=None,
              one=None,
              log=logging.getLogger('ibllib'),
              **kwargs):
     """Initialise from a session eid, resolving the session path and video files.

     :param eid: an experiment ID; used to resolve the session path when none is given
     :param one: an ONE instance; a new one is created when None
     :param log: the logger to use
     :param kwargs: may contain 'session_path' to bypass the eid -> path lookup
     """
     self.one = one or ONE()
     self.eid = eid
     explicit_path = kwargs.pop('session_path', None)
     self.session_path = explicit_path or self.one.eid2path(eid)
     self.ref = self.one.dict2ref(self.one.path2ref(self.session_path))
     self.log = log
     self.trials = self.wheel = self.camera_times = None
     # Map camera label ('left'/'right'/'body') to its raw video file
     raw_video_dir = self.session_path.joinpath('raw_video_data')
     self.video_paths = {vidio.label_from_path(p): p
                         for p in raw_video_dir.glob('_iblrig_*Camera.raw.*')}
     self.data = Bunch()
     self.alignment = Bunch()
Пример #6
0
 def __init__(self,
              eid,
              one=None,
              log=logging.getLogger('ibllib'),
              **kwargs):
     """Initialise from a session eid, resolving the session path and video files.

     :param eid: an experiment ID, used to resolve the session path and reference
     :param one: an ONE instance; a new one is created when None
     :param log: the logger to use
     :param kwargs: may contain 'session_path' to override the eid -> path lookup
     """
     self.one = one or ONE()
     self.eid = eid
     # NB: the default path is computed eagerly, matching pop's default-argument semantics
     default_path = self.one.path_from_eid(eid)
     self.session_path = kwargs.pop('session_path', default_path)
     if self.one and not isinstance(self.one, OneOffline):
         self.ref = eid2ref(self.eid, as_dict=False, one=self.one)
     else:
         self.ref = None
     self.log = log
     self.trials = self.wheel = self.camera_times = None
     # Map camera label ('left'/'right'/'body') to its raw video file
     raw_video_dir = self.session_path.joinpath('raw_video_data')
     self.video_paths = {vidio.label_from_path(p): p
                         for p in raw_video_dir.glob('_iblrig_*Camera.raw.*')}
     self.data = Bunch()
     self.alignment = Bunch()
Пример #7
0
 def setUpClass(cls) -> None:
     """Load a few 10 second videos for testing the various video QC checks"""
     data_path = base.IntegrationTest.default_data_root()
     videos = sorted(data_path.joinpath('camera').rglob('*.mp4'))
     # Instantiate using session with a video path to fool constructor.
     # To remove once we use ONE cache file
     one = ONE(base_url='https://test.alyx.internationalbrainlab.org',
               username='******',
               password='******')
     dummy_id = 'd3372b15-f696-4279-9be5-98f15783b5bb'
     qc = CameraQC(dummy_id, 'left',
                   n_samples=10, stream=False, download_data=False, one=one)
     qc.one = None
     qc._type = 'ephys'  # All videos come from ephys sessions
     # Load each video's frame data once and keep a copy keyed by its path
     qcs = OrderedDict()
     for video_file in videos:
         qc.video_path = video_file
         qc.label = vidio.label_from_path(video_file)
         qc.n_samples = 10
         qc.load_video_data()
         qcs[video_file] = qc.data.copy()
     cls.qc = qc
     cls.data = qcs
Пример #8
0
"""
import numpy as np

import ibllib.io.video as vidio
from oneibl.one import ONE

# NOTE(review): these examples query Alyx via ONE — network access presumably required
one = ONE(silent=True)
eid = 'edd22318-216c-44ff-bc24-49ce8be78374'  # 2020-08-19_1_CSH_ZAD_019

# Example 1: get the remote video URL from eid
urls = vidio.url_from_eid(eid, one=one)
# Without the `label` kwarg, returns a dictionary of camera URLs
url = urls['left']  # URL for the left camera

# Example 2: get the video label from a video file path or URL
label = vidio.label_from_path(url)
print(f'Using URL for the {label} camera')

# Example 3: loading a single frame
frame_n = 1000  # Frame number to fetch.  Indexing starts from 0.
frame = vidio.get_video_frame(url, frame_n)
assert frame is not None, 'failed to load frame'

# Example 4: loading multiple frames
# Example 4: loading multiple frames
"""
The preload function will by default pre-allocate the memory before loading the frames, 
and will return the frames as a numpy array of the shape (l, h, w, 3), where l = the number of 
frame indices given.  The indices must be an iterable of positive integers.  Because the videos 
are in black and white the values of each color channel are identical.   Therefore to save on 
memory you can provide a slice that returns only one of the three channels for each frame.  The 
resulting shape will be (l, h, w).  NB: Any slice or boolean array may be provided which is