Example #1
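A unit test covering the main modes of `get_video_frames_preload`: a plain sequential fetch, a fetch with a `mask` slice, fetching `as_list`, and applying a per-frame `func`.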
    def test_get_video_frames_preload(self):
        n = range(100, 103)  # Frame numbers to fetch

        # Test loading sequential frames without slice
        frames = vidio.get_video_frames_preload(self.video_path, n)
        expected_shape = (len(n), 1024, 1280, 3)
        self.assertEqual(frames.shape, expected_shape)
        self.assertEqual(frames.dtype, np.dtype(np.uint8))

        # Test loading frames with slice
        expected = np.array([[173, 133, 173, 216, 0], [182, 133, 22, 241, 19],
                             [170, 152, 97, 48, 25]],
                            dtype=np.uint8)
        frames = vidio.get_video_frames_preload(self.video_path,
                                                n,
                                                mask=np.s_[0, :5, 0])
        self.assertTrue(np.all(frames == expected))
        expected_shape = (len(n), 5)
        self.assertEqual(frames.shape, expected_shape)

        # Test loading frames as list
        frames = vidio.get_video_frames_preload(self.video_path,
                                                n,
                                                as_list=True)
        self.assertIsInstance(frames, list)
        self.assertEqual(frames[0].shape, (1024, 1280, 3))
        self.assertEqual(frames[0].dtype, np.dtype(np.uint8))
        self.assertEqual(len(frames), 3)

        # Test applying function
        frames = vidio.get_video_frames_preload(
            self.video_path, n, func=lambda x: np.mean(x, axis=2))
        expected_shape = (len(n), 1024, 1280)
        self.assertEqual(frames.shape, expected_shape)
Example #2
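A simple performance check: several contiguous blocks of frames separated by gaps are fetched in one call and the elapsed time is logged.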
    def test_get_video_frames_preload_perf(self):
        # Fetch x * 10 contiguous frames from the start of each of y blocks of 100 frames
        x = 5
        y = 3
        n = np.tile(np.arange(x * 10), (y, 1))
        n += np.arange(1, y * 100, 100).reshape(y, -1) - 1
        # Test loading sequential frames without slice
        t0 = time.time()
        vidio.get_video_frames_preload(self.video_path, n.flatten())
        elapsed = time.time() - t0
        self.log.info(
            f'fetching {n.size} frames with {y - 1} discontinuities took {elapsed:.2f}s'
        )
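For x = 5 and y = 3 the index array above holds three contiguous blocks of 50 frames (0-49, 100-149 and 200-249), so the flattened array contains y - 1 = 2 discontinuities.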
Example #3
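A helper that saves sample frames from a session's video, including those required for wheel alignment, for use as an integration test fixture.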
def _save_qc_frames(qc, **kwargs):
    """
    Given a QC object, save the frames required for wheel alignment, etc.
    This may then be used as a test fixture.
    :param qc:
    :return:
    """
    if all(x is None for x in qc.data.values()):
        # Get wheel period for alignment frame indices
        qc.load_data(load_video=False, **kwargs)
    length = camio.get_video_length(qc.video_path)
    indices = np.linspace(100, length - 100, qc.n_samples).astype(int)
    # The first read is not saved and may be re-read, so prepend frame 0
    frame_ids = np.insert(indices, 0, 0)
    wheel_present = camQC.data_for_keys(('position', 'timestamps', 'period'), qc.data['wheel'])
    if wheel_present and qc.label != 'body':
        a, b = qc.data.wheel.period
        mask = np.logical_and(qc.data.timestamps >= a, qc.data.timestamps <= b)
        wheel_align_frames, = np.where(mask)
        # Again, the first read is not saved and may be re-read, so repeat the first index
        wheel_align_frames = np.insert(wheel_align_frames, 0, wheel_align_frames[0])
        frame_ids = np.r_[frame_ids, wheel_align_frames]

    # load and save the frames to file
    frames = vidio.get_video_frames_preload(qc.video_path, frame_ids)
    file = base.IntegrationTest.default_data_root() / 'camera' / (qc.eid + '_frame_samples.npy')
    if not file.parent.exists():
        file.parent.mkdir()
    np.save(file, frames)
    assert file.exists()
Example #4
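A QC method that reads the video meta-data, samples evenly spaced frames (keeping a single color channel via `mask`), and marks the outcome CRITICAL if the file cannot be read.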
    def load_video_data(self):
        # Get basic properties of video
        try:
            self.data['video'] = get_video_meta(self.video_path, one=self.one)
            # Sample some frames from the video file
            indices = np.linspace(100, self.data['video'].length - 100,
                                  self.n_samples).astype(int)
            self.frame_samples_idx = indices
            self.data['frame_samples'] = get_video_frames_preload(
                self.video_path, indices, mask=np.s_[:, :, 0])
        except AssertionError:
            _log.error(
                'Failed to read video file; setting outcome to CRITICAL')
            self._outcome = 'CRITICAL'
Example #5
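A script that fetches a single frame from each camera's streamed video URL and saves it to disk as a .npy file.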
def get_example_images(eid):
    # Frame rate (Hz) of each camera type
    frts = {'body': 30, 'left': 60, 'right': 150}

    one = ONE()

    for video_type in frts:

        frame_idx = [20 * 60 * frts[video_type]]  # frame at the 20 minute mark (20 min * 60 s * fps)
        try:
            r = one.list(eid, 'dataset_types')
            recs = [
                x for x in r if f'{video_type}Camera.raw.mp4' in x['name']
            ][0]['file_records']
            video_path = [
                x['data_url'] for x in recs if x['data_url'] is not None
            ][0]

            frames = get_video_frames_preload(video_path,
                                              frame_idx,
                                              mask=np.s_[:, :, 0])
            np.save(
                '/home/mic/reproducible_dlc/example_images/'
                f'{eid}_{video_type}.npy', frames)
            print(eid, video_type, 'done')
        except Exception as e:
            print(eid, video_type, 'error:', e)
            continue
Example #6
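An excerpt from a usage guide covering single-frame fetches, pre-loading multiple frames with a `mask`, and reading video meta-data.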
frame = vidio.get_video_frame(url, frame_n)
assert frame is not None, 'failed to load frame'

# Example 4: loading multiple frames
"""
The preload function will by default pre-allocate the memory before loading the frames,
and will return the frames as a numpy array of shape (l, h, w, 3), where l is the number of
frame indices given.  The indices must be an iterable of positive integers.  Because the videos
are in black and white, the values of each color channel are identical.  Therefore, to save
memory you can provide a slice that returns only one of the three channels for each frame.  The
resulting shape will be (l, h, w).  NB: Any slice or boolean array may be provided, which is
useful for cropping to an ROI.

If you don't need to apply operations over all the fetched frames, you can use the `as_list`
kwarg to return the frames as a list.  This is slightly faster than fetching as an ndarray.

A warning is printed if fetching a frame fails.  The affected frames are returned as zeros,
or as None if `as_list` is True.
"""
frames = vidio.get_video_frames_preload(url, range(10), mask=np.s_[:, :, 0])
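"""
As a minimal sketch of the `as_list` mode described above (assuming the same `url` as in
the previous examples), failed fetches can be detected by checking for None:
"""
frame_list = vidio.get_video_frames_preload(url, range(10), as_list=True)
missing = [i for i, frame in enumerate(frame_list) if frame is None]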

# Example 5: load video meta-data
"""
You can load all the information for a given video.  To load the video size from a URL, an
instance of ONE must be provided; otherwise this entry will be blank.  A Bunch is returned
with a number of fields.
"""
meta = vidio.get_video_meta(url, one=one)
for k, v in meta.items():
    print(f'The video {k} = {v}')
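Example #7
A script that streams a handful of random frames for a given session and camera, paints the session info and DLC-tracked points onto each frame, and saves the results as PNG images.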
def stream_save_labeled_frames(eid, video_type):
    '''
    For a given eid and camera type, stream sample frames,
    paint the DLC labels onto them and save them to file.
    '''
    startTime = time.time()

    # eid = '5522ac4b-0e41-4c53-836a-aaa17e82b9eb'
    # video_type = 'left'

    n_frames = 5  # sample 5 random frames

    save_images_folder = '/home/mic/DLC_QC/example_frames/'
    one = ONE()
    # Build a session info string from components of the local session path
    info = '_'.join(
        np.array(str(one.path_from_eid(eid)).split('/'))[[5, 7, 8]])
    print(info, video_type)

    r = one.list(eid, 'dataset_types')

    dtypes_DLC = [
        '_ibl_rightCamera.times.npy', '_ibl_leftCamera.times.npy',
        '_ibl_bodyCamera.times.npy', '_iblrig_leftCamera.raw.mp4',
        '_iblrig_rightCamera.raw.mp4', '_iblrig_bodyCamera.raw.mp4',
        '_ibl_leftCamera.dlc.pqt', '_ibl_rightCamera.dlc.pqt',
        '_ibl_bodyCamera.dlc.pqt'
    ]

    dtype_names = [x['name'] for x in r]

    assert all(i in dtype_names for i in dtypes_DLC), \
        'Not all required datasets are available for this eid'

    D = one.load(eid,
                 dataset_types=['camera.times', 'camera.dlc'],
                 dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent / 'alf'

    cam0 = alf.io.load_object(alf_path,
                              '%sCamera' % video_type,
                              namespace='ibl')

    Times = cam0['times']

    cam = cam0['dlc']
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])

    XYs = {}
    for point in points:
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9,
                               cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x, y])

    if video_type != 'body':
        d = list(points)
        d.remove('tube_top')
        d.remove('tube_bottom')
        points = np.array(d)

    # stream frames
    recs = [x for x in r
            if f'{video_type}Camera.raw.mp4' in x['name']][0]['file_records']
    video_path = [x['data_url'] for x in recs if x['data_url'] is not None][0]
    vid_meta = get_video_meta(video_path)

    frame_idx = sample(range(vid_meta['length']), n_frames)
    print('frame indices:', frame_idx)
    frames = get_video_frames_preload(video_path,
                                      frame_idx,
                                      mask=np.s_[:, :, 0])
    size = [vid_meta['width'], vid_meta['height']]

    x0 = 0
    x1 = size[0]
    y0 = 0
    y1 = size[1]
    if video_type == 'left':
        dot_s = 10  # [px] for painting DLC dots
    else:
        dot_s = 5

    # writing stuff on frames
    font = cv2.FONT_HERSHEY_SIMPLEX

    if video_type == 'left':
        bottomLeftCornerOfText = (20, 1000)
        fontScale = 4
    else:
        bottomLeftCornerOfText = (10, 500)
        fontScale = 2

    lineType = 2

    # assign a distinct color to each DLC point using the Set1 colormap
    cmap = matplotlib.cm.get_cmap('Set1')
    CR = np.arange(len(points)) / len(points)

    block = np.ones((2 * dot_s, 2 * dot_s, 3))

    k = 0
    for frame in frames:

        gray = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

        # print session info
        fontColor = (255, 255, 255)
        cv2.putText(gray, info, bottomLeftCornerOfText, font, fontScale / 4,
                    fontColor, lineType)

        # print time
        Time = round(Times[frame_idx[k]], 3)
        a, b = bottomLeftCornerOfText
        bottomLeftCornerOfText0 = (int(a * 10 + b / 2), b)
        cv2.putText(gray, '  time: ' + str(Time), bottomLeftCornerOfText0,
                    font, fontScale / 2, fontColor, lineType)

        # print DLC dots
        ll = 0
        for point in points:

            # Put point color legend
            fontColor = (np.array([cmap(CR[ll])]) * 255)[0][:3]
            a, b = bottomLeftCornerOfText
            if video_type == 'right':
                bottomLeftCornerOfText2 = (a, a * 2 * (1 + ll))
            else:
                bottomLeftCornerOfText2 = (b, a * 2 * (1 + ll))
            fontScale2 = fontScale / 4
            cv2.putText(gray, point, bottomLeftCornerOfText2, font, fontScale2,
                        fontColor, lineType)

            X0 = XYs[point][0][frame_idx[k]]
            Y0 = XYs[point][1][frame_idx[k]]

            # Swap to (row, column) order for array indexing below
            X = Y0
            Y = X0

            if not np.isnan(X) and not np.isnan(Y):
                try:
                    col = (np.array([cmap(CR[ll])]) * 255)[0][:3]
                    # col = np.array([0, 0, 255]) # all points red
                    X = X.astype(int)
                    Y = Y.astype(int)

                    uu = block * col
                    gray[X - dot_s:X + dot_s, Y - dot_s:Y + dot_s] = uu

                except Exception as e:
                    print('frame', frame_idx[k])
                    print(e)
            ll += 1

        gray = gray[y0:y1, x0:x1]
        # cv2.imshow('frame', gray)
        cv2.imwrite(f'{save_images_folder}{eid}_frame_{frame_idx[k]}.png',
                    gray)
        cv2.waitKey(1)
        k += 1

    print(f'{n_frames} frames done in {np.round(time.time() - startTime)}s')
Example #8
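A QC method that aligns the camera with the wheel data by cross-correlating the video motion energy with the wheel speed, returning the frame offset, the peak cross-correlation and the motion energy trace.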
    def align_motion(self,
                     period=(-np.inf, np.inf),
                     side='left',
                     sd_thresh=10,
                     display=False):
        """Align the video with the wheel motion by cross-correlating the
        video motion energy with the wheel speed over the given period."""
        # Get data samples within period
        wheel = self.data['wheel']
        self.alignment.label = side
        self.alignment.to_mask = lambda ts: np.logical_and(
            ts >= period[0], ts <= period[1])
        camera_times = self.data['camera_times'][side]
        cam_mask = self.alignment.to_mask(camera_times)
        frame_numbers, = np.where(cam_mask)

        if frame_numbers.size == 0:
            raise ValueError('No frames during given period')

        # Motion Energy
        camera_path = self.video_paths[side]
        roi = (*[slice(*r) for r in self.roi[side]], 0)
        try:
            # TODO Add function arg to make grayscale
            self.alignment.frames = \
                vidio.get_video_frames_preload(camera_path, frame_numbers, mask=roi)
            assert self.alignment.frames.size != 0
        except AssertionError:
            self.log.error('Failed to open video')
            return None, None, None
        self.alignment.df, stDev = video.motion_energy(self.alignment.frames,
                                                       2)
        self.alignment.period = period  # For plotting

        # Calculate rotary encoder velocity trace
        x = camera_times[cam_mask]
        Fs = 1000
        pos, t = wh.interpolate_position(wheel.timestamps,
                                         wheel.position,
                                         freq=Fs)
        v, _ = wh.velocity_smoothed(pos, Fs)
        interp_mask = self.alignment.to_mask(t)
        # Convert to normalized speed
        xs = np.unique([find_nearest(t[interp_mask], ts) for ts in x])
        vs = np.abs(v[interp_mask][xs])
        vs = (vs - np.min(vs)) / (np.max(vs) - np.min(vs))

        # FIXME This can be used as a goodness of fit measure
        USE_CV2 = False
        if USE_CV2:
            # convert from numpy format to openCV format
            dfCV = np.float32(self.alignment.df.reshape((-1, 1)))
            reCV = np.float32(vs.reshape((-1, 1)))

            # perform cross correlation
            resultCv = cv2.matchTemplate(dfCV, reCV, cv2.TM_CCORR_NORMED)

            # convert result back to numpy array
            xcorr = np.asarray(resultCv)
        else:
            xcorr = signal.correlate(self.alignment.df, vs)

        # Cross correlate wheel speed trace with the motion energy
        CORRECTION = 2
        self.alignment.c = max(xcorr)
        self.alignment.xcorr = np.argmax(xcorr)
        self.alignment.dt_i = self.alignment.xcorr - xs.size + CORRECTION
        self.log.info(
            f'{side} camera, adjusted by {self.alignment.dt_i} frames')

        if display:
            # Plot the motion energy
            fig, ax = plt.subplots(2, 1, sharex='all')
            y = np.pad(self.alignment.df, 1, 'edge')
            ax[0].plot(x, y, '-x', label='wheel motion energy')
            thresh = stDev > sd_thresh
            ax[0].vlines(x[np.array(
                np.pad(thresh, 1, 'constant', constant_values=False))],
                         0,
                         1,
                         linewidth=0.5,
                         linestyle=':',
                         label=f'>{sd_thresh} s.d. diff')
            ax[1].plot(t[interp_mask], np.abs(v[interp_mask]))

            # Plot other stuff
            dt = np.diff(camera_times[[0, np.abs(self.alignment.dt_i)]])
            fps = 1 / np.diff(camera_times).mean()
            ax[0].plot(t[interp_mask][xs] - dt,
                       vs,
                       'r-x',
                       label='velocity (shifted)')
            ax[0].set_title('normalized motion energy, %s camera, %.0f fps' %
                            (side, fps))
            ax[0].set_ylabel('rate of change (a.u.)')
            ax[0].legend()
            ax[1].set_ylabel('wheel speed (rad / s)')
            ax[1].set_xlabel('Time (s)')

            title = f'{self.ref}, from {period[0]:.1f}s - {period[1]:.1f}s'
            fig.suptitle(title, fontsize=16)
            fig.set_size_inches(19.2, 9.89)

        return self.alignment.dt_i, self.alignment.c, self.alignment.df
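A hypothetical usage sketch (the `qc` instance, its loaded wheel and camera data, and the period values are assumptions for illustration):

dt_i, c, df = qc.align_motion(period=(100.0, 160.0), side='left', display=True)
print(f'frame offset: {dt_i}, peak cross-correlation: {c:.2f}')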