def test_get_video_meta(self):
    """Test get_video_meta with both a local file path and a remote URL."""
    # A local video file: the returned meta should be a superset of these known properties
    local_expected = {
        'length': 158377,
        'fps': 60,
        'width': 1280,
        'height': 1024,
        'size': 4257349100
    }
    meta = vidio.get_video_meta(self.video_path)
    self.assertTrue(local_expected.items() <= meta.items())
    self.assertEqual(meta.duration.total_seconds(), 2639.616667)

    # A remote video: resolve a dataset URL via Alyx, then read the meta over HTTP
    one = ONE(base_url='https://test.alyx.internationalbrainlab.org',
              username='******', password='******')
    dset = one.alyx.rest('datasets', 'list', name='_iblrig_leftCamera.raw.mp4', exist=True)[0]
    video_url = next(fr['data_url'] for fr in dset['file_records'] if fr['data_url'])
    remote_expected = {
        'length': 144120,
        'fps': 30,
        'width': 1280,
        'height': 1024,
        'size': 495090155
    }
    meta = vidio.get_video_meta(video_url, one=one)
    self.assertTrue(remote_expected.items() <= meta.items())
def load_video_data(self):
    """Load the video meta-data and a sub-sample of its frames.

    Populates ``self.data['video']`` with the video properties and
    ``self.data['frame_samples']`` with ``self.n_samples`` evenly spaced
    frames (single colour channel).  If the video cannot be read the
    outcome is set to CRITICAL.
    """
    try:
        meta = get_video_meta(self.video_path, one=self.one)
        self.data['video'] = meta
        # Evenly spaced sample indices, skipping the first and last 100 frames
        sample_idx = np.linspace(100, meta.length - 100, self.n_samples).astype(int)
        self.frame_samples_idx = sample_idx
        # Keep a single colour channel only, to save memory
        self.data['frame_samples'] = get_video_frames_preload(
            self.video_path, sample_idx, mask=np.s_[:, :, 0])
    except AssertionError:
        # The video readers signal failure via assertions; treat as fatal for QC
        _log.error('Failed to read video file; setting outcome to CRITICAL')
        self._outcome = 'CRITICAL'
# NOTE(review): script fragment — `url`, `frame_n`, `one` are defined earlier, out of view.
frame = vidio.get_video_frame(url, frame_n)
assert frame is not None, 'failed to load frame'

# Example 4: loading multiple frames
"""
The preload function will by default pre-allocate the memory before loading the frames,
and will return the frames as a numpy array of the shape (l, h, w, 3), where l = the number of
frame indices given.  The indices must be an iterable of positive integers.  Because the videos
are in black and white the values of each color channel are identical.   Therefore to save on
memory you can provide a slice that returns only one of the three channels for each frame.  The
resulting shape will be (l, h, w).  NB: Any slice or boolean array may be provided which is
useful for cropping to an ROI.

If you don't need to apply operations over all the fetched frames you can use the `as_list`
kwarg to return the frames as a list.  This is slightly faster than fetching as an ndarray.

A warning is printed if fetching a frame fails.  The affected frames will be returned as zeros
or None if `as_list` is True.
"""
# Fetch the first ten frames, keeping only one colour channel per frame
frames = vidio.get_video_frames_preload(url, range(10), mask=np.s_[:, :, 0])

# Example 5: load video meta-data
"""
You can load all the information for a given video. In order to load the video size from a URL
an instance of ONE must be provided, otherwise this entry will be blank. An Bunch is returned
with a number of fields.
"""
meta = vidio.get_video_meta(url, one=one)
# Print every field of the returned meta-data Bunch
for k, v in meta.items():
    print(f'The video {k} = {v}')
def stream_save_labeled_frames(eid, video_type):
    """Stream sample video frames, paint DLC labels on them and save them as PNGs.

    For a given experiment and camera, a handful of random frames are streamed
    from the remote video, annotated with session info, frame time and the DLC
    point positions, then written to ``save_images_folder`` as
    ``<eid>_frame_<idx>.png``.

    Parameters
    ----------
    eid : str
        An experiment UUID, e.g. '5522ac4b-0e41-4c53-836a-aaa17e82b9eb'.
    video_type : str
        The camera to use: 'left', 'right' or 'body'.
    """
    start_time = time.time()
    n_frames = 5  # sample 5 random frames
    save_images_folder = '/home/mic/DLC_QC/example_frames/'
    one = ONE()
    # Short session label built from path components — assumes the local session
    # path layout puts lab/subject/date at indices 5, 7, 8; TODO confirm
    info = '_'.join(
        np.array(str(one.path_from_eid(eid)).split('/'))[[5, 7, 8]])
    print(info, video_type)
    r = one.list(eid, 'dataset_types')

    # All camera times, raw videos and DLC traces must exist for this session
    dtypes_DLC = [
        '_ibl_rightCamera.times.npy',
        '_ibl_leftCamera.times.npy',
        '_ibl_bodyCamera.times.npy',
        '_iblrig_leftCamera.raw.mp4',
        '_iblrig_rightCamera.raw.mp4',
        '_iblrig_bodyCamera.raw.mp4',
        '_ibl_leftCamera.dlc.pqt',
        '_ibl_rightCamera.dlc.pqt',
        '_ibl_bodyCamera.dlc.pqt'
    ]
    dtype_names = [x['name'] for x in r]
    assert all(i in dtype_names for i in dtypes_DLC), 'For this eid, not all data available'

    D = one.load(eid, dataset_types=['camera.times', 'camera.dlc'], dclass_output=True)
    alf_path = Path(D.local_path[0]).parent.parent / 'alf'
    cam0 = alf.io.load_object(alf_path, '%sCamera' % video_type, namespace='ibl')
    Times = cam0['times']
    cam = cam0['dlc']
    # DLC columns are '<point>_x', '<point>_y', '<point>_likelihood'
    points = np.unique(['_'.join(x.split('_')[:-1]) for x in cam.keys()])

    # NaN-out low-confidence positions (likelihood < 0.9)
    XYs = {}
    for point in points:
        x = np.ma.masked_where(cam[point + '_likelihood'] < 0.9, cam[point + '_x'])
        x = x.filled(np.nan)
        y = np.ma.masked_where(cam[point + '_likelihood'] < 0.9, cam[point + '_y'])
        y = y.filled(np.nan)
        XYs[point] = np.array([x, y])

    if video_type != 'body':
        # The tube points are only tracked on the body camera views we care about here
        d = list(points)
        d.remove('tube_top')
        d.remove('tube_bottom')
        points = np.array(d)

    # stream frames: pick the first remote file record with a data URL
    recs = [x for x in r if f'{video_type}Camera.raw.mp4' in x['name']][0]['file_records']
    video_path = [x['data_url'] for x in recs if x['data_url'] is not None][0]
    vid_meta = get_video_meta(video_path)
    frame_idx = sample(range(vid_meta['length']), n_frames)
    print('frame indices:', frame_idx)
    # Videos are greyscale, so keep a single colour channel to save memory
    frames = get_video_frames_preload(video_path, frame_idx, mask=np.s_[:, :, 0])
    size = [vid_meta['width'], vid_meta['height']]

    # Full-frame crop bounds
    x0, x1 = 0, size[0]
    y0, y1 = 0, size[1]
    dot_s = 10 if video_type == 'left' else 5  # [px] half-size of painted DLC dots

    # Text annotation settings (the left camera has higher resolution)
    font = cv2.FONT_HERSHEY_SIMPLEX
    if video_type == 'left':
        bottomLeftCornerOfText = (20, 1000)
        fontScale = 4
    else:
        bottomLeftCornerOfText = (10, 500)
        fontScale = 2
    lineType = 2

    # assign a color to each DLC point
    cmap = matplotlib.cm.get_cmap('Set1')
    CR = np.arange(len(points)) / len(points)
    block = np.ones((2 * dot_s, 2 * dot_s, 3))

    for k, frame in enumerate(frames):
        gray = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)

        # print session info
        fontColor = (255, 255, 255)
        cv2.putText(gray, info, bottomLeftCornerOfText, font,
                    fontScale / 4, fontColor, lineType)

        # print time (the original duplicated these two statements verbatim)
        Time = round(Times[frame_idx[k]], 3)
        a, b = bottomLeftCornerOfText
        bottomLeftCornerOfText0 = (int(a * 10 + b / 2), b)
        cv2.putText(gray, ' time: ' + str(Time), bottomLeftCornerOfText0,
                    font, fontScale / 2, fontColor, lineType)

        # print DLC dots
        for ll, point in enumerate(points):
            # Put point color legend
            fontColor = (np.array([cmap(CR[ll])]) * 255)[0][:3]
            a, b = bottomLeftCornerOfText
            if video_type == 'right':
                bottomLeftCornerOfText2 = (a, a * 2 * (1 + ll))
            else:
                bottomLeftCornerOfText2 = (b, a * 2 * (1 + ll))
            fontScale2 = fontScale / 4
            cv2.putText(gray, point, bottomLeftCornerOfText2, font,
                        fontScale2, fontColor, lineType)

            X0 = XYs[point][0][frame_idx[k]]
            Y0 = XYs[point][1][frame_idx[k]]
            # Swap axes: image indexing is (row, col) == (y, x)
            X = Y0
            Y = X0
            if not np.isnan(X) and not np.isnan(Y):
                try:
                    col = (np.array([cmap(CR[ll])]) * 255)[0][:3]
                    X = X.astype(int)
                    Y = Y.astype(int)
                    # Paint a coloured square centred on the DLC point
                    gray[X - dot_s:X + dot_s, Y - dot_s:Y + dot_s] = block * col
                except Exception as e:
                    # Dot partially outside the frame (or similar); log and continue
                    print('frame', frame_idx[k])
                    print(e)

        gray = gray[y0:y1, x0:x1]
        cv2.imwrite(f'{save_images_folder}{eid}_frame_{frame_idx[k]}.png', gray)
        cv2.waitKey(1)

    print(f'{n_frames} frames done in', np.round(time.time() - start_time))