def __init__(
        self,
        filename,
        seek_ok=True,
        use_conventional_named_mean_fmf=True,
):
    super(UfmfV1, self).__init__()
    mode = "rb"
    self._filename = filename
    self._fd = open(filename, mode=mode)
    buf = self._fd.read(self.bufsz)
    intup = struct.unpack(FMT[1].HEADER, buf)
    (self._version, self._image_radius, self._timestamp0,
     self._width, self._height) = intup

    # extract background
    bg_im_buf = self._fd.read(self._width * self._height)
    self._bg_im = numpy.fromstring(bg_im_buf, dtype=numpy.uint8)
    self._bg_im.shape = self._height, self._width
    if hasattr(self, 'handle_bg'):
        self.handle_bg(self._timestamp0, self._bg_im)

    # get ready to extract frames
    self._chunkwidth = 2 * self._image_radius
    self._chunkheight = 2 * self._image_radius
    self._chunkshape = self._chunkheight, self._chunkwidth
    self._chunkimsize = self._chunkwidth * self._chunkheight
    self._last_safe_x = self._width - self._chunkwidth
    self._last_safe_y = self._height - self._chunkheight
    if seek_ok:
        self._fd_start = self._fd.tell()
        self._fd.seek(0, 2)
        self._fd_end = self._fd.tell()
        self._fd.seek(self._fd_start, 0)
        self._fd_length = self._fd_end - self._fd_start
    else:
        self._fd_length = None
    self.use_conventional_named_mean_fmf = use_conventional_named_mean_fmf
    self._sumsqf_fmf = None
    if self.use_conventional_named_mean_fmf:
        basename = os.path.splitext(self._filename)[0]
        fmf_filename = basename + '_mean.fmf'
        if os.path.exists(fmf_filename):
            self._mean_fmf = FMF.FlyMovie(fmf_filename)
            self._mean_fmf_timestamps = self._mean_fmf.get_all_timestamps()
            dt = self._mean_fmf_timestamps[1:] - self._mean_fmf_timestamps[:-1]
            assert np.all(dt > 0)  # make sure searchsorted will work
            sumsqf_filename = basename + '_sumsqf.fmf'
            if os.path.exists(sumsqf_filename):
                self._sumsqf_fmf = FMF.FlyMovie(sumsqf_filename)
        else:
            self.use_conventional_named_mean_fmf = False
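# --- Illustrative aside (not from the original sources) ---------------------
# The constructor above parses a fixed-layout binary header with
# struct.unpack(FMT[1].HEADER, buf); the real format string lives elsewhere in
# the module. A self-contained sketch of the same idea, using a hypothetical
# '<IIdII' layout (version, image radius, timestamp, width, height):
import struct

HEADER_FMT = '<IIdII'  # hypothetical layout, not the actual FMT[1].HEADER
buf = struct.pack(HEADER_FMT, 1, 10, 1234567890.5, 640, 480)
version, image_radius, timestamp0, width, height = struct.unpack(HEADER_FMT, buf)
assert (version, width, height) == (1, 640, 480)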
def get_data(fmf_file):
    # NB: globbing in a loop like this keeps only the last matching movie.
    for x in glob.glob(fmf_file + '/*wide*.fmf'):
        fmf = FMF.FlyMovie(x)
    data = sync_jaaba_with_ros(fmf_file, BAGS, WIDE_DIR)
    zero_timestamp = data['Timestamp'][data[data['Laser2_state'] > 0].index[0]]
    zero_ts_float = (np.datetime64(zero_timestamp.to_datetime()) -
                     np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
    frame_at_t0 = get_frame_number_at_or_before_timestamp(fmf, zero_ts_float)
    for y in glob.glob(fmf_file + '/*zoboomom*.fmf'):
        zoomfmf = FMF.FlyMovie(y)
    return fmf, data, frame_at_t0, zoomfmf
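# --- Illustrative aside (not from the original sources) ---------------------
# The datetime64 arithmetic in get_data() converts a timestamp to epoch
# seconds by subtracting the Unix epoch and dividing by a one-second timedelta:
import numpy as np

zero_timestamp = np.datetime64('2014-01-01T00:00:00')
epoch = np.datetime64('1970-01-01T00:00:00')
zero_ts_float = (zero_timestamp - epoch) / np.timedelta64(1, 's')
assert zero_ts_float == 1388534400.0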
def __init__(self, fmf_dir, savedir, fly_id, plot_overlays=False):
    if savedir[-1] == '/':
        savedir = savedir[:-1]
    if not os.path.exists(savedir + '/temp_png'):
        os.makedirs(savedir + '/temp_png')
    self._savedir = savedir
    if fmf_dir[-1] == '/':
        self._fmf_dir = fmf_dir[:-1]
    else:
        self._fmf_dir = fmf_dir
    self._zoomfmf = FMF.FlyMovie(self._fmf_dir)
    self._plot_overlays = plot_overlays
    self._expdir = fmf_dir.rsplit('/', 2)[0]
    self._bag = utilities.match_fmf_and_bag(self._fmf_dir,
                                            (self._expdir + '/BAGS'))
    #self._wide = utilities.match_wide_to_zoom(self._fmf_dir, (self._expdir + '/'))
    self._handle, __, ___ = utilities.parse_fmftime(self._fmf_dir)
    self._data, self._Tzero = self.get_data()
    self._image_height, self._image_width = self._zoomfmf.get_frame(0)[0].shape
def get_data(self):
    # NB: globbing in a loop keeps only the last matching movie.
    for x in glob.glob(self._fmf_dir + '/*.fmf'):
        fmf = FMF.FlyMovie(x)
    if not os.path.exists(self._savedir + '/' + self._handle + '_cache.pickle'):
        data = self.sync_jaaba_with_ros(self._fmf_dir,
                                        (self._expdir + '/BAGS'),
                                        (self._expdir + '/'))
        data.to_pickle(self._savedir + '/' + self._handle + '_cache.pickle')
    data = pd.read_pickle(self._savedir + '/' + self._handle + '_cache.pickle')
    try:
        zero_timestamp = data['Timestamp'][data[
            data.Laser1_state + data.Laser2_state > 0].index[0]]
    except IndexError:  # no stimulus rows; was a bare except in the original
        print "WARNING: Cannot synchronize by stimulus. Setting T0 to frame0."
        zero_timestamp = data['Timestamp'].index[0]
    zero_ts_float = (
        np.datetime64(zero_timestamp.to_datetime()) -
        np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
    frame_at_t0 = self.get_frame_number_at_or_before_timestamp(fmf, zero_ts_float)
    return fmf, data, frame_at_t0
def set_video(self, video_file):
    """Opens video to be played in VideoPlayer.

    Parameters
    ----------
    video_file : string
        Path to video file.
    """
    self.video_file_name = video_file
    self.video = FMF.FlyMovie(video_file)

    # sample a subset of frames to estimate the mean frame rate
    sample_size = 20
    mean_frame_rates = []
    timestamp_start_ixs = np.random.random_integers(
        low=0, high=self.video.get_n_frames() - sample_size - 1, size=4)
    for i in timestamp_start_ixs:
        timestamp_subset = []
        for ix in xrange(i, i + sample_size):
            f, t = self.video.get_frame(ix)
            timestamp_subset.append(t)
        mean_frame_rates.append(1. / np.mean(np.diff(timestamp_subset)))

    self.frame_rate = np.mean(mean_frame_rates)
    if np.isnan(self.frame_rate):
        self.frame_rate = 24
    self.update_label()
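# --- Illustrative aside (not from the original sources) ---------------------
# set_video() estimates frame rate from the mean spacing of sampled
# timestamps. Given the full timestamp array (cf. get_all_timestamps() used
# elsewhere in this collection), the same estimate is a one-liner:
import numpy as np

timestamps = np.array([0.0, 0.04, 0.08, 0.12, 0.16])  # hypothetical, seconds
frame_rate = 1.0 / np.mean(np.diff(timestamps))
assert abs(frame_rate - 25.0) < 1e-9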
def make_png_set(_VIDEO_DIR):
    if os.path.exists(_VIDEO_DIR + '/flymad_annotated.mp4'):  # should use complete mp4 file here
        print 'skipping already finished file:', _VIDEO_DIR
        return
    if not os.path.exists(_VIDEO_DIR + '/temp_png'):
        os.makedirs(_VIDEO_DIR + '/temp_png')
    for x in glob.glob(_VIDEO_DIR + '/*.fmf'):
        fmf = FMF.FlyMovie(x)
        jaaba_data = sync_jaaba_with_ros((_VIDEO_DIR + '/registered_trx.csv'), x)
        if LAST is None:
            _LAST = fmf.get_n_frames()
        else:
            _LAST = LAST
        for frame_number in range(fmf.get_n_frames() - FIRST -
                                  (fmf.get_n_frames() - _LAST)):
            if os.path.exists(_VIDEO_DIR + '/temp_png/_tmp%05d.png' % (frame_number)):
                continue
            frame, timestamp = fmf.get_frame(frame_number + FIRST)
            #print frame_number
            jaaba_datum = jaaba_data[jaaba_data['Timestamp'] == pd.to_datetime(
                timestamp, unit='s').tz_localize('UTC').tz_convert('US/Eastern')]
            fmf2fig(frame, timestamp, cm.Greys_r, jaaba_datum)
            #plt.show()
            plt.savefig(_VIDEO_DIR + '/temp_png/_tmp%05d.png' % (frame_number),
                        bbox_inches='tight', pad_inches=0)
            plt.close('all')
        sendMail('*****@*****.**', 'movie is finished',
                 (x + ' has finished processing.'))
def fmf_ucmp(video_in, video_out, timestamps_in='None', fps=None,
             width=320, height=240):
    """Converts any file format to FlyMovieFormat.

    Parameters
    ----------
    video_in : string
        Path to compressed video.

    video_out : string
        Path to save uncompressed, .fmf video.

    timestamps_in : string (default = 'None')
        Path to .gz file containing timestamps for video; if the path does
        not exist, frames are saved without explicit timestamps.

    fps : int or None (default = None)
        Frame rate to save uncompressed video.

    width : int (optional, default=320)
        Width of video, in pixels.

    height : int (optional, default=240)
        Height of video, in pixels.
    """
    ts_file_given = os.path.isfile(timestamps_in)
    if ts_file_given:
        timestamps = np.loadtxt(timestamps_in)

    command = [
        'ffmpeg', '-i', video_in, '-f', 'image2pipe', '-pix_fmt', 'gray8',
        '-vf', 'scale=%d:%d' % (width, height),  # was hard-coded 'scale=320:240', ignoring the parameters
        '-an', '-vcodec', 'rawvideo', '-'
    ]
    pipe = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=10**8)

    vid = FMF.FlyMovieSaver(video_out)
    try:
        count = 0
        while True:
            # one gray8 frame is width * height bytes
            raw_img = pipe.stdout.read(width * height)
            img = np.fromstring(raw_img, dtype=np.uint8)
            img = img.reshape((height, width))
            if ts_file_given:
                vid.add_frame(img, timestamps[count])
            else:
                vid.add_frame(img)
            count += 1
    except:
        # reshape fails at end-of-stream, which terminates the loop
        pipe.stdout.close()
        pipe.wait()
        del pipe
        vid.close()
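# --- Illustrative aside (not from the original sources) ---------------------
# fmf_ucmp() reads raw gray8 frames off the ffmpeg pipe, width * height bytes
# at a time, and reshapes each chunk to (height, width). The same framing,
# demonstrated on a stand-in byte buffer:
import numpy as np

width, height = 4, 3
raw_img = bytes(range(width * height))  # stand-in for pipe.stdout.read(w * h)
img = np.frombuffer(raw_img, dtype=np.uint8).reshape((height, width))
assert img.shape == (3, 4)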
def setUp(self): """Opens a video & sets an arena for use in test-cases.""" self.video = FMF.FlyMovie('D:/test_data/test_01.fmf') self.arena = trk_arena.CircularArena(self.video) self.arena.calculate_background() # set estimated arena center/radius self.arena.center = (self.arena.height / 2, self.arena.width / 2) self.arena.radius = (self.arena.height / 2)
def get_head_image(FMF_FILE, framenumber):
    fmf = FMF.FlyMovie(FMF_FILE)
    frame, timestamp = fmf.get_frame(framenumber)
    df = pd.read_pickle(FMF_FILE.rsplit('/', 1)[0] + '/tracking_info.pickle')
    hx = df.ix[framenumber].c_head_location_x
    hy = df.ix[framenumber].c_head_location_y
    centre = (int(hx), int(hy))
    theta = np.radians(df.ix[framenumber].d_bodyAxis)
    # width and height are module-level globals in the original script
    patch = subimage(cv.fromarray(frame), centre, theta, width, height)
    return np.array(patch)
def main():
    try:
        filename = sys.argv[1]
    except:
        print 'Usage: %s fmf_filename' % sys.argv[0]
        sys.exit()
    path, ext = os.path.splitext(filename)
    if ext != '.fmf':
        print 'fmf_filename does not end in .fmf'
        sys.exit()
    fly_movie = FlyMovieFormat.FlyMovie(filename)
    n_frames = fly_movie.get_n_frames()
    fmf_format = fly_movie.get_format()
    fmf = fly_movie
    delays = numpy.array([])
    for frame_number in range(n_frames):
        frame, timestamp = fmf.get_frame(frame_number)
        mono = False
        if (fmf_format in ['RGB8', 'ARGB8', 'YUV411', 'YUV422']
                or fmf_format.startswith('MONO8:')
                or fmf_format.startswith('MONO32f:')):
            save_frame = imops.to_rgb8(fmf_format, frame)
        else:
            if fmf_format not in ['MONO8', 'MONO16']:
                warnings.warn('converting unknown fmf format %s to mono' %
                              (fmf_format,))
            save_frame = imops.to_mono8(fmf_format, frame)
            mono = True
        h, w = save_frame.shape[:2]
        if mono:
            im = Image.fromstring('L', (w, h), save_frame.tostring())
        else:
            im = Image.fromstring('RGB', (w, h), save_frame.tostring())
        f = '%s_%08d.%s' % (os.path.join("./", "zbartmp"), frame_number, 'bmp')
        im.save(f)
        try:
            TS = subprocess.check_output(['zbarimg', '-q', f])
            ts = float(TS[8:].strip())
        except OSError:
            raise
        except:
            ts = float('nan')
        delay = timestamp - ts
        delays = numpy.append(delays, [delay])
        print "ds: % 14.6f cam: % 14.6f delay: % 8.6f" % (ts, timestamp, delay)
        os.unlink(f)
    print "delay mean: % 8.6f std: % 8.6f" % (
        delays[~numpy.isnan(delays)].mean(), delays[~numpy.isnan(delays)].std())
    print "delay max: % 8.6f min: % 8.6f" % (
        delays[~numpy.isnan(delays)].max(), delays[~numpy.isnan(delays)].min())
    # report usable frames out of the total (the original printed the NaN
    # count as "used", which inverted the statistic)
    print "%i of %i frames used" % (len(delays[~numpy.isnan(delays)]),
                                    len(delays))
def _load_fmf_and_smd(self, indexes):
    results, camn, cam_id, frame_type, remote_timestamp = indexes
    if frame_type == 'full_frame_fmf':
        if not hasattr(results.root, 'exact_movie_info'):
            result_utils.make_exact_movie_info2(results,
                                                movie_dir=self.movie_dir)
        exact_movie_info = results.root.exact_movie_info

        # find normal (non-background) movie filename
        found = False
        for row in exact_movie_info.where(
                exact_movie_info.cols.cam_id == cam_id):
            if row['start_timestamp'] < remote_timestamp < row['stop_timestamp']:
                filename = row['filename']
                found = True
                break
        if not found:
            raise ValueError('movie not found for %s' % (cam_id,))
        # alter to be background image (`suffix` is defined elsewhere in the class)
        filename = os.path.splitext(filename)[0] + '%s.fmf' % (suffix,)
    else:
        if hasattr(results.root, 'small_fmf_summary'):
            found = False
            small_fmf_summary = results.root.small_fmf_summary
            for row in small_fmf_summary.where(
                    small_fmf_summary.cols.camn == camn):
                if row['start_timestamp'] <= remote_timestamp <= row['stop_timestamp']:
                    found = True
                    basename = row['basename']
                    filename = basename + '.fmf'
                    break
        elif hasattr(results.root, 'exact_roi_movie_info'):
            found = False
            exact_roi_movie_info = results.root.exact_roi_movie_info
            for row in exact_roi_movie_info.where(
                    exact_roi_movie_info.cols.timestamp == remote_timestamp):
                if row['cam_id'] == cam_id:
                    filename = row['filename']
                    found = True
                    break
        else:
            raise RuntimeError(
                'need "small_fmf_summary" or "exact_roi_movie_info" table')
        if not found:
            raise NoFrameRecordedHere("frame not found for %s, %s" %
                                      (cam_id, repr(remote_timestamp)))

    if filename not in self.fmfs_by_filename:
        self.fmfs_by_filename[filename] = FlyMovieFormat.FlyMovie(filename)
    if filename not in self.smds_by_fmf_filename:
        self.smds_by_fmf_filename[filename] = smdfile.SMDFile(filename[:-4] + '.smd')
    fmf = self.fmfs_by_filename[filename]
    smd = self.smds_by_fmf_filename[filename]
    return (fmf, smd)
def AddImageToFmf(self, image, imagetopic):
    if imagetopic not in self.fmf_dict:
        self.fmf_dict[imagetopic] = FlyMovieFormat.FlyMovieSaver(
            self.fullpathFmf_dict[imagetopic],
            version=3,
            format=image.encoding.upper(),  # BUG: This isn't quite right, as the ROS encodings don't all match those in FlyMovieFormat.py
            bits_per_pixel=int(8 * image.step / image.width))

    # Cast the pixels to the proper type, then add the frame to the .fmf
    pixels = np.array(image.data, 'c').view(np.uint8).reshape(
        (image.height, image.width))
    self.fmf_dict[imagetopic].add_frame(pixels, image.header.stamp.to_sec())
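# --- Illustrative aside (not from the original sources) ---------------------
# The pixel cast above uses the Python 2 idiom np.array(data, 'c').view(uint8);
# np.frombuffer is the modern equivalent for turning a raw mono8 byte buffer
# into a (height, width) array:
import numpy as np

height, width = 2, 3
data = b'\x00\x01\x02\x03\x04\x05'  # stand-in for sensor_msgs/Image.data
pixels = np.frombuffer(data, dtype=np.uint8).reshape((height, width))
assert pixels[1, 2] == 5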
def generate_background_image(self):
    wide_fmf = FMF.FlyMovie(self._fmf_filepath)
    frame0, _ = wide_fmf.get_frame(0)
    image_height, image_width = frame0.shape
    acc = np.zeros((image_height, image_width), np.float32)  # 32 bit accumulator
    for frame_number in range(0, wide_fmf.get_n_frames(), self._sample_rate):
        frame, timestamp = wide_fmf.get_frame(frame_number)
        # per-pixel maximum projection over the sampled frames
        acc = np.maximum.reduce([frame, acc])
    cv2.imshow('background', acc)
    cv2.imwrite((self._tempdir + 'background.png'), acc)
    cv2.destroyAllWindows()
    return
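# --- Illustrative aside (not from the original sources) ---------------------
# The accumulator above builds a background as the per-pixel maximum over
# sampled frames (useful when a dark fly moves over a brighter arena, since
# the moving object drops out of the max). The update step in isolation:
import numpy as np

frames = [np.array([[0, 5], [3, 1]], np.uint8),
          np.array([[2, 1], [9, 0]], np.uint8)]
acc = np.zeros((2, 2), np.float32)
for frame in frames:
    acc = np.maximum.reduce([frame, acc])  # same update as generate_background_image
assert (acc == np.array([[2, 5], [9, 1]])).all()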
def get_heads(FMF_FILE):
    fmf = FMF.FlyMovie(FMF_FILE)
    df = pd.read_pickle(FMF_FILE.rsplit('/', 1)[0] + '/tracking_info.pickle')
    vals = []
    frames = []
    for framenumber in range(fmf.get_n_frames()):
        try:
            frame, timestamp = fmf.get_frame(framenumber)
            prob = get_proboscis(frame, df.ix[framenumber])
            vals.append(prob)
            frames.append(framenumber)
        except:
            # skip frames without a usable tracking entry
            continue
    valdf = pd.DataFrame({'Frame': frames, 'Proboscis': vals})
    return valdf
def get_data(fmf_file):
    for x in glob.glob(fmf_file + '/*.fmf'):
        fmf = FMF.FlyMovie(x)
    if not os.path.exists(savedir + 'stashed_data.pickle'):
        data = sync_jaaba_with_ros(fmf_file, BAGS, WIDE_DIR)
        data.to_pickle(savedir + 'stashed_data.pickle')
    data = pd.read_pickle(savedir + 'stashed_data.pickle')
    zero_timestamp = data['Timestamp'][data[data['Laser2_state'] > 0].index[0]]
    zero_ts_float = (np.datetime64(zero_timestamp.to_datetime()) -
                     np.datetime64('1970-01-01T00:00:00Z')) / np.timedelta64(1, 's')
    frame_at_t0 = get_frame_number_at_or_before_timestamp(fmf, zero_ts_float)
    print frame_at_t0
    return fmf, data, frame_at_t0
def get_heads(FMF_FILE):
    fmf = FMF.FlyMovie(FMF_FILE)
    # read the tracking data once, rather than once per sampled frame
    df = pd.read_pickle(FMF_FILE.rsplit('/', 1)[0] + '/tracking_info.pickle')
    # accumulator for the sampled patches; width/height are module globals
    data = np.zeros((height, width), dtype=np.float64)
    for framenumber in random.sample(range(0, fmf.get_n_frames()), 2000):
        try:
            frame, timestamp = fmf.get_frame(framenumber)
            hx = df.ix[framenumber].c_head_location_x
            hy = df.ix[framenumber].c_head_location_y
            centre = (int(hx), int(hy))
            theta = np.radians(df.ix[framenumber].d_bodyAxis)
            patch = subimage(cv.fromarray(frame), centre, theta, width, height)
            data = data + np.array(patch)  # the original read `_data`, an undefined name
        except:
            continue
    return data
def test_mmap(self):
    for filename in fmf_filenames:
        if (filename.endswith('test_rgb8.fmf')
                or filename.endswith('test_rgb32f.fmf')):
            continue
        fmf = FlyMovieFormat.FlyMovie(filename)
        ra = FlyMovieFormat.mmap_flymovie(filename)
        n_frames = len(ra)
        assert n_frames == fmf.get_n_frames()
        for i in range(n_frames):
            frame, timestamp = fmf.get_next_frame()
            mmap_frame = ra['frame'][i]
            assert mmap_frame.shape == frame.shape
            assert numpy.allclose(mmap_frame, frame)
            assert timestamp == ra['timestamp'][i]
        fmf.close()
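# --- Illustrative aside (not from the original sources) ---------------------
# Per the test above, mmap_flymovie() exposes a movie as a record array with
# 'frame' and 'timestamp' fields. A usage sketch (requires a real .fmf file
# on disk, so left commented):
# ra = FlyMovieFormat.mmap_flymovie('movie.fmf')
# first_frame = ra['frame'][0]   # (h, w) array for frame 0
# first_ts = ra['timestamp'][0]  # its timestamp, in seconds
# n_frames = len(ra)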
def setUp(self): """Loads video, arena, and female for testing tracking functions.""" self.video = FMF.FlyMovie('D:/test_data/test_01.fmf') self.arena = trk_arena.CircularArena(self.video) self.arena.calculate_background() self.arena.center = (self.arena.height / 2, self.arena.width / 2) self.arena.radius = (self.arena.height / 2) self.female = trk_female.Female(self.arena) self.female.center = (238, 320) self.female.head = (233, 318) self.female.rear = (256, 281) self.female.maj_ax_rad = 28 self.female.min_ax_rad = 14 self.female.orientation = -15 self.test_frame = long(6059)
def run(self):
    filename = time.strftime('movie%Y%m%d_%H%M%S.fmf')
    depth = FMF.format2bpp_func(self.format)
    fmf_saver = FMF.FlyMovieSaver(filename,
                                  version=3,
                                  format=self.format,
                                  bits_per_pixel=depth,
                                  )
    while 1:
        try:
            frame, timestamp = self.backlog.pop(0)
        except IndexError:
            # no frame available
            if self.quit_when_done.isSet():
                break
            else:
                time.sleep(0.05)  # wait 50 msec
                continue  # try again
        fmf_saver.add_frame(frame, timestamp)
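# --- Illustrative aside (not from the original sources) ---------------------
# Minimal FlyMovieSaver usage mirroring the writer loop above; a sketch that
# assumes motmot is installed and MONO8 frames:
import numpy as np
from motmot.FlyMovieFormat import FlyMovieFormat as FMF

saver = FMF.FlyMovieSaver('example.fmf', version=3, format='MONO8',
                          bits_per_pixel=8)
for i in range(3):
    frame = np.zeros((480, 640), dtype=np.uint8)  # dummy frame
    saver.add_frame(frame, i / 30.0)  # (image, timestamp in seconds)
saver.close()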
def __init__(self, fmf_dir, savedir, fly_id, plot_overlays=False):
    if savedir[-1] == '/':
        savedir = savedir[:-1]
    if not os.path.exists(savedir + '/temp_png'):
        os.makedirs(savedir + '/temp_png')
    self._savedir = savedir
    if fmf_dir[-1] == '/':
        self._fmf_dir = fmf_dir[:-1]
    else:
        self._fmf_dir = fmf_dir
    self._plot_overlays = plot_overlays
    self._expdir = fmf_dir.rsplit('/', 1)[0]
    self._bag = utilities.match_fmf_and_bag(self._fmf_dir,
                                            (self._expdir + '/BAGS'))
    self._wide = utilities.match_wide_to_zoom(self._fmf_dir,
                                              (self._expdir + '/'))
    self._widefmf = FMF.FlyMovie(self._wide)
    self._handle, __, ___ = utilities.parse_fmftime(self._fmf_dir)
    self._zoomfmf, self._data, self._Tzero = self.get_data()
    # some experiments are done with LASER0 off, and some bagfiles have a
    # few msgs before laser config, so only treat mean state in (0.5, 0.99]
    # as "illuminated with laser0"
    if 0.5 <= self._data.Laser0_state.mean() <= 0.99:
        self.ILLUMINATED_WITH_LASER0 = 1
    else:
        self.ILLUMINATED_WITH_LASER0 = 0
    self._image_height, self._image_width = self._zoomfmf.get_frame(0)[0].shape
def generate_background_image(fmf_file, sample_rate, file_path):
    # wide_fmf in FMF format; sample_rate: use every nth frame
    wide_fmf = FMF.FlyMovie(fmf_file)
    frame0, _ = wide_fmf.get_frame(0)
    # numpy shape is (rows, cols); the original unpacked these in the
    # opposite order, which made the names misleading
    image_height, image_width = frame0.shape
    acc = np.zeros((image_height, image_width), np.float32)  # 32 bit accumulator
    for frame_number in range(0, wide_fmf.get_n_frames(), sample_rate):
        frame, timestamp = wide_fmf.get_frame(frame_number)
        acc = np.maximum.reduce([frame, acc])
    fig = plt.figure(frameon=False)
    fig.set_size_inches(image_width, image_height)
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    fig.add_axes(ax)
    ax.imshow(acc, cmap=cm.Greys_r)
    plt.savefig(file_path, dpi=1)
    plt.close('all')
    return
def update_meta_from_video(self):
    """Gets summary statistics about this class' associated video file.

    If the video file associated with this VideoMeta object exists on disk,
    the following attributes will be updated: (1) fps, (2) start_time,
    (3) end_time, (4) timestamps.
    """
    if not os.path.exists(self.filename):
        raise AttributeError(
            'self.filename -- {} '.format(self.filename) +
            '-- does not exist on disk. Please update filename with a ' +
            'valid file path.')

    video = FMF.FlyMovie(self.filename)
    self.timestamps = video.get_all_timestamps()
    self.fps = 1. / np.mean(np.diff(self.timestamps))
    self.start_time = datetime.fromtimestamp(
        self.timestamps[0]).strftime('%Y-%m-%d %H:%M:%S')
    self.end_time = datetime.fromtimestamp(
        self.timestamps[-1]).strftime('%Y-%m-%d %H:%M:%S')
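# --- Illustrative aside (not from the original sources) ---------------------
# The start/end strings above come straight from epoch-second timestamps via
# datetime.fromtimestamp(); in isolation:
from datetime import datetime

ts = 1554829784.0  # hypothetical epoch seconds, like self.timestamps[0]
start_time = datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')
# yields e.g. '2019-04-09 ...' in the local timezone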
def track_proboscis(fmf_file, tracking_file):
    s = pd.DataFrame({
        'area': [], 'cx': [], 'cy': [], 'dCentre': [], 'length': [],
        'tipAngle': [], 'tip_x': [], 'tip_y': []
    })
    fmf = FMF.FlyMovie(fmf_file)
    tracking = pd.read_pickle(tracking_file)
    if not os.path.exists(fmf_file.rsplit('/', 1)[0] + '/proboscis_movie'):
        os.makedirs(fmf_file.rsplit('/', 1)[0] + '/proboscis_movie')
    for framenumber in range(0, fmf.get_n_frames()):
        try:
            head = get_head_image(fmf.get_frame(framenumber)[0],
                                  tracking.ix[framenumber])
        except:
            # fall back to the first tracking row if this frame is untracked
            head = get_head_image(fmf.get_frame(framenumber)[0], tracking[0:1])
        pro = detect_proboscis(head)
        s.loc[framenumber] = pro
        imcopy = head.copy()
        try:
            cv2.line(imcopy, (75, 75), (int(pro.tip_x), int(pro.tip_y)),
                     (255, 255, 0), 2)
        except:
            # no proboscis tip detected for this frame
            cv2.putText(imcopy, 'X', (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1,
                        (255, 255, 255), 1)
        cv2.putText(imcopy, str(framenumber), (10, 25),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1)
        cv2.imwrite(fmf_file.rsplit('/', 1)[0] +
                    '/proboscis_movie/_tmp%05d.png' % (framenumber), imcopy)
    s.to_pickle(fmf_file.rsplit('/', 1)[0] + '/proboscis_data.pickle')
    return s
def fmf2tiff(names):
    '''
    Converts a list of .fmf files to .tiff files so they can be converted to mp4s. \
    Can batch process multiple .fmf videos into multiple directories of .tiff files.

    Parameters:
    names (list): a list of the .fmf files to be converted

    Returns:
    A directory of .tiff files for each .fmf object. \
    Can undo in terminal by navigating to `vid_path` and executing `rm -r */`
    '''
    assert names, \
        "You've inputted an empty list. Please provide a populated list."
    for name in names:
        assert ".fmf" in name, \
            f"The file {name} is not an .fmf video. Please provide an .fmf file."

    fmfs = []
    for name in names:
        # Make a list of fmf objects:
        fmfs.append(FMF.FlyMovie(name))
        # Get an fmf from the fmfs:
        fmf = fmfs[names.index(name)]
        # For each fmf, convert it to a series of .tiffs and store the series
        # in its respective directory:
        print("Converting .fmf video to .tiff files ...")
        for i in tqdm.tqdm(range(len(fmf.get_all_timestamps()))):
            skimage.io.imsave(arr=fmf.get_frame(i)[0],
                              fname=name.replace('.fmf', '') + '/' +
                              str(format(i, '08d')) + '.tiff')
def get_framerate_duration(names):
    '''
    Get the frame rate and length of video (secs) from a list of .fmf videos.

    Parameters:
    names (list): a list of the .fmf files to be converted

    Returns:
    None. Prints the frame rate (Hz) and duration (s) of each .fmf video.
    '''
    assert names, \
        "You've inputted an empty list. Please provide a populated list."
    for name in names:
        assert ".fmf" in name, \
            f"The file {name} is not an .fmf video. Please provide an .fmf file."

    fmfs = []
    for name in names:
        # Make a list of fmf objects:
        fmfs.append(FMF.FlyMovie(name))
        # Get an fmf from the fmfs
        fmf = fmfs[names.index(name)]
        vid_size = fmf.get_n_frames()
        time_stamps = fmf.get_all_timestamps()
        time_length = time_stamps[-1] - time_stamps[0]
        # frame rate in frames per second
        frame_rate = vid_size / time_length
        print(f"{frame_rate} Hz is frame rate and {time_length} s is length of video")
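# --- Illustrative aside (not from the original sources) ---------------------
# Worked example of the frame-rate arithmetic used above: 300 frames whose
# timestamps span 10 s give 30 Hz.
n_frames, t_first, t_last = 300, 5.0, 15.0
frame_rate = n_frames / (t_last - t_first)
assert frame_rate == 30.0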
def doit(
        movie_fname=None,
        reconstructor_fname=None,
        h5_fname=None,
        cam_id=None,
        dest_dir=None,
        transform=None,
        start=None,
        stop=None,
        h5start=None,
        h5stop=None,
        show_obj_ids=False,
        obj_only=None,
        image_format=None,
        subtract_frame=None,
        save_framelist_fname=None,
):
    if dest_dir is None:
        dest_dir = os.curdir
    if movie_fname is None:
        raise NotImplementedError('')
    if image_format is None:
        image_format = 'png'
    if cam_id is None:
        raise NotImplementedError('')

    if movie_fname.lower().endswith('.fmf'):
        movie = fmf_mod.FlyMovie(movie_fname)
    else:
        movie = ufmf_mod.FlyMovieEmulator(movie_fname)

    if start is None:
        start = 0
    if stop is None:
        stop = movie.get_n_frames() - 1

    ca = core_analysis.get_global_CachingAnalyzer()
    (obj_ids, unique_obj_ids, is_mat_file, data_file, extra) = \
        ca.initial_file_load(h5_fname)

    if obj_only is not None:
        unique_obj_ids = obj_only

    dynamic_model_name = extra['dynamic_model_name']
    if dynamic_model_name.startswith('EKF'):
        dynamic_model_name = dynamic_model_name[4:]

    if reconstructor_fname is None:
        reconstructor = flydra_core.reconstruct.Reconstructor(data_file)
    else:
        reconstructor = flydra_core.reconstruct.Reconstructor(
            reconstructor_fname)

    fix_w = movie.get_width()
    fix_h = movie.get_height()
    is_color = imops.is_coding_color(movie.get_format())

    if subtract_frame is not None:
        if not subtract_frame.endswith('.fmf'):
            raise NotImplementedError('only fmf supported for --subtract-frame')
        tmp_fmf = fmf_mod.FlyMovie(subtract_frame)
        if is_color:
            tmp_frame, tmp_timestamp = tmp_fmf.get_next_frame()
            subtract_frame = imops.to_rgb8(tmp_fmf.get_format(), tmp_frame)
            subtract_frame = subtract_frame.astype(
                np.float32)  # force upconversion to float
        else:
            tmp_frame, tmp_timestamp = tmp_fmf.get_next_frame()
            subtract_frame = imops.to_mono8(tmp_fmf.get_format(), tmp_frame)
            subtract_frame = subtract_frame.astype(
                np.float32)  # force upconversion to float

    if save_framelist_fname is not None:
        save_framelist_fd = open(save_framelist_fname, mode='w')

    movie_fno_count = 0
    for movie_fno in range(start, stop + 1):
        movie.seek(movie_fno)
        image, timestamp = movie.get_next_frame()
        h5_frame = extra['time_model'].timestamp2framestamp(timestamp)
        if h5start is not None:
            if h5_frame < h5start:
                continue
        if h5stop is not None:
            if h5_frame > h5stop:
                continue
        if is_color:
            image = imops.to_rgb8(movie.get_format(), image)
        else:
            image = imops.to_mono8(movie.get_format(), image)
        if subtract_frame is not None:
            new_image = np.clip(image - subtract_frame, 0, 255)
            image = new_image.astype(np.uint8)
        warnings.warn('not implemented: interpolating data')
        h5_frame = int(round(h5_frame))
        if save_framelist_fname is not None:
            save_framelist_fd.write('%d\n' % h5_frame)

        movie_fno_count += 1
        if 0:
            # save starting from frame 1
            save_fname_path = os.path.splitext(movie_fname)[0] + \
                '_frame%06d.%s' % (movie_fno_count, image_format)
        else:
            # frame is frame in movie file
            save_fname_path = os.path.splitext(movie_fname)[0] + \
                '_frame%06d.%s' % (movie_fno, image_format)
        save_fname_path = os.path.join(dest_dir, save_fname_path)
        if transform in ['rot 90', 'rot -90']:
            device_rect = (0, 0, fix_h, fix_w)
            canv = benu.Canvas(save_fname_path, fix_h, fix_w)
        else:
            device_rect = (0, 0, fix_w, fix_h)
            canv = benu.Canvas(save_fname_path, fix_w, fix_h)
        user_rect = (0, 0, image.shape[1], image.shape[0])
        show_points = []
        with canv.set_user_coords(device_rect, user_rect, transform=transform):
            canv.imshow(image, 0, 0)
            for obj_id in unique_obj_ids:
                try:
                    data = ca.load_data(
                        obj_id,
                        data_file,
                        frames_per_second=extra['frames_per_second'],
                        dynamic_model_name=dynamic_model_name,
                    )
                except core_analysis.NotEnoughDataToSmoothError:
                    continue
                cond = data['frame'] == h5_frame
                idxs = np.nonzero(cond)[0]
                if not len(idxs):
                    continue  # no data at this frame for this obj_id
                assert len(idxs) == 1
                idx = idxs[0]
                row = data[idx]

                # circle over data point
                xyz = row['x'], row['y'], row['z']
                x2d, y2d = reconstructor.find2d(cam_id, xyz, distorted=True)
                radius = 10
                canv.scatter([x2d], [y2d],
                             color_rgba=green,
                             markeredgewidth=3,
                             radius=radius)

                if 1:
                    # z line to XY plane through origin
                    xyz0 = row['x'], row['y'], 0
                    x2d_z0, y2d_z0 = reconstructor.find2d(cam_id, xyz0,
                                                          distorted=True)
                    warnings.warn('not distorting Z line')
                    if 1:
                        xdist = x2d - x2d_z0
                        ydist = y2d - y2d_z0
                        dist = np.sqrt(xdist**2 + ydist**2)
                        start_frac = radius / dist
                        if radius > dist:
                            start_frac = 0
                        x2d_r = x2d - xdist * start_frac
                        y2d_r = y2d - ydist * start_frac
                    else:
                        x2d_r = x2d
                        y2d_r = y2d
                    canv.plot([x2d_r, x2d_z0], [y2d_r, y2d_z0],
                              color_rgba=green,
                              linewidth=3)
                if show_obj_ids:
                    show_points.append((obj_id, x2d, y2d))
        for show_point in show_points:
            obj_id, x2d, y2d = show_point
            x, y = canv.get_transformed_point(x2d, y2d, device_rect,
                                              user_rect, transform=transform)
            canv.text(
                'obj_id %d' % obj_id,
                x,
                y,
                color_rgba=(0, 1, 0, 1),
                font_size=20,
            )
        canv.save()
    return figure


baglist = []
for bag in glob.glob(BAGS + '/*.bag'):
    bagtimestamp = parse_bagtime(bag)
    baglist.append((bag, bagtimestamp))
bagframe = DataFrame(baglist, columns=['Filepath', 'Timestamp'])
bagframe.index = pd.to_datetime(bagframe['Timestamp'])
bagframe = bagframe.sort()
bagframe.to_csv(BAGS + '/list_of_bags.csv', sep=',')

if not os.path.exists(VIDEO_DIR + 'temp_png'):
    os.makedirs(VIDEO_DIR + 'temp_png')

fmf = FMF.FlyMovie(glob.glob(VIDEO_DIR + '*.fmf')[0])
jaaba_data = sync_jaaba_with_ros((VIDEO_DIR + 'registered_trx.csv'),
                                 (glob.glob(VIDEO_DIR + '*.fmf')[0]))
_frame, _timestamp = fmf.get_frame(0)
# numpy shape is (rows, cols); the original unpacked width before height
image_height, image_width = _frame.shape

for frame_number in range(fmf.get_n_frames() - FIRST - LAST):
    if os.path.exists(VIDEO_DIR + 'temp_png/_tmp%05d.png' % (frame_number - FIRST)):
        continue
    frame, timestamp = fmf.get_frame(frame_number + FIRST)
    print frame_number
    jaaba_datum = jaaba_data[jaaba_data['Timestamp'] == pd.to_datetime(
def extract_frames(config, mode='automatic', algo='kmeans', crop=False,
                   userfeedback=True, cluster_step=1, cluster_resizewidth=30,
                   cluster_color=False, opencv=True, flymovie=False,
                   slider_width=25):
    """
    Extracts frames from the videos in the config.yaml file. Only the videos in the config.yaml will be used to select the frames.

    Use the function ``add_new_video`` at any stage of the project to add new videos to the config file and extract their frames.

    The provided function either selects frames from the videos in a random, temporally uniformly distributed way (uniform), by clustering based on visual appearance (k-means), or by manual selection.

    Three important parameters for automatic extraction: numframes2pick, start and stop are set in the config file.

    Please refer to the user guide for more details on methods and parameters https://www.biorxiv.org/content/biorxiv/early/2018/11/24/476531.full.pdf

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    mode : string
        String containing the mode of extraction. It must be either ``automatic`` or ``manual``.

    algo : string
        String specifying the algorithm to use for selecting the frames. Currently, deeplabcut supports either ``kmeans`` or ``uniform`` based selection. This flag is only required for ``automatic`` mode and the default is ``kmeans``. For uniform, frames are picked in a temporally uniform way, kmeans performs clustering on downsampled frames (see user guide for details). Note: color information is discarded for kmeans, thus e.g. for camouflaged octopus clustering one might want to change this.

    crop : bool, optional
        If this is set to True, a user interface pops up with a frame to select the cropping parameters. Use the left click to draw a cropping area and hit the button set cropping parameters to save the cropping parameters for a video. The default is ``False``; if provided it must be either ``True`` or ``False``.

    userfeedback: bool, optional
        If this is set to false during automatic mode then frames for all videos are extracted. The user can set this to true, which will result in a dialog, where the user is asked for each video if (additional/any) frames from this video should be extracted. Use this, e.g. if you have already labeled some folders and want to extract data for new videos.

    cluster_resizewidth: number, default: 30
        For k-means one can change the width to which the images are downsampled (aspect ratio is fixed).

    cluster_step: number, default: 1
        By default each frame is used for clustering, but for long videos one could only use every nth frame (set by: cluster_step). This saves memory before clustering can start, however, reading the individual frames takes longer due to the skipping.

    cluster_color: bool, default: False
        If false then each downsampled image is treated as a grayscale vector (discarding color information). If true, then the color channels are considered. This increases the computational complexity.

    opencv: bool, default: True
        Uses openCV for loading & extracting (otherwise moviepy (legacy)).

    flymovie: bool, default: False
        Uses motmot's FlyMovieFormat reader for .fmf videos (present in the signature but undocumented in the original).

    slider_width: number, default: 25
        Width of the video frames slider, in percent of window.

    Examples
    --------
    for selecting frames automatically with 'kmeans' and want to crop the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',True)
    --------
    for selecting frames automatically with 'kmeans' and considering the color information.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic','kmeans',cluster_color=True)
    --------
    for selecting frames automatically with 'uniform' and want to crop the frames.
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','automatic',crop=True)
    --------
    for selecting frames manually,
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual')
    --------
    for selecting frames manually, with a 60% wide frames slider
    >>> deeplabcut.extract_frames('/analysis/project/reaching-task/config.yaml','manual', slider_width=60)

    While selecting the frames manually, you do not need to specify the ``crop`` parameter in the command. Rather, you will get a prompt in the graphic user interface to choose if you need to crop or not.
    --------
    """
    import os
    import sys
    import numpy as np
    from pathlib import Path
    from skimage import io
    import skimage
    from skimage.util import img_as_ubyte
    import matplotlib.pyplot as plt
    import matplotlib.patches as patches
    from deeplabcut.utils import frameselectiontools
    from deeplabcut.utils import auxiliaryfunctions
    from matplotlib.widgets import RectangleSelector

    if mode == "manual":
        wd = Path(config).resolve().parents[0]
        os.chdir(str(wd))
        from deeplabcut.generate_training_dataset import frame_extraction_toolbox
        from deeplabcut.utils import select_crop_parameters
        frame_extraction_toolbox.show(config, slider_width)

    elif mode == "automatic":
        config_file = Path(config).resolve()
        cfg = auxiliaryfunctions.read_config(config_file)
        print("Config file read successfully.")

        numframes2pick = cfg['numframes2pick']
        start = cfg['start']
        stop = cfg['stop']

        # Check for variable correctness
        if start > 1 or stop > 1 or start < 0 or stop < 0 or start >= stop:
            raise Exception(
                "Erroneous start or stop values. Please correct it in the config file."
            )
        if numframes2pick < 1 and not int(numframes2pick):
            raise Exception(
                "Perhaps consider extracting more, or a natural number of frames."
            )

        videos = cfg['video_sets'].keys()
        if opencv:
            import cv2
        elif flymovie:
            from motmot.FlyMovieFormat import FlyMovieFormat as FMF
            import cv2
        else:
            from moviepy.editor import VideoFileClip

        for vindex, video in enumerate(videos):
            #plt.close("all")
            global coords
            coords = cfg['video_sets'][video]['crop'].split(',')

            if userfeedback:
                print("Do you want to extract (perhaps additional) frames for video:",
                      video, "?")
                askuser = input("yes/no")
            else:
                askuser = "yes"  # NB: this literal was masked ("******") in the source dump; "yes" restores the no-feedback path

            if askuser == 'y' or askuser == 'yes' or askuser == 'Ja' or askuser == 'ha':
                # multilanguage support :)
                if opencv:
                    cap = cv2.VideoCapture(video)
                    fps = cap.get(5)  # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
                    nframes = int(cap.get(7))
                    duration = nframes * 1. / fps
                elif flymovie:
                    cap = FMF.FlyMovie(video)
                    nframes = cap.n_frames
                    while True:
                        try:
                            cap.get_frame(nframes)
                        except FMF.NoMoreFramesException:
                            nframes -= 1
                            continue
                        break
                    fps = 1. / (cap.get_frame(min(100, nframes))[1] -
                                cap.get_frame(min(100, nframes) - 1)[1])
                    duration = cap.get_frame(nframes)[1]
                else:
                    # Moviepy:
                    clip = VideoFileClip(video)
                    fps = clip.fps
                    duration = clip.duration
                    nframes = int(np.ceil(clip.duration * 1. / fps))

                indexlength = int(np.ceil(np.log10(nframes)))
                if crop == True:
                    from deeplabcut.utils import select_crop_parameters
                    if opencv:
                        cap.set(2, start * duration)
                        ret, frame = cap.read()
                        if ret:
                            image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    elif flymovie:
                        frame = cap.get_frame(int(nframes * start))[0]
                        if frame.ndim != 3:
                            frame = skimage.color.gray2rgb(frame)
                        image = frame
                    else:
                        image = clip.get_frame(start * clip.duration)
                        # frame is accessed by index *1./clip.fps (fps cancels)

                    fname = Path(video)
                    output_path = Path(config).parents[0] / 'labeled-data' / fname.stem

                    if output_path.exists():
                        fig, ax = plt.subplots(1)
                        # Call the GUI to select the cropping parameters
                        coords = select_crop_parameters.show(config, image)
                        # Update the config.yaml file with current cropping parameters
                        cfg['video_sets'][video] = {
                            'crop': ', '.join(
                                map(str, [
                                    int(coords[0]), int(coords[1]),
                                    int(coords[2]), int(coords[3])
                                ]))
                        }
                        auxiliaryfunctions.write_config(config_file, cfg)

                        if len(os.listdir(output_path)) == 0:  # check if empty
                            # store full frame from random location (good for augmentation)
                            index = int(start * duration +
                                        np.random.rand() * duration * (stop - start))
                            if opencv:
                                cap.set(1, index)
                                ret, frame = cap.read()
                                if ret:
                                    image = img_as_ubyte(
                                        cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            elif flymovie:
                                frame = cap.get_frame(int(nframes * start))[0]
                                if frame.ndim != 3:
                                    frame = skimage.color.gray2rgb(frame)
                                image = img_as_ubyte(frame)
                            else:
                                image = img_as_ubyte(
                                    clip.get_frame(index * 1. / clip.fps))
                                clip = clip.crop(y1=int(coords[2]),
                                                 y2=int(coords[3]),
                                                 x1=int(coords[0]),
                                                 x2=int(coords[1]))  # now crop clip
                            saveimg = str(output_path) + '/img' + \
                                str(index).zfill(indexlength) + ".png"
                            io.imsave(saveimg, image)
                        else:
                            askuser = input(
                                "The directory already contains some frames. Do you want to add to it?(yes/no): ")
                            if askuser == 'y' or askuser == 'yes' or askuser == 'Y' or askuser == 'Yes':
                                #clip=clip.crop(y1 = int(coords[2]),y2 = int(coords[3]),x1 = int(coords[0]), x2 = int(coords[1]))
                                index = int(start * duration +
                                            np.random.rand() * duration * (stop - start))
                                if opencv:
                                    cap.set(1, index)
                                    ret, frame = cap.read()
                                    if ret:
                                        image = img_as_ubyte(
                                            cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                                elif flymovie:
                                    frame = cap.get_frame(int(nframes * start))[0]
                                    if frame.ndim != 3:
                                        frame = skimage.color.gray2rgb(frame)
                                    image = img_as_ubyte(frame)
                                else:
                                    image = img_as_ubyte(
                                        clip.get_frame(index * 1. / clip.fps))
                                    clip = clip.crop(y1=int(coords[2]),
                                                     y2=int(coords[3]),
                                                     x1=int(coords[0]),
                                                     x2=int(coords[1]))
                                saveimg = str(output_path) + '/img' + \
                                    str(index).zfill(indexlength) + ".png"
                                io.imsave(saveimg, image)
                                pass
                            else:
                                sys.exit("Delete the frames and try again later!")
                else:
                    # without cropping a full size frame will not be extracted
                    # >> thus one more frame should be selected in next stage.
                    numframes2pick = cfg['numframes2pick'] + 1

                print("Extracting frames based on %s ..." % algo)

                if algo == 'uniform':
                    # extract n-1 frames (0 was already stored)
                    if opencv:
                        frames2pick = frameselectiontools.UniformFramescv2(
                            cap, numframes2pick - 1, start, stop)
                    elif flymovie:
                        frames2pick = frameselectiontools.UniformFramesfmf(
                            cap, numframes2pick - 1, start, stop)
                    else:
                        frames2pick = frameselectiontools.UniformFrames(
                            clip, numframes2pick - 1, start, stop)
                elif algo == 'kmeans':
                    if opencv:
                        frames2pick = frameselectiontools.KmeansbasedFrameselectioncv2(
                            cap, numframes2pick - 1, start, stop, crop, coords,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                    elif flymovie:
                        print("FMF not supported by kmeans as of now!")
                        frames2pick = []
                    else:
                        frames2pick = frameselectiontools.KmeansbasedFrameselection(
                            clip, numframes2pick - 1, start, stop,
                            step=cluster_step,
                            resizewidth=cluster_resizewidth,
                            color=cluster_color)
                else:
                    print("Please implement this method yourself and send us a "
                          "pull request! Otherwise, choose 'uniform' or 'kmeans'.")
                    frames2pick = []

                output_path = Path(config).parents[0] / 'labeled-data' / Path(video).stem
                if opencv:
                    for index in frames2pick:
                        cap.set(1, index)  # extract a particular frame
                        ret, frame = cap.read()
                        if ret:
                            image = img_as_ubyte(
                                cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
                            img_name = str(output_path) + '/img' + \
                                str(index).zfill(indexlength) + ".png"
                            if crop:
                                io.imsave(img_name,
                                          image[int(coords[2]):int(coords[3]),
                                                int(coords[0]):int(coords[1]), :])
                                # y1 = int(coords[2]), y2 = int(coords[3]),
                                # x1 = int(coords[0]), x2 = int(coords[1])
                            else:
                                io.imsave(img_name, image)
                        else:
                            print("Frame", index, " not found!")
                    cap.release()
                elif flymovie:
                    for index in frames2pick:
                        print(index)
                        frame = cap.get_frame(int(index))[0]
                        if frame.ndim != 3:
                            frame = skimage.color.gray2rgb(frame)
                        image = img_as_ubyte(frame)
                        img_name = str(output_path) + '/img' + \
                            str(index).zfill(indexlength) + ".png"
                        if crop:
                            io.imsave(img_name,
                                      image[int(coords[2]):int(coords[3]),
                                            int(coords[0]):int(coords[1]), :])
                        else:
                            io.imsave(img_name, image)
                    cap.close()
                else:
                    for index in frames2pick:
                        try:
                            image = img_as_ubyte(
                                clip.get_frame(index * 1. / clip.fps))
                            img_name = str(output_path) + '/img' + \
                                str(index).zfill(indexlength) + ".png"
                            io.imsave(img_name, image)
                            if np.var(image) == 0:  # constant image
                                print("Seems like black/constant images are extracted "
                                      "from your video. Perhaps consider using opencv "
                                      "under the hood, by setting: opencv=True")
                        except FileNotFoundError:
                            print("Frame # ", index, " does not exist.")
                    # close video.
                    clip.close()
                    del clip
    else:
        print("Invalid MODE. Choose either 'manual' or 'automatic'. "
              "Check ``help(deeplabcut.extract_frames)`` on python and "
              "``deeplabcut.extract_frames?`` for ipython/jupyter notebook "
              "for more details.")

    print("\nFrames were selected."
          "\nYou can now label the frames using the function 'label_frames' "
          "(if you extracted enough frames for all videos).")
                )  # BUG: This isn't quite right, as the ROS encodings don't all match those in FlyMovieFormat.py

                # Save the image file.
                cv.SaveImage(fullpathnameextImage, image)
                print('Wrote %s' % fullpathnameextImage)
                iImage += 1
            else:
                print('Only image encoding==mono8 is supported. This one has %s'
                      % msg.encoding)

        if (fullpathnameextFmf is not None):
            if (fmf is None):
                fmf = FlyMovieFormat.FlyMovieSaver(fullpathnameextFmf,
                                                   version=3,
                                                   format=format,
                                                   bits_per_pixel=bpp)
            # Add the frame to the .fmf
            if (pixels is not None):
                fmf.add_frame(pixels, msg.header.stamp.to_sec())

        # Write the filename,timestamp info to the .csv file.
        fidCsv.write('%s.%s, %0.9f\n' % (nameImage, extImage,
                                         msg.header.stamp.to_sec()))

# If requested, convert the image files to an .mov file, then delete the images.
if (fullpathnameextMov is not None) and (extImage is not None):
    # Run avconv.
    #cmdCreateVideoFile = 'avconv -y -r 60 -i %s/%%08d.%s -same_quant -r 60 %s && rm -rf %s && echo Finished.' % (fullpathImages, extImage, fullpathnameextMov, fullpathImages)
    cmdCreateVideoFile = 'avconv -y -r 60 -i %s/%%08d.%s -same_quant -r 60 %s && echo Finished.' % (
def composite_fmfs(widef, zoomf, rosbagf, imagepath, fps=15):
    wide = fmf.FlyMovie(widef)
    zoom = fmf.FlyMovie(zoomf)
    bag = rosbag.Bag(rosbagf)

    wide_ts = wide.get_all_timestamps()
    zoom_ts = zoom.get_all_timestamps()

    objs = collections.defaultdict(list)
    for topic, msg, t in bag.read_messages(topics='/flymad/tracked'):
        stamp = msg.header.stamp
        state = msg.state_vec
        objs[msg.obj_id].append(
            (stamp.secs + stamp.nsecs * 1e-9, state[0], state[1]))

    obj_times = []
    for obj_id in objs:
        objs[obj_id] = np.array(objs[obj_id],
                                dtype=[('stamp', np.float64),
                                       ('x', np.float32),
                                       ('y', np.float32)])
        stamps = objs[obj_id]['stamp']
        obj_times.append((np.min(stamps), np.max(stamps), obj_id))
    obj_times = np.array(obj_times)

    raw2d = []
    for topic, msg, t in bag.read_messages(topics='/flymad/raw_2d_positions'):
        stamp = msg.header.stamp
        stamp = stamp.secs + stamp.nsecs * 1e-9
        for pt in msg.points:
            raw2d.append((stamp, pt.x, pt.y))
    raw2d = np.array(raw2d, dtype=[('stamp', np.float64),
                                   ('x', np.float32),
                                   ('y', np.float32)])

    micro_vels = []
    for topic, msg, t in bag.read_messages(topics='/flymad_micro/velocity'):
        micro_vels.append((t.secs + t.nsecs * 1e-9, msg.velA, msg.velB))
    micro_vels = np.array(micro_vels, dtype=[('t', np.float64),
                                             ('A', np.float32),
                                             ('B', np.float32)])

    micro_position_echos = []
    for topic, msg, t in bag.read_messages(topics='/flymad_micro/position_echo'):
        micro_position_echos.append((t.secs + t.nsecs * 1e-9, msg.posA, msg.posB))
    micro_position_echos = np.array(micro_position_echos,
                                    dtype=[('t', np.float64),
                                           ('A', np.float32),
                                           ('B', np.float32)])

    tzname = None
    for topic, msg, t in bag.read_messages(topics='/timezone'):
        tzname = msg.data
    if tzname is None:
        # default timezone
        tzname = 'CET'
        warnings.warn('No data in /timezone topic - setting default timezone '
                      'to %s' % tzname)

    start_time = np.max([np.min(wide_ts), np.min(zoom_ts), np.min(raw2d['stamp'])])
    stop_time = np.min([np.max(wide_ts), np.max(zoom_ts), np.max(raw2d['stamp'])])
    dur = stop_time - start_time
    rate = 1.0 / fps

    #HACK ANDREW
    use_wide_zoomed = False

    times = np.arange(start_time, stop_time + 1e-10, rate)
    print 'rendered at %f fps, this is %d frames' % (fps, len(times))

    tz = pytz.timezone(tzname)
    pretty_time = DateFormatter(tz)

    progress = get_progress_bar("frame", len(times))

    for out_fno, cur_time in enumerate(times):
        progress.update(out_fno + 1)

        valid_obj_cond = (obj_times[:, 0] <= cur_time) & (cur_time <= obj_times[:, 1])
        valid_obj_ids = map(int, obj_times[valid_obj_cond, 2])

        wide_frame, this_wide_ts = wide.get_frame_at_or_before_timestamp(cur_time)
        zoom_frame, this_zoom_ts = zoom.get_frame_at_or_before_timestamp(cur_time)

        #cond = (cur_time - rate < raw2d['stamp']) & (raw2d['stamp'] <= cur_time)
        cond = (cur_time - 50 * rate < raw2d['stamp']) & (raw2d['stamp'] <= cur_time)
        this_raw2d = raw2d[cond]

        save_fname_path = os.path.join(imagepath, 'out%06d.png' % out_fno)

        final_w = 1024
        final_h = 768 / (1 if use_wide_zoomed else 2)
        margin = 10
        max_panel_w = final_w // 2 - 3 * margin // 2
        max_panel_h = final_h - 2 * margin

        canv = benu.Canvas(save_fname_path, final_w, final_h)

        # wide-angle view --------------------------
        x0 = 0
        w = wide.get_width()
        y0 = 0
        h = wide.get_height()
        # commented code disables zoomed region of widefield view.
        if use_wide_zoomed:
            warnings.warn('WARNING: zooming on center region as a hack!!!!')
            x0 += 100
            w -= 240
        user_rect = (x0, y0, w, h)
        dev_w, dev_h = scale(w, h, max_panel_w, max_panel_h)
        device_rect = (margin, margin, dev_w, dev_h)

        idxs = np.nonzero(micro_vels['t'] <= cur_time)[0]
        if len(idxs):
            last_idx = idxs[-1]
            row = micro_vels[last_idx]
            velA = row['A']
            velB = row['B']
            vel_age = cur_time - row['t']
        else:
            velA = None
            velB = None
            vel_age = None

        idxs = np.nonzero(micro_position_echos['t'] <= cur_time)[0]
        if len(idxs):
            last_idx = idxs[-1]
            row = micro_position_echos[last_idx]
            posA = row['A']
            posB = row['B']
            pos_age = cur_time - row['t']
        else:
            posA = None
            posB = None
            pos_age = None

        wide_imgs_benu = [(device_rect, user_rect)]

        if use_wide_zoomed:
            r = 25
            wz_height = max_panel_h // 2
            dev_w, dev_h = scale(r * 2, r * 2, max_panel_w, wz_height)
            device_rect_crop = (margin, final_h - margin - wz_height, dev_w, dev_h)
            if 1:
                warnings.warn('magnified wide-field view should use objs[ obj_id ] '
                              'to zoom on tracked object')
                xc = this_raw2d['x'][-1]
                yc = this_raw2d['y'][-1]
            x0 = xc - r
            x1 = xc + r
            if x0 < 0:
                x0 = 0
                x1 = 2 * r
            elif x1 >= wide.get_width():
                x1 = wide.get_width() - 1  # the original was missing the call parentheses
                x0 = x1 - 2 * r
            y0 = yc - r
            y1 = yc + r
            if y0 < 0:
                y0 = 0
                y1 = 2 * r
            elif y1 >= wide.get_height():
                y1 = wide.get_height() - 1
                y0 = y1 - 2 * r
            user_rect_crop = (x0, y0, 2 * r, 2 * r)
            wide_imgs_benu.append((device_rect_crop, user_rect_crop))

        for d, u in wide_imgs_benu:
            with canv.set_user_coords(d, u):
                canv.imshow(wide_frame, 0, 0, filter='nearest')
                canv.scatter(this_raw2d['x'], this_raw2d['y'],
                             color_rgba=(0, 1, 0, 0.3), radius=2.0)
                for obj_id in valid_obj_ids:
                    stamps = objs[obj_id]['stamp']
                    cond = stamps <= cur_time
                    last_idx = np.nonzero(cond)[0][-1]
                    r = objs[obj_id][last_idx]
                    canv.scatter([r['x']], [r['y']],
                                 color_rgba=(1, 0, 1, 0.4), radius=1.5)
                    if 1:
                        #with benu_ctx.clip_off(): # also transform off
                        canv.text('%d' % obj_id, r['x'], r['y'],
                                  color_rgba=(1, 0, 1, 0.4))
            _dx0, _dy0, _dw, _dh = d
            canv.poly([_dx0, _dx0 + _dw, _dx0 + _dw, _dx0, _dx0],
                      [_dy0, _dy0, _dy0 + 30, _dy0 + 30, _dy0],
                      color_rgba=(0, 0, 0, 1))
            canv.text('%s' % pretty_time.format_date(cur_time), 15, 25,
                      color_rgba=(1, 1, 1, 1), font_face="Ubuntu", bold=False,
                      font_size=14)

        # zoomed view --------------------------
        w, h = zoom.get_width(), zoom.get_height()
        dev_w, dev_h = scale(w, h, max_panel_w, max_panel_h)
        device_rect = (final_w // 2 + margin // 2, margin, dev_w, dev_h)
        user_rect = (0, 0, zoom.get_width(), zoom.get_height())
        with canv.set_user_coords(device_rect, user_rect):
            canv.imshow(zoom_frame, 0, 0, filter='best')
            if 1:
                #with benu_ctx.clip_off(): # also transform off
                if velA is not None:
                    canv.text('vel: {0:>+8.1f}, {1:> 8.1f} (data age: {2:>4.1f} msec)'
                              .format(float(velA), float(velB), float(vel_age * 1e3)),
                              5, 15, color_rgba=(1, 1, 1, 1),
                              font_face="Droid Sans Mono", bold=False)
                if posA is not None:
                    canv.text('pos: {0:> 8.1f}, {1:> 8.1f} (data age: {2:>4.1f} msec)'
                              .format(float(posA), float(posB), float(pos_age * 1e3)),
                              5, 25, color_rgba=(1, 1, 1, 1),
                              font_face="Droid Sans Mono", bold=False)
        canv.save()
    progress.finish()
    # frame attributes
    return [tmstmp_par, *image_analysis(im_list_par, bgim_par)]


if __name__ == "__main__":
    print('test')
    warnings.filterwarnings('ignore')

    # File path to the video file
    fname = ('/data/2019_04_09_LO_1M_sulcatone_run_03'
             '_cam_0_date_2019_04_09_time_14_49_44_v001.fmf')
    filename = '2019_04_09_LO_1M_sulcatone_run_03'

    # create FMF object of the movie and get image dimensions
    fmf = FMF.FlyMovie(fname)
    frame_width = fmf.get_width()
    frame_height = fmf.get_height()

    image_labels = [
        'timestamp,s', 'area', 'bbox_min_row', 'bbox_min_col', 'bbox_max_row',
        'bbox_max_col', 'bbox_area', 'centroid_row', 'centroid_col',
        'convex_area', 'eccentricity', 'equivalent_diameter', 'euler_number',
        'extent', 'filled_area', 'label', 'local_centroid_row',
        'local_centoid_col', 'major_axis_length', 'max_intensity',
        'mean_intensity', 'min_intensity', 'minor_axis_length', 'orientation',
        'perimeter', 'solidity', 'weighted_centroid_row',
        'weighted_centroid_col', 'weighted_local_centoid_row',
        'weighted_local_centroid_col'
    ]
def AnalyzeVideo(video, DLCscorer, trainFraction, cfg, dlc_cfg, sess, inputs,
                 outputs, pdindex, save_as_csv, destfolder=None, start=0, stop=1):
    ''' Helper function for analyzing a video '''
    print("Starting to analyze ", video)
    vname = Path(video).stem
    if destfolder is None:
        destfolder = str(Path(video).parents[0])
    dataname = os.path.join(destfolder, vname + DLCscorer + '.h5')
    try:
        # Attempt to load data...
        pd.read_hdf(dataname)
        print("Video already analyzed!", dataname)
    except FileNotFoundError:
        print("Loading ", video)
        vid_type = 'cv2'
        if '.fmf' in video:
            cap = FMF.FlyMovie(video)
            vid_type = 'fmf'
        else:
            cap = cv2.VideoCapture(video)

        nframes, fps, duration = (0, 0, 0)
        size = (0, 0)
        if vid_type == 'fmf':
            # walk back from the header's frame count to the last readable frame
            nframes = cap.n_frames
            while True:
                try:
                    cap.get_frame(nframes)
                except FMF.NoMoreFramesException:
                    nframes -= 1
                    continue
                break
            fps = 1. / (cap.get_frame(min(100, nframes))[1] -
                        cap.get_frame(min(100, nframes) - 1)[1])
            duration = cap.get_frame(nframes)[1]
            size = cap.framesize
        else:
            fps = cap.get(5)  # https://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#videocapture-get
            nframes = int(cap.get(7))
            duration = nframes * 1. / fps
            size = (int(cap.get(4)), int(cap.get(3)))
        ny, nx = size

        print("Duration of video [s]: ", round(duration, 2),
              ", recorded with ", round(fps, 2), "fps!")
        print("Overall # of frames: ", nframes,
              " found with (before cropping) frame dimensions: ", nx, ny)

        # NB: this reassignment shadows the `start` argument, so the
        # wall-clock time is what gets passed as start= to GetPose*fmf below
        start = time.time()
        print("Starting to extract posture")
        if int(dlc_cfg["batch_size"]) > 1:
            if vid_type == 'fmf':
                PredicteData, nframes = GetPoseFfmf(cfg, dlc_cfg, sess, inputs,
                                                    outputs, cap, nframes,
                                                    int(dlc_cfg["batch_size"]),
                                                    start=start, stop=stop)
            else:
                PredicteData, nframes = GetPoseF(cfg, dlc_cfg, sess, inputs,
                                                 outputs, cap, nframes,
                                                 int(dlc_cfg["batch_size"]))
        else:
            if vid_type == 'fmf':
                PredicteData, nframes = GetPoseSfmf(cfg, dlc_cfg, sess, inputs,
                                                    outputs, cap, nframes,
                                                    start=start, stop=stop)
            else:
                PredicteData, nframes = GetPoseS(cfg, dlc_cfg, sess, inputs,
                                                 outputs, cap, nframes)
        stop = time.time()

        if cfg['cropping'] == True:
            coords = [cfg['x1'], cfg['x2'], cfg['y1'], cfg['y2']]
        else:
            coords = [0, nx, 0, ny]

        dictionary = {
            "start": start,
            "stop": stop,
            "run_duration": stop - start,
            "Scorer": DLCscorer,
            "DLC-model-config file": dlc_cfg,
            "fps": fps,
            "batch_size": dlc_cfg["batch_size"],
            "frame_dimensions": (ny, nx),
            "nframes": nframes,
            "iteration (active-learning)": cfg["iteration"],
            "training set fraction": trainFraction,
            "cropping": cfg['cropping'],
            "cropping_parameters": coords
        }
        metadata = {'data': dictionary}

        print("Saving results in %s..." % (Path(video).parents[0]))
        auxiliaryfunctions.SaveData(PredicteData[:nframes, :], metadata,
                                    dataname, pdindex, range(nframes),
                                    save_as_csv)
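# --- Illustrative aside (not from the original sources) ---------------------
# Both fmf branches above probe for the last readable frame by walking
# backwards from the header's frame count until get_frame() succeeds. The
# same idea as a generic helper:
def last_readable_frame(get_frame, nframes):
    """Return the largest index for which get_frame(index) succeeds, else 0."""
    while nframes > 0:
        try:
            get_frame(nframes)
            return nframes
        except Exception:  # the FMF reader raises NoMoreFramesException here
            nframes -= 1
    return 0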