def make_montage(h5_filename, cfg_filename=None, ufmf_dir=None,
                 dest_dir=None, save_ogv_movie=False, no_remove=False,
                 max_n_frames=None, start=None, stop=None,
                 movie_fnames=None, movie_cam_ids=None,
                 caminfo_h5_filename=None, colormap=None,
                 kalman_filename=None, candidate_index=0, nth_frame=1,
                 verbose=False, reconstructor=None, **kwargs):
    """Render per-camera annotated frames and assemble them into montage images.

    For every Nth synchronized frame found in the movies associated with
    ``h5_filename``, draws each camera's image (optionally with 2D/3D tracking
    overlays controlled by the config file) to a temporary PNG via benu, then
    combines the per-camera PNGs into one montage PNG per frame.  Optionally
    encodes the montages into an Ogg Theora movie with ffmpeg2theora.

    Parameters
    ----------
    h5_filename : main 2D-tracking .h5 file; also used to auto-discover movies.
    cfg_filename : optional config file parsed with cherrypy's reprconf;
        sections update the defaults from get_config_defaults().
    ufmf_dir : optional directory prefixed to (or searched for) movie files.
    dest_dir : output directory (created if needed); default is os.curdir.
    save_ogv_movie : if True, run ffmpeg2theora on the montage frames.
    no_remove : if True, keep the intermediate per-camera and montage PNGs.
    max_n_frames, start, stop : frame-range limits passed to the frame iterator.
    movie_fnames, movie_cam_ids : explicit movie files / cam_ids; otherwise
        discovered from the .h5 file.
    caminfo_h5_filename : .h5 file providing the camn->cam_id mapping
        (defaults to h5_filename).
    colormap : colormap name forwarded to canv.imshow.
    kalman_filename : 3D (kalmanized) .h5 file; required when any 3D overlay
        or obj_labels option is enabled in the config.
    candidate_index : which auto-discovered movie set to use.
    nth_frame : render only every nth frame.
    reconstructor : optional calibration source overriding kalman_filename.
    **kwargs : forwarded to load_3d_data()/load_3d_raw_data().

    Raises
    ------
    ValueError : if required 3D data is requested without kalman_filename,
        or no input movies are found.
    """
    config = get_config_defaults()
    if cfg_filename is not None:
        loaded_cfg = cherrypy.lib.reprconf.as_dict(cfg_filename)
        # Merge each loaded section over the defaults (section-wise update).
        for section in loaded_cfg:
            config[section].update(loaded_cfg.get(section, {}))
    else:
        warnings.warn('no configuration file specified -- using defaults')

    # Length (in meters, presumably -- positions below are scaled by 1000 to
    # mm for display) of the drawn 3D orientation line segments.
    orientation_3d_line_length = 0.1

    # Any 3D overlay requires the kalmanized data file.
    if (config['what to show']['show_3d_smoothed_position'] or
            config['what to show']['show_3d_MLE_position'] or
            config['what to show']['show_3d_raw_orientation'] or
            config['what to show']['show_3d_raw_chosen_orientation'] or
            config['what to show']['show_3d_smoothed_orientation'] or
            config['what to show']['show_3d_obj_position_text']):
        if kalman_filename is None:
            raise ValueError('need kalman filename to show requested 3D data')

    if config['what to show']['obj_labels']:
        if kalman_filename is None:
            raise ValueError('need kalman filename to show object labels')

    if kalman_filename is not None:
        # Orientation overlays additionally need per-row quality estimates so
        # that low-quality orientations can be filtered out below.
        if (config['what to show']['show_3d_smoothed_orientation'] or
                config['what to show']['show_3d_raw_orientation'] or
                config['what to show']['show_3d_raw_chosen_orientation']):
            need_quality_data = True
        else:
            need_quality_data = False

        if need_quality_data:
            # need data about quality of tracking
            data3d, dataqual_3d = load_3d_data(kalman_filename,
                                               start=start,
                                               stop=stop,
                                               require_qual=True,
                                               **kwargs)
        else:
            data3d = load_3d_data(kalman_filename,
                                  start=start,
                                  stop=stop,
                                  require_qual=False,
                                  **kwargs)
            dataqual_3d = None

        # Raw (MLE, un-smoothed) data is only needed for two overlay types.
        if (config['what to show']['show_3d_MLE_position'] or
                config['what to show']['show_3d_raw_orientation']):
            if need_quality_data:
                data_raw_3d, dataqual_raw_3d = load_3d_raw_data(
                    kalman_filename, **kwargs)
            else:
                data_raw_3d = load_3d_raw_data(kalman_filename,
                                               require_qual=False,
                                               **kwargs)
                dataqual_raw_3d = None
        else:
            data_raw_3d, dataqual_raw_3d = None, None

        # Camera calibration for projecting 3D points into each view.
        if reconstructor is None:
            R = reconstruct.Reconstructor(kalman_filename)
        else:
            R = reconstruct.Reconstructor(reconstructor)
    else:
        # No 3D data at all -- overlays relying on these stay disabled.
        data3d = R = data_raw_3d = None
        dataqual_raw_3d = None
        dataqual_3d = None

    min_ori_qual = config['what to show'][
        'minimum_display_orientation_quality']

    if movie_fnames is None:
        # This works based on UUIDs
        movie_fnames = auto_discover_movies.find_movies(
            h5_filename,
            ufmf_dir=ufmf_dir,
            candidate_index=candidate_index,
            verbose=verbose)
        if verbose:
            print 'autodiscovery: found movie_fnames: %r' % (movie_fnames, )
    else:
        if verbose:
            print 'autodiscovery: movie_fnames specified, not finding movies'

    if len(movie_fnames) == 0:
        # FMF autodiscovery failed; fall back to looking for .ufmf files.
        if verbose:
            print 'autodiscovery: no FMF files found, looking for ufmfs'
        movie_fnames = auto_discover_ufmfs.find_ufmfs(
            h5_filename,
            ufmf_dir=ufmf_dir,
            careful=True,
            verbose=verbose,
        )
    else:
        if verbose:
            print 'autodiscovery: prefixing directory'
        if ufmf_dir is not None:
            if verbose:
                print 'autodiscovery: prefixing movie names with directory %r' % (
                    ufmf_dir, )
            movie_fnames = [os.path.join(ufmf_dir, f) for f in movie_fnames]

    if len(movie_fnames) == 0:
        raise ValueError('no input movies -- nothing to do')
    elif verbose:
        print 'movie_fnames:', movie_fnames

    if dest_dir is None:
        dest_dir = os.curdir
    else:
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)

    # get name of data
    datetime_str = os.path.splitext(os.path.split(h5_filename)[-1])[0]
    if datetime_str.startswith('DATA'):
        # Strip 'DATA' prefix, keep the 15-char timestamp portion.
        datetime_str = datetime_str[4:19]

    # ffmpeg2theora would otherwise treat an all-gray first frame as a
    # grayscale movie; a colored pixel is drawn once (see below) to avoid it.
    workaround_ffmpeg2theora_bug = True

    if caminfo_h5_filename is None:
        caminfo_h5_filename = h5_filename

    if caminfo_h5_filename is not None:
        with open_file_safe(caminfo_h5_filename, mode='r') as h5:
            camn2cam_id, tmp = result_utils.get_caminfo_dicts(h5)
            del tmp
    else:
        camn2cam_id = None

    blank_images = {}  # per-cam_id white placeholder images for skipped frames

    all_frame_montages = []
    for frame_enum, (frame_dict, frame) in enumerate(
            ufmf_tools.iterate_frames(
                h5_filename,
                movie_fnames,
                movie_cam_ids=movie_cam_ids,
                white_background=config['what to show']['white_background'],
                max_n_frames=max_n_frames,
                start=start,
                stop=stop,
                rgb8_if_color=True,
                camn2cam_id=camn2cam_id,
            )):
        if frame_enum % nth_frame != 0:
            continue

        tracker_data = frame_dict['tracker_data']
        global_data = frame_dict['global_data']

        # Select the rows of the (pre-loaded) 3D tables for this frame only.
        if data3d is not None:
            this_frame_3d_data = data3d[data3d['frame'] == frame]
            if dataqual_3d is None:
                this_frame_dataqual = None
            else:
                this_frame_dataqual = dataqual_3d[data3d['frame'] == frame]
        else:
            this_frame_3d_data = None
            this_frame_dataqual = None

        if data_raw_3d is not None:
            this_frame_raw_3d_data = data_raw_3d[data_raw_3d['frame'] == frame]
            if dataqual_raw_3d is None:
                this_frame_raw_dataqual = None
            else:
                this_frame_raw_dataqual = dataqual_raw_3d[data_raw_3d['frame']
                                                          == frame]
        else:
            this_frame_raw_3d_data = None
            this_frame_raw_dataqual = None

        if config['what to show']['zoom_obj']:
            # zoom_obj holds the obj_id to zoom on; find its row this frame.
            zoom_cond_3d = this_frame_3d_data['obj_id'] == config[
                'what to show']['zoom_obj']
            if np.sum(zoom_cond_3d) == 0:
                # object not in this frame
                this_frame_this_obj_3d_data = None
            else:
                this_frame_this_obj_3d_data = this_frame_3d_data[zoom_cond_3d]

        if (frame_enum % 100) == 0:
            # Progress report every 100 iterated frames.
            print '%s: frame %d' % (datetime_str, frame)

        saved_fnames = []
        for movie_idx, ufmf_fname in enumerate(movie_fnames):
            try:
                frame_data = frame_dict[ufmf_fname]
            except KeyError:
                # no data saved (frame skip on Prosilica camera?)
                # Substitute a cached all-white image of the right size.
                if movie_cam_ids is not None:
                    cam_id = movie_cam_ids[movie_idx]
                else:
                    cam_id = ufmf_tools.get_cam_id_from_ufmf_fname(ufmf_fname)
                camn = None
                if cam_id not in blank_images:
                    im_w, im_h = global_data['width_heights'][cam_id]
                    image = np.empty((im_h, im_w), dtype=np.uint8)
                    image.fill(255)
                    blank_images[cam_id] = image
                image = blank_images[cam_id]
                mean_image = None
            else:
                cam_id = frame_data['cam_id']
                camn = frame_data['camn']
                image = frame_data['image']
                if config['what to show']['image_manipulation'] == 'absdiff':
                    mean_image = frame_data['mean']
                del frame_data

            save_fname = 'tmp_frame%07d_%s.png' % (frame, cam_id)
            save_fname_path = os.path.join(dest_dir, save_fname)

            pixel_aspect = config[cam_id].get('pixel_aspect', 1)
            transform = config[cam_id].get('transform', 'orig')

            border_pixels = config['what to show']['border_pixels']

            # Three layout modes: fit into a fixed max resolution, zoom on a
            # single object, or native image size plus border.
            if config['what to show']['max_resolution'] is not None:
                b2 = border_pixels * 2
                fix_w, fix_h = config['what to show']['max_resolution']
                fix_aspect = (fix_w - b2) / float(fix_h - b2)
                desire_aspect = image.shape[1] / float(
                    image.shape[0] * pixel_aspect)
                if desire_aspect >= fix_aspect:
                    # image is wider than resolution given
                    device_w = fix_w - b2
                    device_h = (fix_w - b2) / desire_aspect
                    device_x = border_pixels
                    device_y = (fix_h - device_h + border_pixels) / 2.0
                else:
                    # image is taller than resolution given
                    device_h = fix_h - b2
                    device_w = (fix_h - b2) * desire_aspect
                    device_y = border_pixels
                    device_x = (fix_w - device_w + border_pixels) / 2.0
                user_rect = (0, 0, image.shape[1], image.shape[0])
            elif config['what to show']['zoom_obj']:
                if border_pixels != 0:
                    raise NotImplementedError()
                device_x = 0
                device_y = 0
                device_w = config['what to show']['zoom_orig_pixels'] * config[
                    'what to show']['zoom_factor']
                device_h = device_w
                fix_w = device_w
                fix_h = device_h

                if this_frame_this_obj_3d_data is not None:
                    # Project the zoomed object's 3D position into this camera
                    # and center the user rect on it.
                    X = np.array([
                        this_frame_this_obj_3d_data['x'],
                        this_frame_this_obj_3d_data['y'],
                        this_frame_this_obj_3d_data['z'],
                        np.ones_like(this_frame_this_obj_3d_data['x'])
                    ]).T
                    xarr, yarr = R.find2d(cam_id, X, distorted=True)
                    assert len(xarr) == 1
                    x = xarr[0]
                    y = yarr[0]
                    r = config['what to show']['zoom_orig_pixels'] * 0.5
                    user_rect = (x - r, y - r, r * 2, r * 2)
                else:
                    # we're not tracking object -- don't draw anything
                    user_rect = (-1000, -1000, 10, 10)
            else:
                device_x = border_pixels
                device_y = border_pixels
                device_w = image.shape[1]
                device_h = int(image.shape[0] *
                               pixel_aspect)  # compensate for pixel_aspect
                fix_w = device_w + 2 * border_pixels
                fix_h = device_h + 2 * border_pixels
                user_rect = (0, 0, image.shape[1], image.shape[0])

            canv = benu.Canvas(save_fname_path, fix_w, fix_h)
            device_rect = (device_x, device_y, device_w, device_h)
            with canv.set_user_coords(device_rect,
                                      user_rect,
                                      transform=transform):
                if config['what to show']['image_manipulation'] == 'raw':
                    canv.imshow(image, 0, 0, cmap=colormap)
                if config['what to show']['image_manipulation'] == 'absdiff':
                    if mean_image is not None:
                        # Enhance contrast: 5x the abs difference from the
                        # mean image, centered at mid-gray.
                        adsdiff_image = abs(
                            image.astype(np.int16) -
                            mean_image.astype(np.int16))
                        scaled_show = np.clip((5 * adsdiff_image) + 127, 0,
                                              255).astype(np.uint8)
                        canv.imshow(scaled_show, 0, 0, cmap=colormap)

                if config['what to show'][
                        'show_2d_position'] and camn is not None:
                    cond = tracker_data['camn'] == camn
                    this_cam_data = tracker_data[cond]
                    xarr = np.atleast_1d(this_cam_data['x'])
                    yarr = np.atleast_1d(this_cam_data['y'])
                    canv.scatter(
                        xarr,
                        yarr,
                        color_rgba=(0, 0, 0, 1),
                        radius=10,
                        markeredgewidth=config['what to show']['linewidth'],
                    )
                    # draw shadow
                    canv.scatter(
                        xarr + config['what to show']['linewidth'],
                        yarr + config['what to show']['linewidth'],
                        color_rgba=(1, 1, 1, 1),
                        radius=10,
                        markeredgewidth=config['what to show']['linewidth'],
                    )

                if config['what to show'][
                        'show_2d_orientation'] and camn is not None:
                    cond = tracker_data['camn'] == camn
                    this_cam_data = tracker_data[cond]
                    xarr = np.atleast_1d(this_cam_data['x'])
                    yarr = np.atleast_1d(this_cam_data['y'])
                    slope = np.atleast_1d(this_cam_data['slope'])
                    thetaarr = np.arctan(slope)
                    line_len = 30.0
                    xinc = np.cos(thetaarr) * line_len
                    yinc = np.sin(thetaarr) * line_len / float(pixel_aspect)
                    # Draw a symmetric segment through each detection.
                    # NOTE: xarr/yarr are rebound inside the loop; the zip
                    # iterator was built from the originals beforehand.
                    for x, y, xi, yi in zip(xarr, yarr, xinc, yinc):
                        xarr = np.array([x - xi, x + xi])
                        yarr = np.array([y - yi, y + yi])
                        if np.any(np.isnan(xarr)) or np.any(np.isnan(yarr)):
                            continue
                        canv.plot(
                            xarr,
                            yarr,
                            color_rgba=(0, 1, 0, 0.4),
                            linewidth=config['what to show']['linewidth'],
                        )

                if config['what to show'][
                        'show_3d_smoothed_position'] and camn is not None:
                    if len(this_frame_3d_data):
                        # Homogeneous 3D coords, projected into this camera.
                        X = np.array([
                            this_frame_3d_data['x'], this_frame_3d_data['y'],
                            this_frame_3d_data['z'],
                            np.ones_like(this_frame_3d_data['x'])
                        ]).T
                        xarr, yarr = R.find2d(cam_id, X, distorted=True)
                        canv.scatter(
                            xarr,
                            yarr,
                            color_rgba=(0, 1, 1, 1),
                            radius=10,
                            markeredgewidth=config['what to show']
                            ['linewidth'],
                        )

                if config['what to show'][
                        'show_3d_MLE_position'] and camn is not None:
                    if len(this_frame_raw_3d_data):
                        X = np.array([
                            this_frame_raw_3d_data['x'],
                            this_frame_raw_3d_data['y'],
                            this_frame_raw_3d_data['z'],
                            np.ones_like(this_frame_raw_3d_data['x'])
                        ]).T
                        xarr, yarr = R.find2d(cam_id, X, distorted=True)
                        canv.scatter(
                            xarr,
                            yarr,
                            color_rgba=(0.2, 0.2, 0.5, 1),
                            radius=8,
                            markeredgewidth=config['what to show']
                            ['linewidth'],
                        )
                        # draw shadow
                        canv.scatter(
                            xarr + config['what to show']['linewidth'],
                            yarr + config['what to show']['linewidth'],
                            color_rgba=(0.7, 0.7, 1, 1),  # blue
                            radius=8,
                            markeredgewidth=config['what to show']
                            ['linewidth'],
                        )

                if config['what to show'][
                        'show_3d_raw_orientation'] and camn is not None:
                    if len(this_frame_raw_3d_data):
                        # Pluecker (HZ) line coordinates of raw orientations.
                        hzs = np.array([
                            this_frame_raw_3d_data['hz_line0'],
                            this_frame_raw_3d_data['hz_line1'],
                            this_frame_raw_3d_data['hz_line2'],
                            this_frame_raw_3d_data['hz_line3'],
                            this_frame_raw_3d_data['hz_line4'],
                            this_frame_raw_3d_data['hz_line5']
                        ]).T
                        Xs = np.array([
                            this_frame_raw_3d_data['x'],
                            this_frame_raw_3d_data['y'],
                            this_frame_raw_3d_data['z']
                        ]).T
                        cam_center = R.get_camera_center(cam_id)[:, 0]
                        for (X, hz, this_dataqual) in zip(
                                Xs, hzs, this_frame_raw_dataqual):
                            if this_dataqual < min_ori_qual:
                                continue
                            # Pick the point on the orientation line closest
                            # to the camera ray through the 3D position.
                            cam_ray = geom.line_from_points(
                                geom.ThreeTuple(cam_center),
                                geom.ThreeTuple(X))
                            raw_ori_line = geom.line_from_HZline(hz)
                            X_ = raw_ori_line.get_my_point_closest_to_line(
                                cam_ray)

                            ld = raw_ori_line.direction()
                            dmag = abs(ld)
                            du = ld * (1. / dmag
                                       )  # unit length direction (normalize)

                            length = 0.5  # arbitrary, 0.5 meters
                            N = 100  # n segments (to deal with distortion)

                            # Sample N points along the line centered on X_,
                            # project them all, and draw as a polyline.
                            X0 = X_.vals + du.vals * -length / 2.0
                            X = X0[:, np.newaxis] + np.linspace(0, length, N)[
                                np.newaxis, :] * du.vals[:, np.newaxis]
                            Xh = np.vstack(
                                (X, np.ones_like(X[0, np.newaxis, :]))).T
                            xarr, yarr = R.find2d(cam_id, Xh, distorted=True)
                            canv.plot(
                                xarr,
                                yarr,
                                color_rgba=(0, 0, 1, 1),  # blue
                                linewidth=config['what to show']['linewidth'],
                            )

                if config['what to show'][
                        'show_3d_smoothed_orientation'] and camn is not None:
                    if len(this_frame_3d_data):
                        for (row, ori_qual) in zip(this_frame_3d_data,
                                                   this_frame_dataqual):
                            if ori_qual < min_ori_qual:
                                continue
                            # Segment from position along smoothed direction.
                            X0 = np.array([
                                row['x'], row['y'], row['z'],
                                np.ones_like(row['x'])
                            ]).T
                            dx = np.array([
                                row['dir_x'], row['dir_y'], row['dir_z'],
                                np.zeros_like(row['x'])
                            ]).T
                            X1 = X0 + dx * orientation_3d_line_length
                            if np.any(np.isnan(X1)):
                                continue
                            pts = np.vstack([X0, X1])
                            xarr, yarr = R.find2d(cam_id, pts, distorted=True)
                            canv.plot(
                                xarr,
                                yarr,
                                color_rgba=(1, 0, 0, 1),  # red
                                linewidth=config['what to show']['linewidth'],
                            )

                if config['what to show'][
                        'show_3d_raw_chosen_orientation'] and camn is not None:
                    if len(this_frame_3d_data):
                        for (row, ori_qual) in zip(this_frame_3d_data,
                                                   this_frame_dataqual):
                            if ori_qual < min_ori_qual:
                                continue
                            # Same as above, but using the raw chosen
                            # direction (rawdir_*) fields.
                            X0 = np.array([
                                row['x'], row['y'], row['z'],
                                np.ones_like(row['x'])
                            ]).T
                            dx = np.array([
                                row['rawdir_x'], row['rawdir_y'],
                                row['rawdir_z'],
                                np.zeros_like(row['x'])
                            ]).T
                            X1 = X0 + dx * orientation_3d_line_length
                            if np.any(np.isnan(X1)):
                                continue
                            pts = np.vstack([X0, X1])
                            xarr, yarr = R.find2d(cam_id, pts, distorted=True)
                            canv.plot(
                                xarr,
                                yarr,
                                color_rgba=(1, 159. / 255, 0, 1),  # orange
                                linewidth=config['what to show']['linewidth'],
                            )

                if config['what to show']['obj_labels'] and camn is not None:
                    if len(this_frame_3d_data):
                        X = np.array([
                            this_frame_3d_data['x'], this_frame_3d_data['y'],
                            this_frame_3d_data['z'],
                            np.ones_like(this_frame_3d_data['x'])
                        ]).T
                        xarr, yarr = R.find2d(cam_id, X, distorted=True)
                        for i in range(len(xarr)):
                            obj_id = this_frame_3d_data['obj_id'][i]
                            canv.text('%d' % obj_id,
                                      xarr[i],
                                      yarr[i],
                                      font_size=14,
                                      color_rgba=(1, 0, 0, 1))

                if config['what to show'][
                        'show_3d_obj_position_text'] and camn is not None:
                    if len(this_frame_3d_data):
                        X = np.array([
                            this_frame_3d_data['x'], this_frame_3d_data['y'],
                            this_frame_3d_data['z'],
                            np.ones_like(this_frame_3d_data['x'])
                        ]).T
                        xarr, yarr = R.find2d(cam_id, X, distorted=True)
                        for i in range(len(xarr)):
                            # Position label in millimeters, offset right.
                            canv.text('(%.1f, %.1f, %.1f) mm' %
                                      (X[i, 0] * 1000.0, X[i, 1] * 1000.0,
                                       X[i, 2] * 1000.0),
                                      xarr[i] + 10,
                                      yarr[i],
                                      font_size=14,
                                      color_rgba=(0, 1, 1, 1))

                if config['what to show']['show_cam_id']:
                    canv.text('%s' % cam_id,
                              0,
                              20,
                              font_size=14,
                              color_rgba=(1, 0, 0, 1))

                if workaround_ffmpeg2theora_bug:
                    # first frame should get a colored pixel so that
                    # ffmpeg doesn't interpret the whole movie as grayscale
                    canv.plot(
                        [0, 1],
                        [0, 1],
                        color_rgba=(1, 0, 0, 0.1),
                    )
                    workaround_ffmpeg2theora_bug = False  # Now we already did it.

            canv.save()
            saved_fnames.append(save_fname_path)

        target = os.path.join(
            dest_dir, 'movie%s_frame%07d.png' % (datetime_str, frame_enum + 1))
        # All cameras saved for this frame, make montage
        title = '%s frame %d' % (datetime_str, frame)
        montage(saved_fnames, title, target)
        all_frame_montages.append(target)
        if not no_remove:
            # Clean up the per-camera temporary PNGs.
            for fname in saved_fnames:
                os.unlink(fname)
    print '%s: %d frames montaged' % (
        datetime_str,
        len(all_frame_montages),
    )

    if save_ogv_movie:
        # Encode montage frames into an .ogv movie; run inside dest_dir so
        # the %07d frame pattern resolves, then restore the original cwd.
        orig_dir = os.path.abspath(os.curdir)
        os.chdir(dest_dir)
        try:
            CMD = 'ffmpeg2theora -v 10 movie%s_frame%%07d.png -o movie%s.ogv' % (
                datetime_str, datetime_str)
            subprocess.check_call(CMD, shell=True)
        finally:
            os.chdir(orig_dir)

        if not no_remove:
            for fname in all_frame_montages:
                os.unlink(fname)
def retrack_movies(h5_filename,
                   output_h5_filename=None,
                   max_n_frames=None,
                   start=None,
                   stop=None,
                   ufmf_dir=None,
                   cfg_filename=None,
                   ufmf_filenames=None,
                   save_debug_images=False):
    """Re-run 2D feature extraction on saved .ufmf movies.

    Rebuilds the ``data2d_distorted`` table by re-detecting points in the
    .ufmf image data: threshold the per-frame absolute difference from the
    mean image, label connected regions with scipy.ndimage, and fit position
    / slope / eccentricity to each region with FitParamsClass.  Rows for
    cameras that are NOT being retracked are copied through unchanged, as are
    all other nodes of the input file except the 2D/3D estimate tables.
    Results are written to a temporary .h5 file first.

    Parameters
    ----------
    h5_filename : source .h5 file with original tracking data and timing.
    output_h5_filename : destination .h5 path; must not already exist.
    max_n_frames, start, stop : frame-range limits for the frame iterator.
    ufmf_dir : directory used to locate / prefix .ufmf files.
    cfg_filename : optional reprconf config overriding detection defaults.
    ufmf_filenames : explicit .ufmf files; otherwise auto-discovered.
    save_debug_images : if True, write labeled-region PNGs to ./debug/.

    Raises
    ------
    RuntimeError : no .ufmf files found, output file already exists,
        inconsistent ufmf_dir usage, or no frames processed.
    """
    # 2D data format for PyTables:
    Info2D = flydra_core.data_descriptions.Info2D

    if ufmf_filenames is None:
        ufmf_filenames = auto_discover_ufmfs.find_ufmfs(h5_filename,
                                                        ufmf_dir=ufmf_dir,
                                                        careful=True)
    print 'ufmf_filenames: %r' % ufmf_filenames
    if len(ufmf_filenames) == 0:
        raise RuntimeError(
            'nothing to do (autodetection of .ufmf files failed)')

    if ufmf_dir is not None:
        if (not ufmf_filenames[0].startswith('/')) and (not os.path.isfile(
                ufmf_filenames[0])):
            # filenames are not absolute and are not present, convert
            ufmf_filenames = [
                os.path.join(ufmf_dir, fname) for fname in ufmf_filenames
            ]
        else:
            raise RuntimeError(
                'ufmf_dir given but ufmf_filenames exist without it')

    if os.path.exists(output_h5_filename):
        raise RuntimeError("will not overwrite old file '%s'" %
                           output_h5_filename)

    # get name of data
    config = get_config_defaults()
    if cfg_filename is not None:
        loaded_cfg = cherrypy.lib.reprconf.as_dict(cfg_filename)
        for section in loaded_cfg:
            config[section].update(loaded_cfg.get(section, {}))
    default_camcfg = config['default']
    for cam_id in config.keys():
        if cam_id == 'default':
            continue
        # ensure default key/value pairs in each cam_id
        for key, value in default_camcfg.iteritems():
            if key not in config[cam_id]:
                config[cam_id][key] = value

    # Assumes 'DATAyyyymmdd_HHMMSS...' naming -- keeps chars 4:19. TODO confirm
    datetime_str = os.path.splitext(os.path.split(h5_filename)[-1])[0]
    datetime_str = datetime_str[4:19]

    retrack_cam_ids = [
        ufmf_tools.get_cam_id_from_ufmf_fname(f) for f in ufmf_filenames
    ]

    with open_file_safe(h5_filename, mode='r') as h5:
        # Find camns in original data
        camn2cam_id, cam_id2camns = result_utils.get_caminfo_dicts(h5)
        retrack_camns = []
        for cam_id in retrack_cam_ids:
            retrack_camns.extend(cam_id2camns[cam_id])
        all_camns = camn2cam_id.keys()

        # Save results to temporary file. Copy to real location on success.
        tmpdir = tempfile.mkdtemp()
        tmp_output_h5_filename = os.path.join(tmpdir, 'retrack.h5')

        with open_file_safe(tmp_output_h5_filename,
                            mode='w',
                            delete_on_error=True) as output_h5:

            out_data2d = output_h5.create_table(
                output_h5.root,
                'data2d_distorted',
                Info2D,
                "2d data",
                expectedrows=h5.root.data2d_distorted.nrows)

            # Are there any camns in original h5 that are not being retracked?
            if len(set(all_camns) - set(retrack_camns)):
                # Yes.

                # OK, exclude all camns to be retracked...
                orig_data2d = h5.root.data2d_distorted[:]  # read all data
                for camn in retrack_camns:
                    delete_cond = orig_data2d['camn'] == camn
                    save_cond = ~delete_cond
                    orig_data2d = orig_data2d[save_cond]

                # And save original data for untouched camns
                out_data2d.append(orig_data2d)

            # Copy every node except the tables we are regenerating/dropping.
            for input_node in h5.root._f_iter_nodes():
                if input_node._v_name not in [
                        'data2d_distorted',
                        'kalman_estimates',
                        'ML_estimates',
                        'ML_estimates_2d_idxs',
                ]:
                    print 'copying', input_node._v_name
                    # copy everything from source to dest
                    input_node._f_copy(output_h5.root, recursive=True)

            fpc = realtime_image_analysis.FitParamsClass(
            )  # allocate FitParamsClass

            count = 0  # number of camera-frames actually processed
            iterate_frames = ufmf_tools.iterate_frames  # shorten notation
            for frame_enum, (frame_dict, frame) in enumerate(
                    iterate_frames(
                        h5_filename,
                        ufmf_filenames,
                        max_n_frames=max_n_frames,
                        start=start,
                        stop=stop,
                    )):

                if (frame_enum % 100) == 0:
                    # Progress report every 100 frames.
                    print '%s: frame %d' % (datetime_str, frame)

                for ufmf_fname in ufmf_filenames:
                    try:
                        frame_data = frame_dict[ufmf_fname]
                    except KeyError:
                        # no data saved (frame skip on Prosilica camera?)
                        continue
                    count += 1
                    camn = frame_data['camn']
                    cam_id = frame_data['cam_id']
                    camcfg = config.get(cam_id, default_camcfg)

                    image = frame_data['image']
                    cam_received_timestamp = frame_data[
                        'cam_received_timestamp']
                    timestamp = frame_data['timestamp']

                    detected_points = True
                    obj_slices = None
                    if len(frame_data['regions']) == 0:
                        # no data this frame -- go to next camera or frame
                        detected_points = False

                    if detected_points:
                        #print frame,cam_id,len(frame_data['regions'])

                        # Threshold the abs difference from the mean image:
                        # a fraction of the max difference, but never below
                        # the configured minimum.
                        absdiff_im = abs(
                            frame_data['mean'].astype(np.float32) - image)
                        thresh_val = np.max(
                            absdiff_im) * camcfg['absdiff_max_frac_thresh']
                        thresh_val = max(camcfg['min_absdiff'], thresh_val)
                        thresh_im = absdiff_im > thresh_val
                        labeled_im, n_labels = scipy.ndimage.label(thresh_im)
                        if not n_labels:
                            detected_points = False
                        else:
                            obj_slices = scipy.ndimage.find_objects(labeled_im)

                    detection = out_data2d.row
                    if detected_points:
                        height, width = image.shape
                        if save_debug_images:
                            xarr = []
                            yarr = []

                        frame_pt_idx = 0
                        detected_points = False  # possible not to find any below

                        for i in range(n_labels):
                            y_slice, x_slice = obj_slices[i]
                            # limit pixel operations to covering rectangle
                            this_labeled_im = labeled_im[y_slice, x_slice]
                            this_label_im = this_labeled_im == (i + 1)

                            # calculate area (number of binarized pixels)
                            xsum = np.sum(this_label_im, axis=0)
                            pixel_area = np.sum(xsum)
                            if pixel_area < camcfg['area_minimum_threshold']:
                                continue

                            # calculate center
                            xpos = np.arange(x_slice.start, x_slice.stop,
                                             x_slice.step)
                            ypos = np.arange(y_slice.start, y_slice.stop,
                                             y_slice.step)
                            xmean = np.sum((xsum * xpos)) / np.sum(xsum)
                            ysum = np.sum(this_label_im, axis=1)
                            ymean = np.sum((ysum * ypos)) / np.sum(ysum)

                            # Historical toggle: the 'else' branch below is
                            # intentionally dead ('if 1:').
                            if 1:
                                # Non-square pixels are handled by doubling
                                # rows before fitting.
                                if camcfg['pixel_aspect'] == 1:
                                    this_fit_im = this_label_im
                                elif camcfg['pixel_aspect'] == 2:
                                    this_fit_im = np.repeat(this_label_im,
                                                            2,
                                                            axis=0)
                                else:
                                    raise ValueError('unknown pixel_aspect')

                                fast_foreground = FastImage.asfastimage(
                                    this_fit_im.astype(np.uint8))

                                fail_fit = False
                                try:
                                    (x0_roi, y0_roi, weighted_area, slope,
                                     eccentricity) = fpc.fit(fast_foreground)
                                except realtime_image_analysis.FitParamsError, err:
                                    fail_fit = True
                                    print "frame %d, ufmf %s: fit failed" % (
                                        frame, ufmf_fname)
                                    print err
                                else:
                                    if camcfg['pixel_aspect'] == 2:
                                        # Undo the row doubling above.
                                        y0_roi *= 0.5
                                    xmean = x_slice.start + x0_roi
                                    ymean = y_slice.start + y0_roi
                                    del weighted_area  # don't leave room for confusion
                            else:
                                fail_fit = True

                            if fail_fit:
                                # Keep the centroid estimate; orientation
                                # quantities are unknown.
                                slope = np.nan
                                eccentricity = np.nan

                            detection['camn'] = camn
                            detection['frame'] = frame
                            detection['timestamp'] = timestamp
                            detection[
                                'cam_received_timestamp'] = cam_received_timestamp
                            detection['x'] = xmean
                            detection['y'] = ymean
                            detection['area'] = pixel_area
                            detection['slope'] = slope
                            detection['eccentricity'] = eccentricity
                            detection['frame_pt_idx'] = frame_pt_idx
                            # XXX These are not yet implemented:
                            detection['cur_val'] = 0
                            detection['mean_val'] = np.nan
                            detection['sumsqf_val'] = np.nan
                            frame_pt_idx += 1
                            if save_debug_images:
                                xarr.append(xmean)
                                yarr.append(ymean)
                            detection.append()
                            detected_points = True

                        if save_debug_images:
                            save_dir = 'debug'
                            mkdir_p(save_dir)
                            save_fname = 'debug_%s_%d.png' % (cam_id, frame)
                            save_fname_path = os.path.join(
                                save_dir, save_fname)
                            print 'saving', save_fname_path
                            import benu
                            canv = benu.Canvas(save_fname_path, width, height)
                            # Spread label values across the uint8 range so
                            # regions are distinguishable in the image.
                            maxlabel = np.max(labeled_im)
                            fact = int(np.floor(255.0 / maxlabel))
                            canv.imshow((labeled_im * fact).astype(np.uint8),
                                        0, 0)
                            canv.scatter(
                                xarr,
                                yarr,
                                color_rgba=(0, 1, 0, 1),
                                radius=10,
                            )
                            canv.save()

                    if not detected_points:
                        # If no point was tracked for this frame,
                        # still save timestamp.
                        detection['camn'] = camn
                        detection['frame'] = frame
                        detection['timestamp'] = timestamp
                        detection[
                            'cam_received_timestamp'] = cam_received_timestamp
                        detection['x'] = np.nan
                        detection['y'] = np.nan
                        detection['area'] = np.nan
                        detection['slope'] = np.nan
                        detection['eccentricity'] = np.nan
                        detection['frame_pt_idx'] = 0
                        detection['cur_val'] = 0
                        detection['mean_val'] = np.nan
                        detection['sumsqf_val'] = np.nan
                        detection.append()
    if count == 0:
        raise RuntimeError('no frames processed')
def doit(
        movie_fname=None,
        reconstructor_fname=None,
        h5_fname=None,
        cam_id=None,
        dest_dir=None,
        transform=None,
        start=None,
        stop=None,
        h5start=None,
        h5stop=None,
        show_obj_ids=False,
        obj_only=None,
        image_format=None,
        subtract_frame=None,
        save_framelist_fname=None,
):
    """Export movie frames with projected 3D trajectory overlays.

    Iterates frames of a single-camera .fmf/.ufmf movie, maps each movie
    timestamp to its .h5 frame number via the file's time model, draws the
    smoothed 3D position of each object projected into this camera (plus a
    vertical line to the z=0 plane, and optionally the obj_id label), and
    saves one image per frame into dest_dir.

    Parameters
    ----------
    movie_fname : .fmf or .ufmf movie file (required).
    reconstructor_fname : optional calibration source; defaults to the
        calibration stored in the .h5 data file.
    h5_fname : kalmanized .h5 file with trajectories and time model.
    cam_id : camera id used for 2D projection (required).
    dest_dir : output directory; default os.curdir.
    transform : benu transform name (e.g. 'rot 90', 'rot -90') applied when
        drawing; 90-degree rotations swap the canvas width/height.
    start, stop : inclusive movie frame-number range; default whole movie.
    h5start, h5stop : additional range filter in .h5 frame numbers.
    show_obj_ids : if True, annotate each point with its obj_id.
    obj_only : restrict to these obj_ids instead of all in the file.
    image_format : output extension; default 'png'.
    subtract_frame : optional .fmf whose first frame is subtracted (background
        subtraction) from every movie frame.
    save_framelist_fname : optional text file receiving one .h5 frame number
        per saved image.
    """
    if dest_dir is None:
        dest_dir = os.curdir

    if movie_fname is None:
        raise NotImplementedError('')

    if image_format is None:
        image_format = 'png'

    if cam_id is None:
        raise NotImplementedError('')

    if movie_fname.lower().endswith('.fmf'):
        movie = fmf_mod.FlyMovie(movie_fname)
    else:
        movie = ufmf_mod.FlyMovieEmulator(movie_fname)

    if start is None:
        start = 0

    if stop is None:
        stop = movie.get_n_frames() - 1

    ca = core_analysis.get_global_CachingAnalyzer()
    (obj_ids, unique_obj_ids, is_mat_file, data_file,
     extra) = ca.initial_file_load(h5_fname)

    if obj_only is not None:
        unique_obj_ids = obj_only

    dynamic_model_name = extra['dynamic_model_name']
    if dynamic_model_name.startswith('EKF'):
        # Strip the 'EKF ' prefix to get the underlying model name.
        dynamic_model_name = dynamic_model_name[4:]

    if reconstructor_fname is None:
        reconstructor = flydra_core.reconstruct.Reconstructor(data_file)
    else:
        reconstructor = flydra_core.reconstruct.Reconstructor(
            reconstructor_fname)

    fix_w = movie.get_width()
    fix_h = movie.get_height()
    is_color = imops.is_coding_color(movie.get_format())

    if subtract_frame is not None:
        if not subtract_frame.endswith('.fmf'):
            raise NotImplementedError(
                'only fmf supported for --subtract-frame')
        tmp_fmf = fmf_mod.FlyMovie(subtract_frame)

        # Convert the background frame to the same pixel coding as the movie
        # frames, as float32 so the per-frame subtraction doesn't wrap.
        if is_color:
            tmp_frame, tmp_timestamp = tmp_fmf.get_next_frame()
            subtract_frame = imops.to_rgb8(tmp_fmf.get_format(), tmp_frame)
            subtract_frame = subtract_frame.astype(
                np.float32)  # force upconversion to float
        else:
            tmp_frame, tmp_timestamp = tmp_fmf.get_next_frame()
            subtract_frame = imops.to_mono8(tmp_fmf.get_format(), tmp_frame)
            subtract_frame = subtract_frame.astype(
                np.float32)  # force upconversion to float

    if save_framelist_fname is not None:
        save_framelist_fd = open(save_framelist_fname, mode='w')

    movie_fno_count = 0
    for movie_fno in range(start, stop + 1):
        movie.seek(movie_fno)
        image, timestamp = movie.get_next_frame()
        # Convert the movie timestamp into an .h5 frame number.
        h5_frame = extra['time_model'].timestamp2framestamp(timestamp)
        if h5start is not None:
            if h5_frame < h5start:
                continue
        if h5stop is not None:
            if h5_frame > h5stop:
                continue
        if is_color:
            image = imops.to_rgb8(movie.get_format(), image)
        else:
            image = imops.to_mono8(movie.get_format(), image)
        if subtract_frame is not None:
            # Background subtraction, clipped back into uint8 range.
            new_image = np.clip(image - subtract_frame, 0, 255)
            image = new_image.astype(np.uint8)
        warnings.warn('not implemented: interpolating data')
        h5_frame = int(round(h5_frame))
        if save_framelist_fname is not None:
            save_framelist_fd.write('%d\n' % h5_frame)

        movie_fno_count += 1
        # Historical toggle ('if 0:'): numbering scheme for output files.
        if 0:
            # save starting from frame 1
            save_fname_path = os.path.splitext(movie_fname)[
                0] + '_frame%06d.%s' % (movie_fno_count, image_format)
        else:
            # frame is frame in movie file
            save_fname_path = os.path.splitext(
                movie_fname)[0] + '_frame%06d.%s' % (movie_fno, image_format)
        save_fname_path = os.path.join(dest_dir, save_fname_path)
        if transform in ['rot 90', 'rot -90']:
            # A 90-degree rotation swaps canvas width and height.
            device_rect = (0, 0, fix_h, fix_w)
            canv = benu.Canvas(save_fname_path, fix_h, fix_w)
        else:
            device_rect = (0, 0, fix_w, fix_h)
            canv = benu.Canvas(save_fname_path, fix_w, fix_h)
        user_rect = (0, 0, image.shape[1], image.shape[0])
        show_points = []
        with canv.set_user_coords(device_rect,
                                  user_rect,
                                  transform=transform):
            canv.imshow(image, 0, 0)
            for obj_id in unique_obj_ids:
                try:
                    data = ca.load_data(
                        obj_id,
                        data_file,
                        frames_per_second=extra['frames_per_second'],
                        dynamic_model_name=dynamic_model_name,
                    )
                except core_analysis.NotEnoughDataToSmoothError:
                    continue
                cond = data['frame'] == h5_frame
                idxs = np.nonzero(cond)[0]
                if not len(idxs):
                    continue  # no data at this frame for this obj_id
                assert len(idxs) == 1
                idx = idxs[0]
                row = data[idx]

                # circle over data point
                xyz = row['x'], row['y'], row['z']
                x2d, y2d = reconstructor.find2d(cam_id, xyz, distorted=True)
                radius = 10
                canv.scatter([x2d], [y2d],
                             color_rgba=green,
                             markeredgewidth=3,
                             radius=radius)

                if 1:
                    # z line to XY plane through origin
                    xyz0 = row['x'], row['y'], 0
                    x2d_z0, y2d_z0 = reconstructor.find2d(cam_id,
                                                          xyz0,
                                                          distorted=True)
                    warnings.warn('not distorting Z line')
                    # Historical toggle ('if 1:'): start the z-line at the
                    # circle's edge rather than its center.
                    if 1:
                        xdist = x2d - x2d_z0
                        ydist = y2d - y2d_z0
                        dist = np.sqrt(xdist**2 + ydist**2)
                        start_frac = radius / dist
                        if radius > dist:
                            start_frac = 0
                        x2d_r = x2d - xdist * start_frac
                        y2d_r = y2d - ydist * start_frac
                    else:
                        x2d_r = x2d
                        y2d_r = y2d
                    canv.plot([x2d_r, x2d_z0], [y2d_r, y2d_z0],
                              color_rgba=green,
                              linewidth=3)
                if show_obj_ids:
                    show_points.append((obj_id, x2d, y2d))
        # Text labels are drawn in device coords, outside the user-coords
        # context, using the canvas' own point transformation.
        for show_point in show_points:
            obj_id, x2d, y2d = show_point
            x, y = canv.get_transformed_point(x2d,
                                              y2d,
                                              device_rect,
                                              user_rect,
                                              transform=transform)
            canv.text(
                'obj_id %d' % obj_id,
                x,
                y,
                color_rgba=(0, 1, 0, 1),
                font_size=20,
            )
        canv.save()
top_row_width = scale*imw*n_ims + (1+n_ims)*margin SHOW_STACK=True if SHOW_STACK: n_stack_rows = 4 rw = scale*imw*stack_N_images + (1+n_ims)*margin row_width = max(top_row_width,rw) col_height = (n_stack_rows*scale*imh + (n_stack_rows+1)*margin) stack_margin=20 else: row_width = top_row_width col_height = scale*imh + 2*margin stack_margin=0 canv=benu.Canvas(fname,row_width,col_height+stack_margin, color_rgba=(1,1,1,1)) if SHOW_STACK: for (stacki,s_orig_idx) in enumerate( orig_idxs_in_average): (s_raw_im, s_raw_coords)=raw_images[s_orig_idx] s_raw_l, s_raw_b = s_raw_coords[:2] s_imh, s_imw = s_raw_im.shape[:2] user_rect = (s_raw_l,s_raw_b,s_imw,s_imh) x_display = (stacki+1)*margin+(scale*imw)*stacki for show in ['raw','absdiff','morphed']: if show=='raw': y_display = scale*imh + 2*margin elif show=='absdiff':