def main():
    """Let the user pick a background image, measure the divider drawn on it,
    and store the derived mm-per-pixel scale in the camera's config.yaml.

    Side effects: opens a Tk file dialog, rewrites "<dir>/config.yaml".
    """
    # Allows a user to select a background file
    root = Tk()
    root.withdraw()
    root.update()
    filepath = askopenfilename(title="Select background file", filetypes=(("png files", "*.png"),
                                                                          ("all files", "*.*")))
    root.destroy()

    img = cv2.imread(filepath)
    line_length_pixels = measuring(img)

    # physical length of the measured divider base; mean over the measured
    # line(s) gives the scale
    divider_base_mm = 15
    mm_per_pixel = np.mean(divider_base_mm / line_length_pixels)
    print("mm_per_pixel: {}".format(mm_per_pixel))

    # update the camera-level config next to the selected file
    parts = os.path.split(filepath)
    config_file = load_yaml(parts[0], "config")
    config_file["mm_per_pixel"] = mm_per_pixel.item()  # plain float for yaml
    with open(os.path.join(parts[0], "config.yaml"), "w") as file:
        yaml.dump(config_file, file)
def image_minmax(rootdir, ymin, ymax, fish_ID, meta):
    """Save a png of the first video frame with the fish's y-extent overlaid.

    Reads the first frame of the first *.mp4 in `rootdir`, crops it to the
    tracking ROI (if a roi_file exists) and draws horizontal lines at `ymin`
    and `ymax`, saving the figure next to the video.

    :param rootdir: folder containing the video and yaml files
    :param ymin: smallest tracked y coordinate (pixels)
    :param ymax: largest tracked y coordinate (pixels)
    :param fish_ID: identifier used in the output file name
    :param meta: meta_data yaml dict; "species" is used in the file name
    :return: None
    """
    track_roi = load_yaml(rootdir, "roi_file")
    vid_paths = glob.glob(os.path.join(rootdir, "*.mp4"))

    # fix: bail out early when no video is present (previously `cap` could be
    # referenced while undefined)
    if not vid_paths:
        print("no mp4 video found in {}, skipping min/max image".format(rootdir))
        return

    cap = cv2.VideoCapture(vid_paths[0])
    if not cap.isOpened():
        # cv2.VideoCapture does not raise on a bad path, so check isOpened()
        print("problem reading video file, check path")
        return

    ret, frame = cap.read()
    if not ret or frame is None:
        print("could not read a frame from {}".format(vid_paths[0]))
        cap.release()
        return

    if track_roi:
        # crop to the first ROI: (x, y, w, h)
        curr_roi = track_roi["roi_0"]
        frame = frame[curr_roi[1]:curr_roi[1] + curr_roi[3], curr_roi[0]:curr_roi[0] + curr_roi[2]]

    fig1, ax1 = plt.subplots()
    plt.imshow(frame)
    plt.plot([0, 500], [ymin, ymin], color='r')
    plt.plot([0, 500], [ymax, ymax], color='r')
    plt.savefig(os.path.join(rootdir, "{0}_ylims_{1}.png".format(fish_ID, meta["species"].replace(' ', '-'))))
    plt.close()
    cap.release()
    cv2.destroyAllWindows()
    return
def print_roi(roi_path, video_path):
    """Play a video with all recording ROIs drawn on every frame.

    :param roi_path: folder containing the roi_file yaml
    :param video_path: path of the video to annotate
    :return: the last read frame (None at end of video), or False if the
        video cannot be opened
    """
    rois = load_yaml(roi_path, "roi_file")
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # cv2.VideoCapture does not raise on failure; isOpened() is the check
        print("problem reading video file, check path")
        return False

    frame = None
    while cap.isOpened():
        ret, frame = cap.read()
        if frame is None:
            break
        for roi in range(0, len(rois) - 1):
            # for the frame define an ROI: (x, y, w, h)
            curr_roi = rois["roi_" + str(roi)]
            # add in ROIs as rectangles from top-left to bottom-right
            start_point = (curr_roi[0], curr_roi[1])
            end_point = (curr_roi[0] + curr_roi[2], curr_roi[1] + curr_roi[3])
            cv2.rectangle(frame, start_point, end_point, 220, 2)
        cv2.imshow('Roi printed on video frame', frame)
        # fix: imshow needs waitKey to process GUI events and actually render
        cv2.waitKey(1)
    cap.release()  # fix: capture was never released
    return frame
def full_analysis(rootdir):
    """Run the full per-fish activity analysis for one recording folder.

    Loads the track csvs and yaml metadata in `rootdir`, reconstructs the
    time vector, computes smoothed speed/position measures, saves a series of
    diagnostic pngs into `rootdir`, and finally writes "<fish_ID>_meta.csv"
    and "<fish_ID>_als.csv".

    :param rootdir: folder of one fish recording
    :return: None
    """
    parts = os.path.split(rootdir)
    config = load_yaml(parts[0], "config")
    meta = load_yaml(rootdir, "meta_data")

    # fish_ID: reconstruct it for old short folder names, otherwise take the
    # folder name directly
    if len(parts[1]) < 19:
        print("old recording naming, reconstructing name")
        rec_folder = os.path.split(os.path.split(parts[0])[0])[1]
        cam_n = os.path.split(parts[0])[1][-1]
        roi_n = parts[1][-1]
        fish_ID = "{0}_c{1}_r{2}_{3}_s{4}".format(rec_folder, cam_n, roi_n, meta["species"].replace(' ', '-'),
                                                  meta["sex"])
    else:
        fish_ID = parts[1]
        roi_n = fish_ID.split("_")[2][1]
    file_ending = roi_n

    # load tracks
    track_full, speed_full = extract_tracks_from_fld(rootdir, file_ending)

    # for old recordings update time (subtract 30min)
    track_full[:, 0] = adjust_old_time_ns(fish_ID, track_full[:, 0])

    # get starting time of video from the first csv file name
    os.chdir(rootdir)
    files = glob.glob("*.csv")
    files.sort()
    start_time = files[0][9:15]  # assumes HHMMSS at chars 9-15 of the name — TODO confirm naming scheme
    start_total_sec = (int(start_time[0:2]) * 60 * 60 + int(start_time[2:4]) * 60 + int(start_time[4:]))

    # set sunrise, day, sunset, night times (ns, s, m, h) and set day length in ns
    change_times_s, change_times_ns, change_times_m, change_times_h, day_ns, day_s, change_times_d = output_timings()

    # set time vector: retracked data starts at 0 so interpolate from fps,
    # otherwise shift the recorded timestamps to the video start time
    if track_full[0, 0] == 0:
        tv = np.arange(start_total_sec * 10 ** 9,
                       ((track_full.shape[0] / config['fps']) * 10 ** 9 + start_total_sec * 10 ** 9),
                       ((1 / config['fps']) * 10 ** 9))
        print("using retracked data so using interpolated time vector")
    else:
        tv = track_full[:, 0] - track_full[0, 0] + start_total_sec * 10 ** 9

    # correct to seconds
    tv_sec = tv / 10 ** 9
    tv_24h_sec = tv / 10 ** 9
    num_days = 7
    # get time vector with 24h time (wrap each full day back into 0-24h)
    for i in range(num_days):
        tv_24h_sec[np.where(tv_24h_sec > day_ns / 10 ** 9)] -= day_ns / 10 ** 9

    min_bins = 30
    # interpolate between NaN stretches
    x_n = int_nan_streches(track_full[:, 1])
    y_n = int_nan_streches(track_full[:, 2])

    # replace bad track NaNs (-1) -> these are manually defined as artifacts by "split_tracking"
    plt.xlabel("Time (h)")  # ax2.invert_yaxis()  NOTE(review): stray plotting call, looks misplaced — confirm
    x_n[np.where(x_n == -1)] = np.nan
    y_n[np.where(y_n == -1)] = np.nan

    # find displacement
    speed_full_i = np.sqrt(np.diff(x_n) ** 2 + np.diff(y_n) ** 2)
    speed_t, x_nt, y_nt = remove_high_spd_xy(speed_full_i, x_n, y_n)

    speed_sm = smooth_speed(speed_t, win_size=5)
    speed_sm_mm = speed_sm * config["mm_per_pixel"]
    speed_sm_mm_ps = speed_sm_mm * config['fps']
    speed_sm_tbl = speed_sm_mm / meta["fish_length_mm"]  # speed in body lengths
    speed_sm_tbl_ps = speed_sm_tbl * config['fps']

    # smoothing on coordinates: histogram of frame-to-frame x jumps
    fig1, ax1 = plt.subplots()
    plt.hist(np.diff(x_nt), 1000)
    plt.yscale('log')
    plt.xlabel("pixels")
    plt.ylabel("frequency")
    plt.title("X_{0}".format(meta["species"]))
    plt.savefig(os.path.join(rootdir, "{0}_X_jumps_{1}.png".format(fish_ID, meta["species"].replace(' ', '-'))))

    # histogram of frame-to-frame y jumps
    fig1, ax1 = plt.subplots()
    plt.hist(np.diff(y_nt), 1000)
    plt.yscale('log')
    plt.xlabel("pixels")
    plt.ylabel("frequency")
    plt.title("Y_{0}".format(meta["species"]))
    plt.savefig(os.path.join(rootdir, "{0}_Y-jumps_{1}.png".format(fish_ID, meta["species"].replace(' ', '-'))))

    # smoothed/binned y position over time
    y_sm = smooth_speed(y_nt, win_size=5)
    smooth_win = 10 * 60 * min_bins
    y_bin = smooth_speed(y_sm, win_size=smooth_win)
    plt.close()
    fig2, ax2 = filled_plot(tv / 10 ** 9 / 60 / 60, y_bin[0:-1], change_times_h, day_ns / 10 ** 9 / 60 / 60)
    plt.ylabel("average y position")
    plt.title("Y position_{0}_smoothed_by_{1}".format(meta["species"], min_bins))
    plt.savefig(os.path.join(rootdir, "{0}_Y-position_{1}.png".format(fish_ID, meta["species"].replace(' ', '-'))))

    # area
    area_sm = smooth_speed(track_full[0:-1, 3], win_size=5)
    area_bin = smooth_speed(area_sm, win_size=smooth_win)
    plt.close()
    fig2, ax2 = filled_plot(tv / 10 ** 9 / 60 / 60, area_bin, change_times_h, day_ns / 10 ** 9 / 60 / 60)
    plt.xlabel("Time (h)")
    plt.ylabel("average area size")
    plt.title("Area_{0}_smoothed_by_{1}".format(meta["species"], min_bins))

    # split data into day and night (night = before sunrise or after sunset on the 24h clock)
    tv_night = np.empty([0, 0])
    speed_sm_night = np.empty([0, 0])
    tv_night = np.append(tv_night, tv_24h_sec[np.where(change_times_s[0] > tv_24h_sec)])
    tv_night = np.append(tv_night, tv[np.where(tv_24h_sec[0:-1] > change_times_s[3])])
    speed_sm_night = np.append(speed_sm_night, speed_sm_mm_ps[np.where(change_times_s[0] > tv_24h_sec[0:-1]), 0])
    speed_sm_night = np.append(speed_sm_night, speed_sm_mm_ps[np.where(tv_24h_sec[0:-1] > change_times_s[3]), 0])
    tv_day = np.empty([0, 0])
    speed_sm_day = np.empty([0, 0])
    tv_day = np.append(tv_day, tv[np.where((change_times_s[0] < tv_24h_sec[0:-1]) &
                                           (tv_24h_sec[0:-1] < change_times_s[3]))])
    speed_sm_day = np.append(speed_sm_day, speed_sm_mm_ps[np.where((change_times_s[0] < tv_24h_sec[0:-1]) &
                                                                   (tv_24h_sec[0:-1] < change_times_s[3])), 0])

    # plot speed distributions
    bin_edges_plot = np.linspace(0, 200, 101)
    # bin_edges_plot = np.logspace(0, 1.2, 10)
    plot_hist_2(bin_edges_plot, speed_sm_day, "day", speed_sm_night, "night", "speed mm/s", 1)
    plt.savefig(os.path.join(rootdir, "{0}_hist_D_vs_N_{1}_spd_mms.png".format(fish_ID,
                                                                               meta["species"].replace(' ', '-'))))

    # split position data into day and night
    position_night_x = np.empty([0, 0])
    position_night_y = np.empty([0, 0])
    position_night_x = np.append(position_night_x, x_nt[np.where(change_times_s[0] > tv_24h_sec)])
    position_night_x = np.append(position_night_x, x_nt[np.where(tv_24h_sec[0:-1] > change_times_s[3])])
    position_night_y = np.append(position_night_y, y_nt[np.where(change_times_s[0] > tv_24h_sec)])
    position_night_y = np.append(position_night_y, y_nt[np.where(tv_24h_sec[0:-1] > change_times_s[3])])
    position_day_x = np.empty([0, 0])
    position_day_y = np.empty([0, 0])
    position_day_x = np.append(position_day_x, x_nt[np.where((change_times_s[0] < tv_24h_sec[0:-1]) &
                                                             (tv_24h_sec[0:-1] < change_times_s[3]))])
    position_day_y = np.append(position_day_y, y_nt[np.where((change_times_s[0] < tv_24h_sec[0:-1]) &
                                                             (tv_24h_sec[0:-1] < change_times_s[3]))])

    # plot position (fix = remove x,y when they were over threshold)
    bin_edges_plot = np.linspace(0, 800, 101)
    plot_hist_2(bin_edges_plot, position_day_y, "day", position_night_y, "night", "Y position", 1)

    xmin = np.nanmin(x_nt[:])
    xmax = np.nanmax(x_nt[:])
    ymin = np.nanmin(y_nt[:])
    ymax = np.nanmax(y_nt[:])
    image_minmax(rootdir, ymin, ymax, fish_ID, meta)

    # 2D occupancy histograms: one column per day, day (row 1) vs night (row 2)
    fig, (ax1, ax2) = plt.subplots(2, 7, sharey=True)
    for day in range(num_days):
        position_night_x = x_nt[np.where((tv_sec > (change_times_s[3] + day_s * day)) &
                                         (tv_sec < (change_times_s[0] + day_s * (day + 1))))]
        position_night_y = y_nt[np.where((tv_sec > (change_times_s[3] + day_s * day)) &
                                         (tv_sec < (change_times_s[0] + day_s * (day + 1))))]
        position_day_x = x_nt[np.where((tv_sec > (change_times_s[0] + day_s * day)) &
                                       (tv_sec < (change_times_s[3] + day_s * day)))]
        position_day_y = y_nt[np.where((tv_sec > (change_times_s[0] + day_s * day)) &
                                       (tv_sec < (change_times_s[3] + day_s * day)))]
        ax1[day].hist2d(position_day_x[~np.isnan(position_day_x)],
                        neg_values(position_day_y[~np.isnan(position_day_y)]),
                        bins=10, range=[[xmin, xmax], [-ymax, ymin]], cmap='inferno')
        ax2[day].hist2d(position_night_x[~np.isnan(position_night_x)],
                        neg_values(position_night_y[~np.isnan(position_night_y)]),
                        bins=10, range=[[xmin, xmax], [-ymax, ymin]], cmap='inferno')
    plt.savefig(os.path.join(rootdir, "{0}_hist2d_D_vs_N_split_days_spt.png".format(fish_ID)))

    # # speed vs y position
    # # plt.scatter(speed_sm_mm_ps, y_nt[0:-1])
    # spd_max = np.percentile(speed_sm_mm_ps, 95)
    # fig, (ax1, ax2) = plt.subplots(1, 2)
    # test_s = speed_sm_mm_ps[~np.isnan(speed_sm_mm_ps)]
    # test_y = y_nt[~np.isnan(y_nt)]
    # ax1.hist2d(test_s, neg_values(test_y[0:-1]), bins=10, range=[[0, spd_max], [-ymax, ymin]], cmap='inferno')
    # looking at correlations
    # covariance = np.cov(test_s, test_y[0:-1])
    # from scipy.stats import pearsonr
    # corr, _ = pearsonr(test_s, test_y[0:-1])
    # print(corr)
    # ax1.hist2d(speed_sm_mm_ps[~np.isnan(speed_sm_mm_ps)], neg_values(position_day_y[0:-1][~np.isnan(position_day_y)]),
    # bins=10, range=[[0, spd_max], [-ymax, ymin]], cmap='inferno')

    # Distribute position into categories: y [top, centre, bottom], or x [centre side].
    # Assume the fish explores the whole area over the video
    y_bins = 10
    x_bins = 5
    y_bin_size = (ymax - ymin) / y_bins
    x_bin_size = (xmax - xmin) / x_bins
    vertical_pos = np.empty([y_nt.shape[0]])
    previous_y_bin = 0
    for y_bin in range(1, y_bins + 1):
        vertical_pos[np.where(np.logical_and((y_nt - ymin) >= previous_y_bin * y_bin_size,
                                             (y_nt - ymin) <= y_bin * y_bin_size))] = y_bin
        previous_y_bin = copy.copy(y_bin)
    horizontal_pos = np.empty([x_nt.shape[0]])
    previous_x_bin = 0
    for x_bin in range(1, x_bins + 1):
        horizontal_pos[np.where(np.logical_and((x_nt - xmin) >= previous_x_bin * x_bin_size,
                                               (x_nt - xmin) <= x_bin * x_bin_size))] = x_bin
        previous_x_bin = copy.copy(x_bin)

    move_thresh = 15
    # Bin thresholded data (10fps = seconds, 60 seconds = min e.g. 10*60*10 = 10min bins
    fraction_active = (speed_sm_mm_ps > move_thresh) * 1
    super_threshold_indices_bin = smooth_speed(fraction_active, 10 * 60 * min_bins)

    # filled plot in s
    plt.close()
    fig1, ax1 = filled_plot(tv / 10 ** 9 / 60 / 60, super_threshold_indices_bin, change_times_h,
                            day_ns / 10 ** 9 / 60 / 60)
    ax1.set_ylim([0, 1])
    sec_axis_h(ax1, start_total_sec)
    plt.xlabel("Time (h)")
    plt.ylabel("Fraction active in {} min sliding windows".format(min_bins))
    plt.title("Fraction_active_{}_thresh_{}_mmps".format(meta["species"], move_thresh))
    plt.savefig(os.path.join(rootdir, "{0}_wake_{1}_spt.png".format(fish_ID, meta["species"].replace(' ', '-'))))

    # win_size = fps * sec/min * mins (was 30*60)heatm
    smooth_win = 10 * 60 * min_bins
    speed_sm_bin = smooth_speed(speed_sm_tbl_ps, win_size=smooth_win)
    plt.close()
    fig2, ax2 = filled_plot(tv / 10 ** 9 / 60 / 60, speed_sm_bin, change_times_h, day_ns / 10 ** 9 / 60 / 60)
    sec_axis_h(ax2, start_total_sec)
    plt.xlabel("Time (h)")
    plt.ylabel("Speed body lengths/s")
    plt.title("Speed_{0}_smoothed_by_{1}".format(meta["species"], min_bins))
    plt.savefig(os.path.join(rootdir, "{0}_speed_{1}_30m_spt.png".format(fish_ID, meta["species"].replace(' ', '-'))))

    # win_size = fps * sec/min * mins (was 30*60)heatm
    smooth_win = 10 * 60 * min_bins
    speed_sm_mm_bin = smooth_speed(speed_sm_mm_ps, win_size=smooth_win)
    plt.close()
    fig2, ax2 = filled_plot(tv / 10 ** 9 / 60 / 60, speed_sm_mm_bin, change_times_h, day_ns / 10 ** 9 / 60 / 60)
    sec_axis_h(ax2, start_total_sec)
    plt.xlabel("Time (h)")
    plt.ylabel("Speed mm/s")
    plt.title("Speed_{0}_smoothed_by_{1}".format(meta["species"], min_bins))
    plt.savefig(os.path.join(rootdir, "{0}_speed_{1}_30m_spt.png".format(fish_ID, meta["species"].replace(' ', '-'))))

    # same mm/s plot again but with a fixed 0-60 y-limit
    plt.close()
    fig2, ax2 = filled_plot(tv / 10 ** 9 / 60 / 60, speed_sm_mm_bin, change_times_h, day_ns / 10 ** 9 / 60 / 60)
    sec_axis_h(ax2, start_total_sec)
    plt.xlabel("Time (h)")
    plt.ylabel("Speed mm/s")
    plt.title("Speed_{0}_smoothed_by_{1}".format(meta["species"], min_bins))
    ax2.set_ylim(0, 60)
    plt.savefig(
        os.path.join(rootdir, "{0}_speed_{1}_30m_spt_0-60ylim.png".format(fish_ID,
                                                                          meta["species"].replace(' ', '-'))))

    # mm/s speed smoothed over 10 min windows
    smooth_win = 10 * 60 * 10
    speed_sm_mm_bin = smooth_speed(speed_sm_mm_ps, win_size=smooth_win)
    plt.close()
    fig2, ax2 = filled_plot(tv / 10 ** 9 / 60 / 60, speed_sm_mm_bin, change_times_h, day_ns / 10 ** 9 / 60 / 60)
    sec_axis_h(ax2, start_total_sec)
    plt.xlabel("Time (h)")
    plt.ylabel("Speed mm/s")
    plt.title("Speed_{0}_smoothed_by_{1}".format(meta["species"], min_bins))
    plt.savefig(
        os.path.join(rootdir, "{0}_speed_{1}_{2}m_spt.png".format(fish_ID, meta["species"].replace(' ', '-'), 10)))

    # raw vs thresholded speed
    plt.close()
    fig2, ax2 = filled_plot(tv / 10 ** 9 / 60 / 60, speed_full, change_times_h, day_ns / 10 ** 9 / 60 / 60)
    plt.plot((tv / 10 ** 9 / 60 / 60)[0:-1], speed_t)
    sec_axis_h(ax2, start_total_sec)
    plt.xlabel("Time (h)")
    plt.ylabel("Speed pixels/0.1s")
    plt.title("Speed_{0}_raw-black_thresholded-blue".format(fish_ID, meta["species"].replace(' ', '-')))
    plt.savefig(os.path.join(rootdir,
                             "{0}_speed_{1}_speed_full_speed_thresholded.png".format(fish_ID,
                                                                                     meta["species"].replace(
                                                                                         ' ', '-'))))

    # area
    plt.close()
    fig2, ax2 = filled_plot(tv / 10 ** 9 / 60 / 60, track_full[0:-1, 3], change_times_h, day_ns / 10 ** 9 / 60 / 60)
    plt.xlabel("Time (h)")
    plt.ylabel("Area pixels/0.1s")
    plt.title("Area_{0}".format(meta["species"]))
    sec_axis_h(ax2, start_total_sec)
    plt.savefig(os.path.join(rootdir, "{0}_{1}_area.png".format(fish_ID, meta["species"].replace(' ', '-'))))

    # inter-frame time difference: sanity check of the time vector
    plt.close()
    fig3, ax3 = filled_plot(tv / 10 ** 9 / 60 / 60, np.diff(tv) / 10 ** 9, change_times_h, day_ns / 10 ** 9 / 60 / 60)
    sec_axis_h(ax3, start_total_sec)
    plt.xlabel("Time (h)")
    plt.ylabel("Inter frame time difference (s)")
    plt.title("TV_{0}".format(meta["species"]))
    plt.savefig(os.path.join(rootdir, "{0}_{1}_TV_diff.png".format(fish_ID, meta["species"].replace(' ', '-'))))

    # save out track file
    # track file needs: FISH20200727_c1_r1_Genus-species_sex_mmpp_fishlength-mm
    # speed_sm_tbl_ps, tv, x, y, fraction_active
    # speed_sm_mm_ps, tv, x, y
    track_meta = {'ID': fish_ID, 'species': meta["species"], 'sex': meta["sex"],
                  'fish_length_mm': meta["fish_length_mm"], 'mm_per_pixel': config["mm_per_pixel"]}
    meta_df = pd.DataFrame(track_meta, columns=['ID', 'species', 'sex', 'fish_length_mm', 'mm_per_pixel'], index=[0])
    meta_df.to_csv(os.path.join(rootdir, "{0}_meta.csv".format(fish_ID)))

    # start from midnight (so they all start at the same time) - need to adjust "midnight" depending on if ts were
    # adjusted for 30min shift (all recordings before 20201127).
    if int(fish_ID[4:12]) < 20201127:
        thirty_min_ns = 30 * 60 * 1000000000
        adjusted_day_ns = day_ns - thirty_min_ns
        print("old recording from before 20201127 so adjusting back time before saving out als")
    else:
        adjusted_day_ns = day_ns
    midnight = np.max(np.where(tv < adjusted_day_ns))
    track_als = np.vstack((tv[midnight:-1], speed_sm_mm_ps[midnight:, 0], x_nt[midnight:-1], y_nt[midnight:-1]))

    filename = os.path.join(rootdir, "{}_als.csv".format(fish_ID))
    als_df = pd.DataFrame(track_als.T, columns=['tv_ns', 'speed_mm', 'x_nt', 'y_nt'],
                          index=pd.Index(np.arange(0, len(speed_sm_tbl_ps[midnight:]))))
    als_df.to_csv(filename, encoding='utf-8-sig', index=False)
    plt.close('all')

    # test if saving file worked (issues with null bytes)
    try:
        data_b = pd.read_csv(filename, sep=',')
    except pd.errors.ParserError:
        print("problem parsing, probably null bytes error, trying to save with numpy instead ")
        np.savetxt(filename, track_als.T, delimiter=',', header='tv_ns,speed_mm,x_nt,y_nt', comments='')
        try:
            data_b = pd.read_csv(filename, sep=',')
        except pd.errors.ParserError:
            print("still couldn't save it properly, report this!")
            os.remove(filename)
    return
track_roi = 'm' while track_roi not in {'y', 'n'}: track_roi = input("Track with another roi? y/n: \n") if track_roi == 'y': if track_all == 'n': roi_on_one = input( "You are now changing the ROI for only one video, this is not recommended!\n " "y to continue, n to stop: \n") if roi_on_one == 'n': exit() # ## Define video rois ## # load recording roi rec_rois = load_yaml(cam_dir, "roi_file") curr_roi = rec_rois["roi_" + str(fish_data['roi'][1:])] # load video roi (if previously defined) or if not, then pick background and define a new ROI vid_rois = load_yaml(vid_dir, "roi_file") if not vid_rois: # allow user to pick the background image which to set the roi with root = Tk() root.withdraw() root.update() background_file = askopenfilename(title="Select background", filetypes=(("image files", "*.png"), )) root.destroy() background_full = cv2.imread(background_file)
def main():
    """Let the user pick a movie, scrub to a frame, and measure the fish.

    Writes the rounded fish length (mm) into "<dir>/meta_data.yaml".
    Requires "mm_per_pixel" to already exist in the camera-level config.
    Keys while playing: Esc=quit, space=pause/play, a=step back, d=step
    forward, Enter=measure on the current frame.
    """
    # Allows a user to select a directory of movie
    root = Tk()
    root.withdraw()
    root.update()
    filepath = askopenfilename(title="Select movie file", filetypes=(("mp4 files", "*.mp4"), ("all files", "*.*")))
    root.destroy()

    parts = os.path.split(filepath)
    add_sex(parts[0])
    meta = load_yaml(parts[0], "meta_data")

    # check if fish has already been measured or not
    if 'fish_length_mm' in meta:
        remeasure = 'm'
        while remeasure not in {'y', 'n'}:
            remeasure = input(
                "Fish has already been measured and the length is {}. Do you want to overwrite this "
                "measurement? y/n \n".format(meta["fish_length_mm"]))
        if remeasure == 'n':
            print("Leaving previous measurement")
            return
        else:
            print("making new measurement")

    # check if pixel units have already been measured for this camera or not
    partss = os.path.split(parts[0])
    config_file = load_yaml(partss[0], "config")
    if 'mm_per_pixel' not in config_file:
        print("No mm_per_pixel, use script: Measure_Units")
        return

    # load video; global because the trackbar callback getFrame reads it
    global video
    video = cv2.VideoCapture(filepath)
    nr_of_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))

    # set up image display and trackbar
    cv2.namedWindow('Measuring fish length')
    cv2.createTrackbar("Frame", "Measuring fish length", 0, nr_of_frames, getFrame)

    playing = 1
    while video.isOpened():
        # Get the next video frame while playing
        if playing:
            ret, frame = video.read()
            if not ret or frame is None:
                # fix: guard against end-of-video (putText would crash on None)
                break
        cv2.putText(frame, "Press enter to select frame, press space bar to pause", (5, 15),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (10, 10, 200), 2)
        cv2.imshow("Measuring fish length", frame)

        k = cv2.waitKey(100) & 0xff
        if k == 27:
            # Esc quits without measuring
            break
        elif k == 32:
            # space toggles pause/play
            if playing == 0:
                playing = 1
            elif playing == 1:
                playing = 0
        elif k == ord("a"):
            # step one frame back
            frame_nr = video.get(cv2.CAP_PROP_POS_FRAMES)
            video.set(cv2.CAP_PROP_POS_FRAMES, frame_nr - 2)
            ret, frame = video.read()
        elif k == ord("d"):
            # step one frame forward
            frame_nr = video.get(cv2.CAP_PROP_POS_FRAMES)
            video.set(cv2.CAP_PROP_POS_FRAMES, frame_nr)
            ret, frame = video.read()
        # fix: removed a second, unreachable `elif k == 32` branch (dead code)
        elif k == 13:
            # Enter: measure on the current frame and save to meta_data.yaml
            line_length_pixels = measuring(frame)
            fish_lengths_mm = line_length_pixels * config_file["mm_per_pixel"]
            fish_length_mm = round(np.mean(fish_lengths_mm), 0)
            print("rounding fish length to closest mm \n fish_length_mm: {}".format(fish_length_mm))
            meta["fish_length_mm"] = fish_length_mm.item()  # plain float for yaml
            with open(os.path.join(parts[0], "meta_data.yaml"), "w") as file:
                yaml.dump(meta, file)
            break

    # release resources
    video.release()
    cv2.destroyAllWindows()
# fix: os, sys, datetime and cv2 were used below but never imported
import datetime
import glob
import os
import sys
from tkinter import *
from tkinter.filedialog import askdirectory

import cv2

from cichlidanalysis.io.meta import load_yaml

# Allows a user to select a directory
root = Tk()
root.withdraw()
root.update()
rootdir = askdirectory()
root.destroy()

# load yaml config file
params = load_yaml(rootdir, "config")

# find and load background file (exactly one *.png is expected)
if len(glob.glob(os.path.join(rootdir, "*.png"))) != 1:
    print('too many or too few background files in folder:' + rootdir)
    sys.exit()
else:
    background_full = cv2.imread(glob.glob(os.path.join(rootdir, "*.png"))[0], 0)

# threshold position (param file??)
t_pos = 35
date = datetime.datetime.now().strftime("%Y%m%d")
width_trim = 1280
height_trim = 960
def tracker_checker_inputs(video_path_i):
    """Get the inputs for the tracker_checker.

    Locates the appropriate background image(s) for the video (either a
    per-video background or the one from the full recording), loads the
    track, applies any new ROI offsets, and computes smoothed speeds.

    :param video_path_i: path of the video to check
    :return: tuple (bkgd, pmn, spd_sm, spd_sm_mm_ps, thresh,
        displacement_i_mm_s, vid_name, track_single_i, start_point,
        end_point, x_nt_i, y_nt_i)
    """
    vid_folder_path = os.path.split(video_path_i)[0]
    vid_timestamp = os.path.split(video_path_i)[1][0:-10]
    cam_folder_path = os.path.split(vid_folder_path)[0]
    vid_folder = os.path.split(vid_folder_path)[1]

    track_single_i, displacement_internal = extract_tracks_from_fld(vid_folder_path, vid_timestamp)
    vid_name = os.path.split(video_path_i)[1]
    meta = load_yaml(vid_folder_path, "meta_data")
    config = load_yaml(cam_folder_path, "config")
    fish_data = extract_meta(vid_folder)
    new_rois = load_yaml(vid_folder_path, "roi_file")
    rois = load_yaml(cam_folder_path, "roi_file")

    # This if statement checks if there is a new background associated with the video and loads it instead
    # Also needs to be able to deal with multiple background files (if the video was split by divide_tracking.py)
    os.chdir(vid_folder_path)
    video_folder_files = glob.glob(vid_timestamp + "*background.png")
    if len(video_folder_files) > 0:
        print('using background made from video')
        if os.path.isfile(os.path.join(vid_folder_path, video_folder_files[0])):
            previous_bgd_name = video_folder_files
            print(previous_bgd_name)
            if len(previous_bgd_name) != 1:
                # multiple split backgrounds -> keep them all in a list
                print('more than 1 background files detected:' + cam_folder_path)
                background_full = []
                # Make this a dictionary instead of a list, so it can be named? Or carry over previous_bgd_name
                for file in previous_bgd_name:
                    background_path = os.path.join(vid_folder_path, "{}".format(file))
                    background_full.append(cv2.imread(glob.glob(background_path)[0], 0))
                # Need to modify them if new roi exists but background comes from
                os.chdir(cam_folder_path)
                bkgd = background_full
            else:
                background_path = os.path.join(vid_folder_path, "{}".format(previous_bgd_name[0]))
                background_full = cv2.imread(glob.glob(background_path)[0], 0)
                os.chdir(cam_folder_path)
                bkgd = background_full
                # # if new roi and background is from video then skip this
                # if bool(new_rois):
                #     roi_n = new_rois["roi_" + fish_data['roi'][1]]
                #     bkgd = background_full[roi_n[1]:roi_n[1] + roi_n[3], roi_n[0]:roi_n[0] + roi_n[2]]
                # else:
                #     bkgd = background_full
        else:
            print("weird exit not sure why")
            sys.exit()
    else:
        print('using background made from full recording image')
        os.chdir(cam_folder_path)
        files = glob.glob("*.png")
        files.sort()
        # move the shortest (oldest naming) file name to the front
        files.insert(0, files.pop(files.index(min(files, key=len))))
        # take the background preceding this video's own Median/per90 file
        if "{}_Median.png".format(vid_timestamp) in files:
            previous_bgd_name = files[files.index("{}_Median.png".format(vid_timestamp)) - 1]
            print(previous_bgd_name)
        else:
            previous_bgd_name = files[files.index("{}_per90_Background.png".format(vid_timestamp)) - 1]
            print(previous_bgd_name)

        # find and load background file
        background_path = os.path.join(cam_folder_path, "{}".format(previous_bgd_name))
        if len(glob.glob(background_path)) != 1:
            print('too many or too few background files in folder:' + cam_folder_path)
            sys.exit()
        else:
            background_full = cv2.imread(glob.glob(background_path)[0], 0)
        os.chdir(cam_folder_path)
        # crop the recording background to this fish's ROI (x, y, w, h)
        roi_n = rois["roi_" + fish_data['roi'][1]]
        bkgd = background_full[roi_n[1]:roi_n[1] + roi_n[3], roi_n[0]:roi_n[0] + roi_n[2]]

    os.chdir(cam_folder_path)
    roi_n = rois["roi_" + fish_data['roi'][1]]

    # interpolate between NaN streches
    try:
        x_n = int_nan_streches(track_single_i[:, 1])
        y_n = int_nan_streches(track_single_i[:, 2])
    except:
        x_n = track_single_i[:, 1]
        y_n = track_single_i[:, 2]

    if new_rois:
        # subtract the difference so that the centroids are plotted at the right coordinates
        # output: (x,y,w,h)
        # assume that if there is a new ROI, there is only one.
        x_n += new_rois['roi_{}'.format('0')][0]
        y_n += new_rois['roi_{}'.format('0')][1]
        track_single_i[:, 1] += new_rois['roi_{}'.format('0')][0]
        track_single_i[:, 2] += new_rois['roi_{}'.format('0')][1]
        roi_n = new_rois['roi_{}'.format('0')]
        # add in ROI to video
        start_point = (roi_n[0], roi_n[1])
        end_point = (roi_n[0] + roi_n[2], roi_n[1] + roi_n[3])
    else:
        start_point = (0, 0)
        end_point = (roi_n[2], roi_n[3])

    # find displacement
    displacement_i_mm_s = displacement_internal * config["mm_per_pixel"] * config['fps']
    speed_full_i = np.sqrt(np.diff(x_n) ** 2 + np.diff(y_n) ** 2)
    speed_t, x_nt_i, y_nt_i = remove_high_spd_xy(speed_full_i, x_n, y_n)
    spd_sm = smooth_speed(speed_t, win_size=5)
    spd_sm_mm = spd_sm * config["mm_per_pixel"]
    spd_sm_mm_ps = spd_sm_mm * config['fps']
    thresh = 0.25 * meta["fish_length_mm"]

    # Make a list of ranges, by extracting it from the previous_median_name (s)
    if isinstance(previous_bgd_name, list) and len(previous_bgd_name) > 1:
        pmn = []
        for file in previous_bgd_name:
            # only want split and divided backgrounds (not remade backgrounds)
            if 'frame' in file:
                # frame range parsed out of the file name — assumes the
                # "..._frameSSSSS-EEEEE_..." naming; TODO confirm slicing
                pmn.append(np.arange(int(file.split("_")[4][5:9:]), int(file.split("_")[4][11:16:])))
    else:
        pmn = previous_bgd_name

    return bkgd, pmn, spd_sm, spd_sm_mm_ps, thresh, displacement_i_mm_s, vid_name, track_single_i, start_point, \
        end_point, x_nt_i, y_nt_i
track_path = video_path[0:-4] + ".csv" if not os.path.isfile(track_path): track_path = [] # movie has been retracked, so pick the right csv _, all_files = get_latest_tracks(vid_folder_path, video_path[-9:-4]) for file in all_files: if ("Range" not in file) & (video_name[0:-4] in file): track_path = os.path.join(vid_folder_path, file) elif ("Range" in file) & (video_name[0:-4] in file): print("Careful, ignoring the spilt csv {}".format(track_path)) print("Using retracked csv {}".format(track_path)) if not track_path: print("Can't find right track! - add issue to github") displacement_internal, track_single = load_track(track_path) meta = load_yaml(vid_folder_path, "meta_data") rois = load_yaml(cam_folder_path, "roi_file") new_roi = load_yaml(vid_folder_path, "roi_file") config = load_yaml(cam_folder_path, "config") fish_data = extract_meta(vid_folder_name) os.chdir(cam_folder_path) files = glob.glob("*.png") files.sort() files.insert(0, files.pop(files.index(min(files, key=len)))) if "{}_Median.png".format(vid_timestamp) in files: previous_median_name = files[ files.index("{}_Median.png".format(vid_timestamp)) - 1] print(previous_median_name) else: previous_median_name = files[
} with open(os.path.join(path, "config.yaml"), "w") as file: documents = yaml.dump(config_file, file) # # Deinitialize camera # cam.EndAcquisition() # cam.DeInit() # cv2.destroyAllWindows() # del cam # cam_list.Clear() # system.ReleaseInstance() # define ROIs define_roi(cam_ID, path) rois = load_yaml(path, "roi_file") # make a sub directory for each ROI recording for roi in range(len(rois) - 1): try: # ask for meta data of the fish meta_data = recording_input() # Create target Directory roi_path = os.path.join( path, "FISH{0}_c{1}_r{2}_{3}_s{4}".format( date, str(camera[0]), str(roi), meta_data["species"].replace(' ', '-'), meta_data["sex"])) os.mkdir(roi_path) print("Directory ", path, " Created ") with open(os.path.join(roi_path, "meta_data.yaml"), "w") as file:
def divide_video(video_path, chunk_size=20, fps=10):
    """Split one video's track into time chunks and retrack each chunk.

    For every `chunk_size`-minute span of frames a background is rebuilt from
    just that range, the tracker is rerun over it, and the original
    timestamps are copied back into the per-chunk csv (which is then saved
    over).

    :param video_path: path to the .mp4 to divide
    :param chunk_size: chunk length in minutes
    :param fps: frames per second of the recording
    :return: None
    """
    vid_folder_path = os.path.split(video_path)[0]
    cam_folder_path = os.path.split(vid_folder_path)[0]
    vid_folder_name = os.path.split(vid_folder_path)[1]
    os.chdir(vid_folder_path)
    video_name = os.path.split(video_path)[1]

    # load original track (need timestamps)
    _, latest_file = get_latest_tracks(vid_folder_path, video_name[0:-4])
    _, track_single_orig = load_track(os.path.join(vid_folder_path, latest_file[0]))

    rois = load_yaml(cam_folder_path, "roi_file")
    fish_data = extract_meta(vid_folder_name)
    os.chdir(cam_folder_path)
    # (removed dead code: a sorted png file listing that was never used)

    # make the chunk boundaries (frame indices)
    chunks = np.arange(0, track_single_orig.shape[0] + 1, int(chunk_size) * fps * 60)
    if chunks[-1] != track_single_orig.shape[0]:
        # fix: append the final endpoint instead of overwriting the last
        # boundary — overwriting collapsed `chunks` to a single element (no
        # chunks processed) when the video was shorter than one chunk, and
        # silently stretched the last chunk otherwise
        chunks = np.append(chunks, track_single_orig.shape[0])
        print("correcting last timepoint")

    # making roi for full video
    width_trim, height_trim = rois['roi_{}'.format(fish_data['roi'][-1])][2:4]
    vid_rois = {'roi_0': (0, 0, width_trim, height_trim)}
    area_s = 100
    thresh = 35

    print("remaking backgrounds and retracking")
    for chunk_n in np.arange(0, len(chunks) - 1):
        split_ends = [chunks[chunk_n], chunks[chunk_n + 1]]
        background = background_vid_split(video_path, 100, 90, split_ends)

        # in case a new ROI has been used for tracking, use this.
        crop_vid_rois = load_yaml(vid_folder_path, "roi_file")
        if crop_vid_rois:
            vid_rois = crop_vid_rois
            print("tracking with new roi")
        tracker(video_path, background, vid_rois, threshold=thresh, display=False, area_size=area_s,
                split_range=split_ends)

        # add in the right timepoints (of a primary track - not a full retrack)
        # load the newly tracked csv
        date = datetime.datetime.now().strftime("%Y%m%d")
        range_s = str(split_ends[0]).zfill(5)
        range_e = str(split_ends[1]).zfill(5)
        filename = video_path[0:-4] + "_tracks_{}_Thresh_{}_Area_{}_Range{}-{}_.csv".format(date, thresh, area_s,
                                                                                            range_s, range_e)
        _, track_single_split = load_track(filename)

        # replace the frame col with the data from the original track (track index always starts from 0)
        track_single_split[0:split_ends[1] - split_ends[0], 0] = track_single_orig[split_ends[0]:split_ends[1], 0]

        # save over
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        np.savetxt(filename, track_single_split, delimiter=",")