def run_equalizer(selected_rows, states_df, parameters, session_wise=False):
    '''
    Equalize brightness across trials/sessions to compensate for contrast
    differences and photobleaching, and save the corrected videos.

    It can be run with the already aligned videos (session_wise=True) or
    trial by trial (session_wise=False). For trial by trial, a template
    trial is selected via parameters['make_template_from_trial'].

    params: selected_rows: pd.DataFrame -> A dataframe containing the analysis states you want to have equalized
    params: states_df: pd.DataFrame -> A dataframe containing all the analysis data base
    params: parameters: dict -> contains parameters concerning equalization
    params: session_wise: bool -> equalize the concatenated session video
            instead of the individual trial videos

    returns : None on success (updated states are persisted via
    db.save_analysis_states_database). If duplicate trials are passed,
    the input dataframe is returned early and nothing is written.
    '''
    step_index = 4  # pipeline step number for the equalization stage

    # Sort the dataframe correctly (multi-index order defined by the project)
    df = selected_rows.sort_values(by=paths.multi_index_structure)

    # Determine the output path
    output_tif_file_path = os.environ[
        'DATA_DIR'] + f'data/interim/equalizer/main/'
    mouse, session, init_trial, *r = df.iloc[0].name

    # Guard against duplicated (session, trial, is_rest) combinations:
    # set_index(verify_integrity=True) raises ValueError on duplicates.
    try:
        df.reset_index()[['session', 'trial', 'is_rest'
                          ]].set_index(['session', 'trial', 'is_rest'],
                                       verify_integrity=True)
    except ValueError:
        logging.error(
            'You passed multiple of the same trial in the dataframe df')
        return df

    # creates an output dictionary for the data base
    output = {
        'main': {},
        'meta': {
            'analysis': {
                'analyst': os.environ['ANALYST'],
                'date': datetime.datetime.today().strftime("%m-%d-%Y"),
                'time': datetime.datetime.today().strftime("%H:%M:%S")
            },
            'duration': {}
        }
    }

    if session_wise:
        row_local = df.iloc[0]
        # SECURITY NOTE(review): eval() on database-stored strings executes
        # arbitrary code if the DB is tampered with; prefer ast.literal_eval
        # if the stored dicts are plain literals — TODO confirm.
        input_tif_file_list = eval(row_local['alignment_output'])['main']
        # load video as 3d array, already concatenated across trials
        movie_original = cm.load(input_tif_file_list)

        if parameters['make_template_from_trial'] == 0:
            movie_equalized = do_equalization(movie_original)
        else:
            movie_equalized = np.empty_like(movie_original)
            # template: histogram of the first 100 frames
            source = movie_original[0:100, :, :]
            # equalize the video in 100-frame chunks against the template.
            # NOTE(review): frames past the last full 100-frame chunk are
            # left uninitialized (np.empty_like) — confirm this is intended.
            for j in range(int(movie_original.shape[0] / 100)):
                want_to_equalize = movie_original[j * 100:(j + 1) * 100, :, :]
                movie_equalized[j * 100:(j + 1) *
                                100, :, :] = do_equalization_from_template(
                                    reference=want_to_equalize, source=source)

        # Save the movie under a re-versioned index
        index = row_local.name
        new_index = db.replace_at_index1(index, 4 + 4, 2)  ## version 2 is for session wise
        row_local.name = new_index
        equalized_path = movie_equalized.save(
            output_tif_file_path +
            db.create_file_name(step_index, row_local.name) + '.mmap',
            order='C')
        output['main'] = equalized_path
        row_local.loc['equalization_output'] = output
        states_df = db.append_to_or_merge_with_states_df(states_df, row_local)
    else:
        # Collect the decoded (raw) video path for every selected trial.
        input_tif_file_list = []
        for idx, row in df.iterrows():
            # SECURITY NOTE(review): eval() on DB strings — see note above.
            decoding_output = eval(row.loc['decoding_output'])
            input_tif_file_list.append(decoding_output['main'])

        # load the videos as np.array to be able to manipulate them
        m_list = []
        shape_list = []
        for i in range(len(input_tif_file_list)):
            im = io.imread(input_tif_file_list[i])  # load video as 3d array
            m_list.append(im)  # and add every video to the list
            # list of sizes, used to cut the videos in time so all of them
            # have the same length
            shape_list.append(im.shape)

        # Shortest trial length, rounded down to a multiple of 100 frames so
        # the 100-frame chunked equalization fits exactly.
        min_shape = min(shape_list)
        new_shape = (100 * int(min_shape[0] / 100), min_shape[1],
                     min_shape[2])  # new videos shape

        m_list_reshape = []
        m_list_equalized = []
        # template: histogram of the first 100 frames of the first trial
        source = m_list[0][0:100, :, :]
        # equalize all the videos with the histogram of source
        for i in range(len(input_tif_file_list)):
            video = m_list[i]
            if parameters['make_template_from_trial'] == 0:
                # NOTE(review): this branch equalizes the full-length video
                # (no time cropping), unlike the template branch below.
                equalized_video = do_equalization(video)
            else:
                m_list_reshape.append(video[:new_shape[0], :, :])
                equalized_video = np.empty_like(video[:new_shape[0], :, :])
                for j in range(int(min_shape[0] / 100)):
                    want_to_equalize = m_list_reshape[i][j * 100:(j + 1) *
                                                         100, :, :]
                    equalized_video[
                        j * 100:(j + 1) *
                        100, :, :] = do_equalization_from_template(
                            reference=want_to_equalize, source=source)
            m_list_equalized.append(equalized_video)

        # convert each 3d np.array to a caiman movie and save it as a tif
        # file, so it can be read by the motion correction script.
        for i in range(len(input_tif_file_list)):
            row_local = df.iloc[i]
            movie_equalized = cm.movie(m_list_equalized[i])
            # BUGFIX: the original also built cm.movie(m_list_reshape[i])
            # here; that value was never used and raised IndexError when
            # make_template_from_trial == 0 (m_list_reshape stays empty).

            # Write necessary variables to the trial index and row_local
            index = row_local.name
            new_index = db.replace_at_index1(index, 4 + 0, 1)  ## version 1 is for trial wise
            row_local.name = new_index
            output['main'] = output_tif_file_path + db.create_file_name(
                step_index, row_local.name) + '.tif'
            row_local.loc['equalization_output'] = output
            movie_equalized.save(output_tif_file_path +
                                 db.create_file_name(step_index, row_local.name) +
                                 '.tif')
            states_df = db.append_to_or_merge_with_states_df(
                states_df, row_local)

    # Persist the updated analysis-states database for BOTH branches (the
    # session-wise branch also mutates states_df and must be saved).
    db.save_analysis_states_database(states_df,
                                     paths.analysis_states_database_path,
                                     paths.backup_path)
    return
# Top-level pipeline script: run cropping on the selected trials, persist each
# result to the analysis-states database, then select the rows for the next
# step (motion correction).
# NOTE(review): indentation reconstructed from a whitespace-mangled source —
# the exact nesting of the save call should be confirmed against the original.
selected_rows = db.select(states_df,
                          'cropping',
                          mouse=mouse_number,
                          session=session,
                          is_rest=is_rest,
                          decoding_v=decoding_version,
                          cropping_v=0)  # cropping_v=0 selects uncropped states

for i in range(init_trial, end_trial):
    # all states for trial i (may be several, e.g. rest / non-rest)
    selection = selected_rows.query('(trial ==' + f'{i}' + ')')
    for j in range(len(selection)):
        mouse_row = selection.iloc[j]
        mouse_row = main_cropping(mouse_row, parameters_cropping)
        states_df = db.append_to_or_merge_with_states_df(
            states_df, mouse_row)
        # persisted after every trial so a crash loses at most one state
        db.save_analysis_states_database(
            states_df, analysis_states_database_path, backup_path)

cropping_version = mouse_row.name[
    5]  # set the cropping version to the one currently used

# Select rows to be motion corrected using current version of cropping, define motion correction parameters
# (refer to parameter_setting_motion_correction)
selected_rows = db.select(states_df,
                          'motion_correction',
                          mouse=mouse_number,
                          session=session,
                          is_rest=is_rest,
                          decoding_v=decoding_version,
                          cropping_v=cropping_version,
                          motion_correction_v=0)

# NOTE(review): loop body appears truncated at the end of this chunk
for i in range(init_trial, end_trial):
    print(i)