# Run source extraction on the first row of the earlier selection, tag it
# with a new analysis version, and persist the updated states database.
mouse_row = selected_rows.iloc[0]

# main_source_extraction runs the extraction step using the cluster handle
# `dview`; presumably it returns the row with the output fields filled in —
# TODO confirm against the project implementation.
mouse_row_new = main_source_extraction(mouse_row, parameters_source_extraction, dview)
mouse_row_new = db.set_version_analysis('source_extraction', mouse_row_new)
states_df = db.append_to_or_merge_with_states_df(states_df, mouse_row_new)
db.save_analysis_states_database(states_df, path=analysis_states_database_path, backup_path = backup_path)


#%% COMPONENT EVALUATION

# Thresholds for component evaluation.
min_SNR = 3            # adaptive threshold on the transient size (SNR)
r_values_min = 0.85    # space-consistency threshold; lowering it accepts
                       # more components, potentially of worse quality

parameters_component_evaluation = dict(
    min_SNR=min_SNR,
    rval_thr=r_values_min,
    use_cnn=False,
)

# Shut down the ipyparallel cluster before evaluation (this evaluation call
# takes no `dview`, so the worker pool is no longer needed here).
cm.stop_server(dview=dview)
mouse_row_new = main_component_evaluation(mouse_row_new, parameters_component_evaluation)

# The 'component_evaluation_output' cell stores the repr of a dict; eval()
# turns it back into a dict whose 'main' key is the output HDF5 path.
# NOTE(review): eval() on stored strings is unsafe and fragile — prefer
# ast.literal_eval if the stored repr is guaranteed literal-only.
component_evaluation_output = eval(mouse_row_new['component_evaluation_output'])
input_hdf5_file_path = component_evaluation_output['main']

# Reload the CNMF results (now carrying the evaluation fields) from disk.
cnm = load_CNMF(input_hdf5_file_path)

# Report how many components passed / failed the quality thresholds.
print('Accepted components = ')
print(len(cnm.estimates.idx_components))
print('Rejected components = ')
print(len(cnm.estimates.idx_components_bad))
    selection = selected_rows.query('(session ==' + f'{session}' + ')')
    for cropping_version in [1, 2, 3, 4]:
        print(cropping_version)
        selected_rows = db.select(
            states_df,
            'component_evaluation',
            mouse=mouse_number,
            session=session,
            is_rest=is_rest,
            decoding_v=decoding_version,
            cropping_v=cropping_version,
            motion_correction_v=motion_correction_version,
            alignment_v=alignment_version,
            source_extraction_v=source_extraction_version)
        mouse_row = selected_rows.iloc[0]
        mouse_row_new = main_component_evaluation(
            mouse_row, parameters_component_evaluation, session_wise=True)
        states_df = db.append_to_or_merge_with_states_df(
            states_df, mouse_row_new)
        db.save_analysis_states_database(states_df,
                                         path=analysis_states_database_path,
                                         backup_path=backup_path)

##########

decoding_version = 1
motion_correction_version = 1

for session in [1, 2, 4]:
    print(session)
    # Run decoding for group of data tha have the same cropping parameters (same mouse)
    selection = selected_rows.query('(session ==' + f'{session}' + ')')
#%%
##To see components one by one in browser
#cnm.estimates.nb_view_components(img=cn_filter, idx=cnm.estimates.idx_components, cmap = 'gray')

#select the number of components to plot
idx_array = np.arange(10)  # indices of the first 10 components
#plot components (non deconvolved)
# Stacked plot of the raw temporal traces C for the chosen component indices.
fig = src.steps.source_extraction.get_fig_C_stacked(cnm.estimates.C,
                                                    idx_components=idx_array)

#%%

# Stricter thresholds for a second evaluation pass.
min_SNR = 10         # adaptive threshold on the transient size (SNR)
r_values_min = 0.99  # space-consistency threshold; lowering it accepts
                     # more components, potentially of worse quality
parameters_component_evaluation = dict(
    min_SNR=min_SNR,
    rval_thr=r_values_min,
    use_cnn=False,
)

# Re-run component evaluation with the stricter thresholds on a single row.
# NOTE(review): `index` and `row` are not defined anywhere in this chunk —
# presumably set earlier in the file; verify before running this cell.
main_component_evaluation(index, row, parameters_component_evaluation)

#%%
#load cnmf class with evaluation
# NOTE(review): eval() on a stored string is unsafe — consider
# ast.literal_eval if the stored repr is guaranteed literal-only.
component_evaluation_output = eval(row.loc['component_evaluation_output'])
input_hdf5_file_path = component_evaluation_output['main']

cnm = load_CNMF(input_hdf5_file_path)
print(len(cnm.estimates.idx_components))  # count of accepted components