# Accumulator for the experiment parameters of each 0-/pi-configuration
# data-set pair listed in exp_paired_df.
exp_paired_param_df = pd.DataFrame()

for exp_par_id in exp_pair_id_arr:

    print('Experiment ID: ' + str(exp_par_id))

    # Folder names of the two data sets forming this experiment pair.
    exp_folder_name_1 = exp_paired_df.loc[exp_par_id][
        'Experiment Folder Name 1']
    exp_folder_name_2 = exp_paired_df.loc[exp_par_id][
        'Experiment Folder Name 2']

    # Load the previously analyzed data sets (load_Q=True) without any
    # beam-rms-radius phase correction applied.
    data_set_1 = fosof_data_set_analysis.DataSetFOSOF(
        exp_folder_name=exp_folder_name_1,
        load_Q=True,
        beam_rms_rad_to_load=None)
    data_set_2 = fosof_data_set_analysis.DataSetFOSOF(
        exp_folder_name=exp_folder_name_2,
        load_Q=True,
        beam_rms_rad_to_load=None)

    exp_param_1_s = data_set_1.get_exp_parameters().copy()
    exp_param_2_s = data_set_2.get_exp_parameters().copy()

    # The pair must consist of exactly one '0'- and one 'pi'-configuration
    # data set; any other combination means the pairing table is wrong.
    if {exp_param_1_s['Configuration'],
            exp_param_2_s['Configuration']} != {'0', 'pi'}:
        raise FosofAnalysisError(
            "Not both of '0'- and 'pi' configurations are present")
def fosof_data_sets_analyze():
    """Analyze every eligible FOSOF data set listed in the info file.

    Selects the data sets that are fully acquired, error-free, fully
    analyzed, and started before 2018-08-31, then runs the complete
    FOSOF analysis chain for each one, once per beam rms radius value.
    Progress is reported to the GUI through the module-level tkinter
    variables, and a per-row timing file is maintained so future runs
    can estimate the expected analysis duration.

    Communicates with the GUI thread via the declared globals; returns
    nothing. Leaves the process cwd at ``saving_folder_location``.
    """
    global analysis_interrupted_Q, analysis_in_process_Q, stop_progress_thread, expected_analysis_duration

    analysis_in_process_Q = True
    start_button_text.set('Stop the analysis')

    # Location where the analyzed experiment is saved
    #saving_folder_location = 'C:/Research/Lamb shift measurement/Data/FOSOF analyzed data sets'
    # For Home
    saving_folder_location = fosof_for_analysis_folder_path

    # Analysis version. Needed for checking if the data set has been analyzed before.
    version_number = 0.1

    # File containing parameters and comments about all of the data sets.
    exp_info_file_name = 'fosof_data_sets_info.csv'
    exp_info_index_name = 'Experiment Folder Name'

    av_time_per_row_file_name = 'analysis_time_per_row.txt'

    os.chdir(saving_folder_location)
    # np.bool was deprecated and then removed from NumPy; the builtin
    # bool is the correct dtype specifier here.
    exp_info_df = pd.read_csv(filepath_or_buffer=exp_info_file_name,
                              delimiter=',',
                              comment='#',
                              header=0,
                              skip_blank_lines=True,
                              dtype={'Error(s) During Analysis': bool})

    # Folder names in the info file may carry stray surrounding whitespace.
    exp_info_df[exp_info_index_name] = exp_info_df[
        exp_info_index_name].transform(lambda x: x.strip())

    exp_info_df = exp_info_df.set_index(exp_info_index_name)

    # Pick only fully analyzed data sets that had no errors during the analysis/acquisition.
    # .copy() makes this an independent frame so the column assignment
    # below does not write into a view of exp_info_df
    # (SettingWithCopyWarning).
    exp_info_chosen_df = exp_info_df[
        exp_info_df['Data Set Fully Acquired']
        & ~(exp_info_df['Error(s) During Analysis']) &
        (exp_info_df['Analysis Finished'])].copy()

    # After this date I was acquiring only the data sets without the atoms present. These data sets were analyzed with the different code.
    max_datetime = pd.to_datetime('2018-08-31')

    exp_info_chosen_df['Acquisition Start Date'] = pd.to_datetime(
        exp_info_chosen_df['Acquisition Start Date'])

    # Selecting the data sets before the max_datetime
    exp_info_chosen_df = exp_info_chosen_df[
        exp_info_chosen_df['Acquisition Start Date'] <
        max_datetime].sort_values(by='Acquisition Start Date')

    exp_name_list = exp_info_chosen_df.index

    os.chdir(saving_folder_location)

    # Load the running record of per-row analysis times (if present); it
    # is used below to estimate the expected duration of each analysis.
    if os.path.isfile(av_time_per_row_file_name):
        av_duration_per_row_df = pd.read_csv(
            filepath_or_buffer=av_time_per_row_file_name,
            delimiter=',',
            comment='#',
            header=0,
            skip_blank_lines=True)
        av_duration_per_row_df = av_duration_per_row_df.set_index(
            exp_info_index_name)
    else:
        av_duration_per_row_df = None

    # Perform data analysis for the list of the experiments. Stop the analysis, if the analysis has been interrupted.
    exp_counter = 0

    while exp_counter < exp_name_list.shape[
            0] and analysis_interrupted_Q == False:

        exp_folder_name = exp_name_list[exp_counter]

        experiment_current_name_tk_var.set('Experiment: (' +
                                           str(exp_counter + 1) + str('/') +
                                           str(exp_name_list.shape[0]) + ') ' +
                                           exp_folder_name)

        # List of beam rms radius values for correcting the FOSOF phases using the simulations. Value of None corresponds to not applying the correction. Note that the None value has to be first in the list. This is for checking whether the analysis has been performed before or not on the FOSOF data sets that are not of the simple Waveguide Carrier Frequency Sweep type.
        beam_rms_rad_list = [None, 0.8, 1.6, 2.4, 0.85, 1.7, 2.55]

        for beam_rms_rad in beam_rms_rad_list:

            os.chdir(saving_folder_location)
            os.chdir(exp_folder_name)

            analyzed_data_file_name = get_analysis_data_object_file_name(
                beam_rms_rad, version_number)

            # Skip the analysis if a saved analysis object already exists
            # for this (beam rms radius, version) combination.
            if not (os.path.isfile(analyzed_data_file_name)):
                data_set = fosof_data_set_analysis.DataSetFOSOF(
                    exp_folder_name=exp_folder_name,
                    load_Q=False,
                    beam_rms_rad_to_load=beam_rms_rad)

                # In case we have FOSOF data set that is not of the simplest type AND the beam rms radius is not NONE, then there should be no analysis made, because it was already finished (None is the first value in the loop of beam rms radius values)
                if not (data_set.get_exp_parameters()['Experiment Type'] !=
                        'Waveguide Carrier Frequency Sweep' and
                        (beam_rms_rad is not None)):
                    print(exp_folder_name)
                    print(beam_rms_rad)
                    time_start = time.time()

                    # Historical per-row durations are stored in ms;
                    # convert to seconds for the duration estimate.
                    if av_duration_per_row_df is None:
                        av_duration_average = 0
                    else:
                        av_duration_average = np.mean(av_duration_per_row_df[
                            'Average Analysis Time Per Row [ms]'].values) / 1E3

                    stop_progress_thread = False

                    progress_bar_thread = threading.Thread(
                        target=progress_bar_update)
                    progress_bar_thread.start()

                    n_rows = data_set.exp_data_frame.shape[0]

                    expected_analysis_duration = av_duration_average * n_rows

                    # The power correction is performed only for the simple FOSOF data sets.

                    fc_df = data_set.get_fc_data()
                    quenching_df = data_set.get_quenching_cav_data()
                    rf_pow_df = data_set.get_rf_sys_pwr_det_data()

                    digi_df = data_set.get_digitizers_data()

                    comb_phase_diff_df = data_set.get_combiners_phase_diff_data(
                    )
                    digi_delay_df = data_set.get_inter_digi_delay_data()

                    if beam_rms_rad is not None:
                        data_set.correct_phase_diff_for_RF_power(beam_rms_rad)

                    phase_diff_df = data_set.get_phase_diff_data()
                    phase_av_set_averaged_df = data_set.average_av_sets()
                    phase_A_minus_B_df, phase_freq_response_df = data_set.cancel_out_freq_response(
                    )
                    fosof_ampl_df, fosof_phase_df = data_set.average_FOSOF_over_repeats(
                    )

                    data_set.save_instance(rewrite_Q=True)

                    # Signal the progress thread to finish and wait for it.
                    stop_progress_thread = True
                    progress_bar_thread.join()

                    time_end = time.time()
                    analysis_duration = time_end - time_start
                    av_duration_per_row = analysis_duration / n_rows

                    av_duration_per_row_append_df = pd.DataFrame({
                        'Experiment Folder Name':
                        pd.Series(exp_folder_name),
                        'Average Analysis Time Per Row [ms]':
                        pd.Series([av_duration_per_row * 1E3])
                    }).set_index('Experiment Folder Name')

                    if av_duration_per_row_df is None:
                        av_duration_per_row_df = av_duration_per_row_append_df

                    else:
                        # DataFrame.append was removed in pandas 2.0;
                        # pd.concat is the supported replacement.
                        av_duration_per_row_df = pd.concat([
                            av_duration_per_row_df,
                            av_duration_per_row_append_df
                        ])

                    # Saving the averaging time per row data back to the file. This writing is done inefficiently: after every new data set we rewrite the whole file instead of just appending the new row. I am too lazy to fix this.
                    os.chdir(saving_folder_location)

                    av_duration_per_row_df.drop_duplicates().to_csv(
                        path_or_buf=av_time_per_row_file_name,
                        mode='w',
                        header=True)

        exp_counter = exp_counter + 1

    os.chdir(saving_folder_location)

    # In case the analysis has been interrupted, then after stopping the analysis, set this boolean back to False, so that the analysis could continued again if needed.
    if analysis_interrupted_Q == True:
        analysis_interrupted_Q = False

    # Set the button name back to its initial text value.
    start_button_text.set('Analyze data sets')
    analysis_in_process_Q = False
# Half of the mean per-element difference, averaged over the two repeat pairs.
np.mean([np.mean(r1_a-r1_b), np.mean(r2_a-r2_b)])/2
#%%
# Same quantity computed as the difference of the per-array means
# (mathematically equal to the cell above for equal-length arrays).
np.mean([np.mean(r1_a) - np.mean(r1_b), np.mean(r2_a) - np.mean(r2_b)])/2
#%%
# Load previously exported phase data, indexed by the first two columns.
phase_data_file_name = 'phasedata_112305.csv'
phase_data_df =  pd.read_csv(filepath_or_buffer=phase_data_file_name, delimiter=',', comment='#', skip_blank_lines=True, index_col=[0, 1])
#%%
phase_data_df
#%%
# Inspect only the 'Unique Uncertainty' rows of the two-level index.
phase_data_df.loc[('Unique Uncertainty'), slice(None)]
#%%
# Re-analyze a single 0-configuration data set from scratch
# (load_Q=False = do not load a previously saved analysis object).
exp_folder_name = '180528-112305 - FOSOF Acquisition - 0 config, 14 V per cm PD ON 37.7 V, 16.27 kV'

beam_rms_rad = None
# List of beam  rms radius values for correcting the FOSOF phases using the simulations. Value of None corresponds to not applying the correction.
data_set = fosof_data_set_analysis.DataSetFOSOF(exp_folder_name=exp_folder_name, load_Q=False, beam_rms_rad_to_load=beam_rms_rad)

# The power correction is performed only for the simple FOSOF data sets.
# if data_set.get_exp_parameters()['Experiment Type'] != 'Waveguide Carrier Frequency Sweep':
#     beam_rms_rad = None
#     data_set = DataSetFOSOF(exp_folder_name=exp_folder_name, load_Q=True, beam_rms_rad_to_load=beam_rms_rad)

data_set.select_portion_of_data()
#%%
# Extract the auxiliary monitoring data for this data set.
fc_df = data_set.get_fc_data()
quenching_df = data_set.get_quenching_cav_data()
# NOTE(review): here get_rf_sys_pwr_det_data() is unpacked into two
# frames, while the analysis function above binds a single name to the
# same call — presumably a different version of the method; verify.
rf_pow_df, rf_system_power_outlier_df = data_set.get_rf_sys_pwr_det_data()

digi_df = data_set.get_digitizers_data()

comb_phase_diff_df = data_set.get_combiners_phase_diff_data()
    for exp_id, data_df in df.groupby('Experiment ID'):

        data_s = data_df.iloc[0]

        fosof_phase_set_df = pd.DataFrame()

        # The beam rms radii are used only for the 'Waveguide Carrier Frequency Sweep' experiment type
        if data_s['Experiment Type'] == 'Waveguide Carrier Frequency Sweep':
            beam_rms_rad_list = [None, 0.8, 1.6, 2.4, 0.85, 1.7, 2.55]
        else:
            beam_rms_rad_list = [None]

        for beam_rms_rad in beam_rms_rad_list:
            data_set_0 = fosof_data_set_analysis.DataSetFOSOF(
                exp_folder_name=data_s['Experiment Name (0-config)'],
                load_Q=True,
                beam_rms_rad_to_load=beam_rms_rad)

            data_set_pi = fosof_data_set_analysis.DataSetFOSOF(
                exp_folder_name=data_s['Experiment Name (pi-config)'],
                load_Q=True,
                beam_rms_rad_to_load=beam_rms_rad)

            fosof_ampl_0_df, fosof_phase_0_df = data_set_0.average_FOSOF_over_repeats(
            )
            fosof_ampl_pi_df, fosof_phase_pi_df = data_set_pi.average_FOSOF_over_repeats(
            )

            # Calculate fosof phases + their uncertainties
            fosof_phase_df = (
                fosof_phase_0_df.loc[slice(None),