def import_error(self, ):
    """Show a warning dialog explaining why a data import failed."""
    requirements = ("Data were not loaded. \n Please, be sure that:\n "
                    "1. Data have 1 or 2 columns.\n"
                    "2. Data are longer than 100 points.\n"
                    "3. Delimiter is correctly specified.\n"
                    "4. Rows in data contain only numeric values\n")
    box = MessageBox()
    box.about(self, 'Warning!', requirements)
def import_error(self, ):
    """Pop up a warning listing the requirements a data file must satisfy."""
    MessageBox().about(
        self, 'Warning!',
        "Data were not loaded. \n Please, be sure that:\n "
        "1. Data have 1 or 2 columns.\n"
        "2. Data are longer than 100 points.\n"
        "3. Delimiter is correctly specified.\n"
        "4. Rows in data contain only numeric values\n")
def dot_pick_enable(self, ):
    """Toggle manual peak picking on the matplotlib canvas.

    When the dot-pick checkbox is checked, connect mouse press/motion
    events to the ``on_click``/``on_motion`` handlers and remember the
    connection ids; when unchecked, disconnect them again.  If no
    figure/canvas has been initiated yet, a warning dialog is shown.
    """
    try:  # only works once a figure and canvas have been initiated
        if self.chbxDotPickEnable.isChecked():
            self.cid_click = self.canvas.mpl_connect('button_press_event', self.on_click)
            self.cid_motion = self.canvas.mpl_connect('motion_notify_event', self.on_motion)
        else:
            self.canvas.mpl_disconnect(self.cid_click)
            self.canvas.mpl_disconnect(self.cid_motion)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; the "warn and continue" behaviour is unchanged.
        message = MessageBox()
        message.about(self, 'Warning!',
                      "File was not loaded! \n Please be sure that your file has "
                      "\n 1) 1 or 2 columns; \n 2) check headers, footers and delimeter "
                      "\n and try again.")
def dot_pick_enable(self, ):
    """Enable or disable interactive peak selection.

    Checked state connects the mouse press/motion events of the canvas
    to the peak-picking handlers; unchecked state disconnects them.
    A warning dialog is shown when the canvas does not exist yet.
    """
    try:  # requires that a figure and its canvas have been initiated
        if self.chbxDotPickEnable.isChecked():
            self.cid_click = self.canvas.mpl_connect('button_press_event', self.on_click)
            self.cid_motion = self.canvas.mpl_connect('motion_notify_event', self.on_motion)
        else:
            self.canvas.mpl_disconnect(self.cid_click)
            self.canvas.mpl_disconnect(self.cid_motion)
    except Exception:
        # Was a bare ``except:``; catching Exception keeps the same
        # user-facing behaviour while letting KeyboardInterrupt through.
        message = MessageBox()
        message.about(self, 'Warning!',
                      "File was not loaded! \n Please be sure that your file has "
                      "\n 1) 1 or 2 columns; \n 2) check headers, footers and delimeter "
                      "\n and try again.")
def data_preprocessing(self, data_to_preprocess):
    """Optionally detrend and then smooth the loaded trace.

    Stores the (possibly) detrended trace in ``self.data_detrended`` and
    the Savitzky-Golay-filtered trace in ``self.data_after_filter``.

    :param data_to_preprocess: 1-D sequence of signal samples.
    """
    try:
        # Remove the linear trend only when requested in the GUI.
        if self.chbxDetrendData.isChecked():
            self.data_detrended = sig.detrend(data_to_preprocess)
        else:
            self.data_detrended = data_to_preprocess
        # Savitzky-Golay smoothing; window length and polynomial order
        # are taken from the settings spin-boxes.
        sg_window_frame = self.BoxSGwindowFrame.value()
        sg_polynom_degree = self.BoxSGpolynomDegree.value()
        self.data_after_filter = sig.savgol_filter(self.data_detrended,
                                                   sg_window_frame, sg_polynom_degree)
    except Exception:
        # Narrowed from a bare ``except:`` so Ctrl-C/SystemExit propagate.
        message = MessageBox()
        message.about(self, 'Warning!',
                      "Not possible to detrend and/or smooth data! \n Please check your dataset and try again.")
def data_preprocessing(self, data_to_preprocess):
    """Detrend (if enabled) and Savitzky-Golay-smooth the input trace.

    Results are written to ``self.data_detrended`` and
    ``self.data_after_filter``.

    :param data_to_preprocess: 1-D sequence of signal samples.
    """
    try:
        # Detrending is optional and controlled by a GUI checkbox.
        if self.chbxDetrendData.isChecked():
            self.data_detrended = sig.detrend(data_to_preprocess)
        else:
            self.data_detrended = data_to_preprocess
        # Smoothing parameters come from the GUI spin-boxes.
        sg_window_frame = self.BoxSGwindowFrame.value()
        sg_polynom_degree = self.BoxSGpolynomDegree.value()
        self.data_after_filter = sig.savgol_filter(self.data_detrended,
                                                   sg_window_frame, sg_polynom_degree)
    except Exception:
        # Was a bare ``except:``; Exception is the narrowest safe catch here.
        message = MessageBox()
        message.about(self, 'Warning!',
                      "Not possible to detrend and/or smooth data! \n Please check your dataset and try again.")
def save_data(self, ):
    """Export the accumulated result table to an Excel workbook.

    Asks the user for a target path, writes ``self.multiple_data_sets``
    to a 'Results' sheet, applies cosmetic column formatting, then
    resets the accumulator and updates the button colours.
    """
    try:
        file_name = QtGui.QFileDialog.getSaveFileName(self, 'Save file')
        if not file_name:
            # User cancelled the dialog — do not create a nameless ".xlsx".
            return
        writer = pd.ExcelWriter('{}.xlsx'.format(file_name))
        self.multiple_data_sets.to_excel(writer, index=True, sheet_name='Results')
        # Cosmetic worksheet tweaks: zoom level and column widths.
        writer.sheets['Results'].set_zoom(80)
        writer.sheets['Results'].set_column('A:A', 5)
        writer.sheets['Results'].set_column('B:X', 23)
        writer.save()
        message = MessageBox()
        message.about(self, 'Data saved', "Data were saved!")
        # Start a fresh accumulation cycle and recolour the buttons.
        self.multiple_data_sets = pd.DataFrame()
        self.BtnSaveFullDataset.setStyleSheet("background-color: #FAF6F2")
        self.BtnLoadFile.setStyleSheet("background-color: #7CF2BD")
    except Exception:
        # Narrowed from a bare ``except:`` (which would also catch
        # KeyboardInterrupt/SystemExit).
        message = MessageBox()
        message.about(self, 'Warning!', "Data were not exported to Excel! \n Please try again.")
def save_data(self, ):
    """Export the accumulated result table to an Excel workbook (PyQt5).

    Asks the user for a target path, writes ``self.multiple_data_sets``
    to a 'Results' sheet with cosmetic formatting, then resets the
    accumulator and updates the button colours.
    """
    try:
        # PyQt5 returns (path, selected_filter); take the path only.
        file_name = QFileDialog.getSaveFileName(self, 'Save file')[0]
        if not file_name:
            # Dialog was cancelled — avoid writing a nameless ".xlsx" file.
            return
        writer = pd.ExcelWriter('{}.xlsx'.format(file_name))
        self.multiple_data_sets.to_excel(writer, index=True, sheet_name='Results')
        # Cosmetic worksheet tweaks: zoom level and column widths.
        writer.sheets['Results'].set_zoom(80)
        writer.sheets['Results'].set_column('A:A', 5)
        writer.sheets['Results'].set_column('B:X', 23)
        writer.save()
        message = MessageBox()
        message.about(self, 'Data saved', "Data were saved!")
        # Start a fresh accumulation cycle and recolour the buttons.
        self.multiple_data_sets = pd.DataFrame()
        self.BtnSaveFullDataset.setStyleSheet("background-color: #FAF6F2")
        self.BtnLoadFile.setStyleSheet("background-color: #7CF2BD")
    except Exception as e:
        message = MessageBox()
        message.about(self, 'Warning!', "Data were not exported to Excel! \n Please try again.")
        print(e)
def coordinates_analysis(self, ):
    """ Main function: build the per-peak results table.

    Converts the picked peak coordinates (apexes, left/right borders)
    into per-peak metrics — amplitudes, periods, frequencies, border
    times, half-decay values, AUC, peak-size-class frequencies — then
    drops columns according to the export settings and appends the
    result to ``self.multiple_data_sets``.
    """
    # Unpack (x, y) pairs: peak apexes and the left/right peak borders.
    coord_x, coord_y = zip(*self.coordinates)
    leftpb_x, leftpb_y = zip(*self.left_peak_border)
    rightpb_x, rightpb_y = zip(*self.right_peak_border)
    # absolute amplitude % and MAX: amplitudes scaled by the maximum.
    relative_amplitude = []
    ampl_max = max(self.amplitudes)
    relative_amplitude[:] = [(i / ampl_max) for i in self.amplitudes]
    # create temporal Pandas DataFrame for sorting and calculation:
    temp_dataset = list(
        zip(coord_x, self.amplitudes, relative_amplitude, leftpb_x, leftpb_y,
            rightpb_x, rightpb_y, self.area))
    df = pd.DataFrame(data=temp_dataset,
                      columns=['Peak Time', 'Amplitude',
                               'Relative Amplitude \n (F/Fmax)',
                               'Peak Start Time', 'Peak Start Ordinate',
                               'Peak Stop Time', 'Peak Stop Ordinate', 'Area'])
    # Sort data in DataFrame according to the time of peak appearance,
    # then reset the index so ``.at[i, ...]`` addresses peaks in time order.
    df_sorted = df.sort_values(['Peak Time'], ascending=True)
    df_sorted.index = range(0, len(df_sorted))  # reset indexing
    # calculate periods between consecutive peaks
    periods = []
    for i in range(1, len(df_sorted['Peak Time'])):
        periods.append(df_sorted.at[i, 'Peak Time'] - df_sorted.at[i - 1, 'Peak Time'])
    periods.insert(0, np.nan)  # add placeholder because len(periods)=len(peaks)-1
    # calculate frequencies based on calculated periods (NaN propagates
    # for the first peak).
    frequencies = []
    frequencies[:] = [(1 / i) for i in periods]
    # Analise peak start - stop time (left and right peak borders).
    peak_full_time = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        peak_full_time.append(df_sorted.at[i, 'Peak Stop Time'] - df_sorted.at[i, 'Peak Start Time'])
    peak_up_time = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        peak_up_time.append(df_sorted.at[i, 'Peak Time'] - df_sorted.at[i, 'Peak Start Time'])
    peak_down_time = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        peak_down_time.append(df_sorted.at[i, 'Peak Stop Time'] - df_sorted.at[i, 'Peak Time'])
    # Compute area under the peak using the composite trapezoidal rule.
    peak_area = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        peak_area.append(np.trapz(df_sorted.at[i, 'Area']))
    # Analise the peak decay area: for each peak, find the sample between
    # the apex and the right border closest to half the amplitude.
    half_decay_time = []
    half_decay_amplitude = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        half_decay_ampl = df_sorted.at[i, 'Amplitude'] / 2  # half of the amplitude
        peak_index = self.x.index(df_sorted.at[i, 'Peak Time'])  # index of the peak apex
        stop_idx = self.x.index(df_sorted.at[i, 'Peak Stop Time'])  # index of the right border
        # Region of the filtered trace where the half-decay point is searched.
        data_decay_region = self.data_after_filter[peak_index:stop_idx]
        time_decay_region = self.x[peak_index:stop_idx]
        # Closest value in data_decay_region to the half amplitude.
        # NOTE(review): ``argmin`` raises on an empty slice if apex index
        # equals the border index — confirm upstream guarantees a gap.
        half_decay_idx = (np.abs(data_decay_region - half_decay_ampl)).argmin()
        half_decay_amplitude.append(half_decay_ampl)
        half_decay_time.append(time_decay_region[half_decay_idx] - df_sorted.at[i, 'Peak Time'])
    # Compute amplitude normalised to the baseline F0 (smoothed original
    # trace evaluated at the peak start).
    normalised_amplitude = []
    sg_window_frame = self.BoxSGwindowFrame.value()
    sg_polynom_degree = self.BoxSGpolynomDegree.value()
    orig_data_filtered = sig.savgol_filter(self.y, sg_window_frame, sg_polynom_degree)
    for i in range(0, len(df_sorted['Peak Time']), 1):
        start_idx = self.x.index(df_sorted.at[i, 'Peak Start Time'])
        F0 = orig_data_filtered[start_idx]
        amplitude_normed_computation = df_sorted.at[i, 'Amplitude'] / F0
        normalised_amplitude.append(amplitude_normed_computation)
    # normalised amplitude % (relative to its own maximum)
    relative_normalised_amplitude = []
    maxATB = max(normalised_amplitude)
    relative_normalised_amplitude[:] = [(i / maxATB) for i in normalised_amplitude]
    # normalised amplitude MAX — single-value column: first row holds the
    # value, remaining rows are NaN.
    normalised_amplitude_max = list(range(0, len(df_sorted['Peak Time']) - 1))
    normalised_amplitude_max[:] = [np.nan for _ in normalised_amplitude_max]
    normalised_amplitude_max.insert(0, maxATB)
    # add file name as first column (first row only)
    file_name = list(range(0, len(df_sorted['Peak Time']) - 1))
    file_name[:] = [np.nan for _ in file_name]
    file_name.insert(0, self.graph_name)
    # add maximum amplitude (first row only)
    absolute_amplitude_max = list(range(0, len(df_sorted['Peak Time']) - 1))
    absolute_amplitude_max[:] = [np.nan for _ in absolute_amplitude_max]
    absolute_amplitude_max.insert(0, max(df_sorted['Amplitude']))
    # peak sorting: classify peaks into thirds of the maximal amplitude
    # and express each class as a frequency over the full recording time.
    big_peaks_number = [p for p in self.amplitudes if (p > ampl_max * 0.66)]
    medium_peaks_number = [p for p in self.amplitudes if (p > ampl_max * 0.33 and p <= ampl_max * 0.66)]
    small_peaks_number = [p for p in self.amplitudes if (p > 0 and p <= ampl_max * 0.33)]
    big_peaks_frequency = list(range(0, len(df_sorted['Peak Time']) - 1))
    big_peaks_frequency[:] = [np.nan for _ in big_peaks_frequency]
    big_peaks_frequency.insert(0, len(big_peaks_number) / (self.x[-1] - self.x[0]))
    medium_peaks_frequency = list(range(0, len(df_sorted['Peak Time']) - 1))
    medium_peaks_frequency[:] = [np.nan for _ in medium_peaks_frequency]
    medium_peaks_frequency.insert(0, len(medium_peaks_number) / (self.x[-1] - self.x[0]))
    small_peaks_frequency = list(range(0, len(df_sorted['Peak Time']) - 1))
    small_peaks_frequency[:] = [np.nan for _ in small_peaks_frequency]
    small_peaks_frequency.insert(0, len(small_peaks_number) / (self.x[-1] - self.x[0]))
    # Assemble the final per-peak results table.
    final_dataset = list(zip(file_name, df_sorted['Peak Time'], df_sorted['Amplitude'],
                             df_sorted['Relative Amplitude \n (F/Fmax)'],
                             absolute_amplitude_max, normalised_amplitude,
                             relative_normalised_amplitude, normalised_amplitude_max,
                             periods, frequencies, half_decay_time, half_decay_amplitude,
                             df_sorted['Peak Start Time'], df_sorted['Peak Start Ordinate'],
                             df_sorted['Peak Stop Time'], df_sorted['Peak Stop Ordinate'],
                             peak_up_time, peak_down_time, peak_full_time, peak_area,
                             big_peaks_frequency, medium_peaks_frequency, small_peaks_frequency))
    final_dataframe = pd.DataFrame(data=final_dataset,
                                   columns=['File name', 'Peak time', 'Absolute amplitude',
                                            'Absolute amplitude (%)', 'Absolute amplitude MAX',
                                            'Normalised amplitude', 'Normalised amplitude (%)',
                                            'Normalised amplitude MAX', 'Period', 'Frequency',
                                            'Half-decay time', 'Half-decay amplitude',
                                            'Start time', 'Start ordinate', 'Stop time',
                                            'Stop ordinate', 'Ascending time', 'Decay time',
                                            'Full peak time', 'AUC', 'Big peaks, Hz',
                                            'Mid peaks, Hz', 'Small peaks, Hz'])
    # Specify data for export according to the settings tab in the GUI
    # and append the current analysed dataset to existing ones.
    try:
        columns_to_delete_for_export = []
        if not self.chbxFileName.isChecked():
            columns_to_delete_for_export.append('File name')
        if not self.chbxPeakTime.isChecked():
            columns_to_delete_for_export.append('Peak time')
        if not self.chbxAmplAbs.isChecked():
            columns_to_delete_for_export.append('Absolute amplitude')
        if not self.chbxAmplAbsRel.isChecked():
            columns_to_delete_for_export.append('Absolute amplitude (%)')
        if not self.chbxAmplAbsMax.isChecked():
            columns_to_delete_for_export.append('Absolute amplitude MAX')
        if not self.chbxAmplNorm.isChecked():
            columns_to_delete_for_export.append('Normalised amplitude')
        if not self.chbxAmplNormRel.isChecked():
            columns_to_delete_for_export.append('Normalised amplitude (%)')
        if not self.chbxAmplNormMax.isChecked():
            columns_to_delete_for_export.append('Normalised amplitude MAX')
        if not self.chbxPeriod.isChecked():
            columns_to_delete_for_export.append('Period')
        if not self.chbxFreq.isChecked():
            columns_to_delete_for_export.append('Frequency')
        if not self.chbxHalfDecayTime.isChecked():
            columns_to_delete_for_export.append('Half-decay time')
        if not self.chbxHalfDecayAmpl.isChecked():
            columns_to_delete_for_export.append('Half-decay amplitude')
        if not self.chbxLeftBorderTime.isChecked():
            columns_to_delete_for_export.append('Start time')
        if not self.chbxLeftBorder.isChecked():
            columns_to_delete_for_export.append('Start ordinate')
        if not self.chbxRightBorderTime.isChecked():
            columns_to_delete_for_export.append('Stop time')
        if not self.chbxRightBorder.isChecked():
            columns_to_delete_for_export.append('Stop ordinate')
        if not self.chbxTimeToPeak.isChecked():
            columns_to_delete_for_export.append('Ascending time')
        if not self.chbxDecayTime.isChecked():
            columns_to_delete_for_export.append('Decay time')
        if not self.chbxFullPeakTime.isChecked():
            columns_to_delete_for_export.append('Full peak time')
        if not self.chbxAUC.isChecked():
            columns_to_delete_for_export.append('AUC')
        # NOTE(review): the next two pairings look swapped (the "small
        # peaks" checkbox drops the 'Big peaks, Hz' column and vice
        # versa) — confirm against the GUI before changing.
        if not self.chbxSmallPeaks.isChecked():
            columns_to_delete_for_export.append('Big peaks, Hz')
        if not self.chbxMidPeaks.isChecked():
            columns_to_delete_for_export.append('Mid peaks, Hz')
        if not self.chbxBigPeaks.isChecked():
            columns_to_delete_for_export.append('Small peaks, Hz')
        final_dataframe.drop(columns_to_delete_for_export, axis=1, inplace=True)
        self.multiple_data_sets = self.multiple_data_sets.append(final_dataframe)
        # Optionally save the current figure into a '_Figures' folder.
        if self.chbxSaveFig.isChecked():
            os.makedirs('_Figures', exist_ok=True)
            dpi = self.BoxDPI.value()
            plt.savefig(os.path.join('_Figures', 'Fig_{figName}.png'.format(figName=self.graph_name)), dpi=dpi)
        # Free the intermediate tables.
        del df
        del df_sorted
        del final_dataframe
        # Ask whether to continue with the next file.
        dialog = MessageBox.question(self, '',
                                     "Current dataset was analysed \n and added to previous ones (if exist). \n Would you like to load next file? ",
                                     QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
        if dialog == QtGui.QMessageBox.Yes:
            self.load_file()
        else:
            self.rmmpl()
            self.BtnSaveFullDataset.setStyleSheet("background-color: #7CF2BD")
            self.BtnLoadFile.setStyleSheet("background-color: #7CF2BD")
    except:
        # NOTE(review): bare ``except:`` also swallows KeyboardInterrupt /
        # SystemExit — consider narrowing to ``except Exception:``.
        message = MessageBox()
        message.about(self, 'Warning!',
                      "Data were not added to existing dataset. \n Plese be sure that you did not change the output settings.")
def coordinates_analysis(self, ):
    """ Main function: build the per-peak results table (PyQt5 variant).

    Converts the picked peak coordinates (apexes, left/right borders)
    into per-peak metrics — amplitudes, periods, frequencies, border
    times, half-decay values, AUC, peak-size-class frequencies — then
    drops columns according to the export settings and appends the
    result to ``self.multiple_data_sets``.
    """
    # Unpack (x, y) pairs: peak apexes and the left/right peak borders.
    coord_x, coord_y = zip(*self.coordinates)
    leftpb_x, leftpb_y = zip(*self.left_peak_border)
    rightpb_x, rightpb_y = zip(*self.right_peak_border)
    # absolute amplitude % and MAX: amplitudes scaled by the maximum.
    relative_amplitude = []
    ampl_max = max(self.amplitudes)
    relative_amplitude[:] = [(i / ampl_max) for i in self.amplitudes]
    # create temporal Pandas DataFrame for sorting and calculation:
    temp_dataset = list(
        zip(coord_x, self.amplitudes, relative_amplitude, leftpb_x, leftpb_y,
            rightpb_x, rightpb_y, self.area))
    df = pd.DataFrame(data=temp_dataset,
                      columns=['Peak Time', 'Amplitude',
                               'Relative Amplitude \n (F/Fmax)',
                               'Peak Start Time', 'Peak Start Ordinate',
                               'Peak Stop Time', 'Peak Stop Ordinate', 'Area'])
    # Sort data in DataFrame according to the time of peak appearance,
    # then reset the index so ``.at[i, ...]`` addresses peaks in time order.
    df_sorted = df.sort_values(['Peak Time'], ascending=True)
    df_sorted.index = range(0, len(df_sorted))  # reset indexing
    # calculate periods between consecutive peaks
    periods = []
    for i in range(1, len(df_sorted['Peak Time'])):
        periods.append(df_sorted.at[i, 'Peak Time'] - df_sorted.at[i - 1, 'Peak Time'])
    periods.insert(0, np.nan)  # add placeholder because len(periods)=len(peaks)-1
    # calculate frequencies based on calculated periods (NaN propagates
    # for the first peak).
    frequencies = []
    frequencies[:] = [(1 / i) for i in periods]
    # Analise peak start - stop time (left and right peak borders).
    peak_full_time = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        peak_full_time.append(df_sorted.at[i, 'Peak Stop Time'] - df_sorted.at[i, 'Peak Start Time'])
    peak_up_time = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        peak_up_time.append(df_sorted.at[i, 'Peak Time'] - df_sorted.at[i, 'Peak Start Time'])
    peak_down_time = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        peak_down_time.append(df_sorted.at[i, 'Peak Stop Time'] - df_sorted.at[i, 'Peak Time'])
    # Compute area under the peak using the composite trapezoidal rule.
    peak_area = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        peak_area.append(np.trapz(df_sorted.at[i, 'Area']))
    # Analise the peak decay area: for each peak, find the sample between
    # the apex and the right border closest to half the amplitude.
    half_decay_time = []
    half_decay_amplitude = []
    for i in range(0, len(df_sorted['Peak Time']), 1):
        half_decay_ampl = df_sorted.at[i, 'Amplitude'] / 2  # half of the amplitude
        peak_index = self.x.index(df_sorted.at[i, 'Peak Time'])  # index of the peak apex
        stop_idx = self.x.index(df_sorted.at[i, 'Peak Stop Time'])  # index of the right border
        # Region of the filtered trace where the half-decay point is searched.
        data_decay_region = self.data_after_filter[peak_index:stop_idx]
        time_decay_region = self.x[peak_index:stop_idx]
        # Closest value in data_decay_region to the half amplitude.
        # NOTE(review): ``argmin`` raises on an empty slice if apex index
        # equals the border index — confirm upstream guarantees a gap.
        half_decay_idx = (np.abs(data_decay_region - half_decay_ampl)).argmin()
        half_decay_amplitude.append(half_decay_ampl)
        half_decay_time.append(time_decay_region[half_decay_idx] - df_sorted.at[i, 'Peak Time'])
    # Compute amplitude normalised to the baseline F0 (smoothed original
    # trace evaluated at the peak start).
    normalised_amplitude = []
    sg_window_frame = self.BoxSGwindowFrame.value()
    sg_polynom_degree = self.BoxSGpolynomDegree.value()
    orig_data_filtered = sig.savgol_filter(self.y, sg_window_frame, sg_polynom_degree)
    for i in range(0, len(df_sorted['Peak Time']), 1):
        start_idx = self.x.index(df_sorted.at[i, 'Peak Start Time'])
        F0 = orig_data_filtered[start_idx]
        amplitude_normed_computation = df_sorted.at[i, 'Amplitude'] / F0
        normalised_amplitude.append(amplitude_normed_computation)
    # normalised amplitude % (relative to its own maximum)
    relative_normalised_amplitude = []
    maxATB = max(normalised_amplitude)
    relative_normalised_amplitude[:] = [(i / maxATB) for i in normalised_amplitude]
    # normalised amplitude MAX — single-value column: first row holds the
    # value, remaining rows are NaN.
    normalised_amplitude_max = list(range(0, len(df_sorted['Peak Time']) - 1))
    normalised_amplitude_max[:] = [np.nan for _ in normalised_amplitude_max]
    normalised_amplitude_max.insert(0, maxATB)
    # add file name as first column (first row only)
    file_name = list(range(0, len(df_sorted['Peak Time']) - 1))
    file_name[:] = [np.nan for _ in file_name]
    file_name.insert(0, self.graph_name)
    # add maximum amplitude (first row only)
    absolute_amplitude_max = list(range(0, len(df_sorted['Peak Time']) - 1))
    absolute_amplitude_max[:] = [np.nan for _ in absolute_amplitude_max]
    absolute_amplitude_max.insert(0, max(df_sorted['Amplitude']))
    # peak sorting: classify peaks into thirds of the maximal amplitude
    # and express each class as a frequency over the full recording time.
    big_peaks_number = [p for p in self.amplitudes if (p > ampl_max * 0.66)]
    medium_peaks_number = [p for p in self.amplitudes if (p > ampl_max * 0.33 and p <= ampl_max * 0.66)]
    small_peaks_number = [p for p in self.amplitudes if (p > 0 and p <= ampl_max * 0.33)]
    big_peaks_frequency = list(range(0, len(df_sorted['Peak Time']) - 1))
    big_peaks_frequency[:] = [np.nan for _ in big_peaks_frequency]
    big_peaks_frequency.insert(0, len(big_peaks_number) / (self.x[-1] - self.x[0]))
    medium_peaks_frequency = list(range(0, len(df_sorted['Peak Time']) - 1))
    medium_peaks_frequency[:] = [np.nan for _ in medium_peaks_frequency]
    medium_peaks_frequency.insert(0, len(medium_peaks_number) / (self.x[-1] - self.x[0]))
    small_peaks_frequency = list(range(0, len(df_sorted['Peak Time']) - 1))
    small_peaks_frequency[:] = [np.nan for _ in small_peaks_frequency]
    small_peaks_frequency.insert(0, len(small_peaks_number) / (self.x[-1] - self.x[0]))
    # Assemble the final per-peak results table.
    final_dataset = list(zip(file_name, df_sorted['Peak Time'], df_sorted['Amplitude'],
                             df_sorted['Relative Amplitude \n (F/Fmax)'],
                             absolute_amplitude_max, normalised_amplitude,
                             relative_normalised_amplitude, normalised_amplitude_max,
                             periods, frequencies, half_decay_time, half_decay_amplitude,
                             df_sorted['Peak Start Time'], df_sorted['Peak Start Ordinate'],
                             df_sorted['Peak Stop Time'], df_sorted['Peak Stop Ordinate'],
                             peak_up_time, peak_down_time, peak_full_time, peak_area,
                             big_peaks_frequency, medium_peaks_frequency, small_peaks_frequency))
    final_dataframe = pd.DataFrame(data=final_dataset,
                                   columns=['File name', 'Peak time', 'Absolute amplitude',
                                            'Absolute amplitude (%)', 'Absolute amplitude MAX',
                                            'Normalised amplitude', 'Normalised amplitude (%)',
                                            'Normalised amplitude MAX', 'Period', 'Frequency',
                                            'Half-decay time', 'Half-decay amplitude',
                                            'Start time', 'Start ordinate', 'Stop time',
                                            'Stop ordinate', 'Ascending time', 'Decay time',
                                            'Full peak time', 'AUC', 'Big peaks, Hz',
                                            'Mid peaks, Hz', 'Small peaks, Hz'])
    # Specify data for export according to the settings tab in the GUI
    # and append the current analysed dataset to existing ones.
    try:
        columns_to_delete_for_export = []
        if not self.chbxFileName.isChecked():
            columns_to_delete_for_export.append('File name')
        if not self.chbxPeakTime.isChecked():
            columns_to_delete_for_export.append('Peak time')
        if not self.chbxAmplAbs.isChecked():
            columns_to_delete_for_export.append('Absolute amplitude')
        if not self.chbxAmplAbsRel.isChecked():
            columns_to_delete_for_export.append('Absolute amplitude (%)')
        if not self.chbxAmplAbsMax.isChecked():
            columns_to_delete_for_export.append('Absolute amplitude MAX')
        if not self.chbxAmplNorm.isChecked():
            columns_to_delete_for_export.append('Normalised amplitude')
        if not self.chbxAmplNormRel.isChecked():
            columns_to_delete_for_export.append('Normalised amplitude (%)')
        if not self.chbxAmplNormMax.isChecked():
            columns_to_delete_for_export.append('Normalised amplitude MAX')
        if not self.chbxPeriod.isChecked():
            columns_to_delete_for_export.append('Period')
        if not self.chbxFreq.isChecked():
            columns_to_delete_for_export.append('Frequency')
        if not self.chbxHalfDecayTime.isChecked():
            columns_to_delete_for_export.append('Half-decay time')
        if not self.chbxHalfDecayAmpl.isChecked():
            columns_to_delete_for_export.append('Half-decay amplitude')
        if not self.chbxLeftBorderTime.isChecked():
            columns_to_delete_for_export.append('Start time')
        if not self.chbxLeftBorder.isChecked():
            columns_to_delete_for_export.append('Start ordinate')
        if not self.chbxRightBorderTime.isChecked():
            columns_to_delete_for_export.append('Stop time')
        if not self.chbxRightBorder.isChecked():
            columns_to_delete_for_export.append('Stop ordinate')
        if not self.chbxTimeToPeak.isChecked():
            columns_to_delete_for_export.append('Ascending time')
        if not self.chbxDecayTime.isChecked():
            columns_to_delete_for_export.append('Decay time')
        if not self.chbxFullPeakTime.isChecked():
            columns_to_delete_for_export.append('Full peak time')
        if not self.chbxAUC.isChecked():
            columns_to_delete_for_export.append('AUC')
        # NOTE(review): the next two pairings look swapped (the "small
        # peaks" checkbox drops the 'Big peaks, Hz' column and vice
        # versa) — confirm against the GUI before changing.
        if not self.chbxSmallPeaks.isChecked():
            columns_to_delete_for_export.append('Big peaks, Hz')
        if not self.chbxMidPeaks.isChecked():
            columns_to_delete_for_export.append('Mid peaks, Hz')
        if not self.chbxBigPeaks.isChecked():
            columns_to_delete_for_export.append('Small peaks, Hz')
        final_dataframe.drop(columns_to_delete_for_export, axis=1, inplace=True)
        self.multiple_data_sets = self.multiple_data_sets.append(final_dataframe)
        # Optionally save the current figure into a '_Figures' folder.
        if self.chbxSaveFig.isChecked():
            os.makedirs('_Figures', exist_ok=True)
            dpi = self.BoxDPI.value()
            plt.savefig(os.path.join('_Figures', 'Fig_{figName}.png'.format(figName=self.graph_name)), dpi=dpi)
        # Free the intermediate tables.
        del df
        del df_sorted
        del final_dataframe
        # Ask whether to continue with the next file.
        dialog = MessageBox.question(self, '',
                                     "Current dataset was analysed \n and added to previous ones (if exist). \n Would you like to load next file? ",
                                     QMessageBox.Yes, QMessageBox.No)
        if dialog == QMessageBox.Yes:
            self.load_file()
        else:
            self.rmmpl()
            self.BtnSaveFullDataset.setStyleSheet("background-color: #7CF2BD")
            self.BtnLoadFile.setStyleSheet("background-color: #7CF2BD")
    except:
        # NOTE(review): bare ``except:`` also swallows KeyboardInterrupt /
        # SystemExit — consider narrowing to ``except Exception:``.
        message = MessageBox()
        message.about(self, 'Warning!',
                      "Data were not added to existing dataset. \n Plese be sure that you did not change the output settings.")