def piteca_excepthook(exctype, value, tb):
    """
    A hook to catch all unhandled exceptions during PITECA's run.

    :param exctype: the type of the exception
    :param value: the exception instance (use str(value) for its message)
    :param tb: the traceback
    """
    # if not gb.should_exit_on_error:
    #     # If we are on the main thread but don't want to close PITECA
    #     dialog_utils.print_error(constants.UNEXPECTED_EXCEPTION_MSG[:-1] + ": " + str(value))
    #     return

    # Show the user the full error value only if it is a PITECA error
    if exctype == definitions.PitecaError:
        msg = str(value.message)
    else:
        msg = constants.UNEXPECTED_EXCEPTION_MSG

    if int(QThread.currentThreadId()) == main_thread_id:
        # Unhandled exception on the main (GUI) thread: report and shut down
        definitions.print_in_debug(value)
        traceback.print_tb(tb)
        dialog_utils.print_error(msg + ". PITECA will now be closed")
        sys.exit()
    else:
        # On a worker thread, delegate the error back to the GUI thread.
        # The exception_occurred_sig should be defined in every thread class in PITECA.
        definitions.print_in_debug(value)
        definitions.print_in_debug(exctype)
        traceback.print_tb(tb)
        QThread.currentThread().exception_occurred_sig.emit(msg)
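# A minimal sketch of how the hook above would be installed at application startup.
# Where exactly main_thread_id is captured is an assumption, not taken from the
# PITECA sources; sys.excepthook itself is the standard Python mechanism.
main_thread_id = int(QThread.currentThreadId())  # assumed to run on the GUI thread
sys.excepthook = piteca_excepthook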
def __handle_unexpected_exception(self, dlg, thread, value):
    """
    The function to be called when an unhandled exception occurs in the given thread.
    Terminates the thread, closes the progress dialog and pops up an error message.

    :param dlg: the progress dialog, to be closed
    :param thread: the analysis thread where the exception occurred, to be terminated
    :param value: the error message to show the user
    """
    dlg.close()
    thread.quit()
    dialog_utils.print_error(value)
def onClick(self, event):
    """
    The function that should be called when the user clicks on the graph.
    Updates the label that shows the exact correlation if the user clicked inside
    the graph borders, and does nothing if the user clicks elsewhere.
    """
    if event.button == 1 and event.xdata and event.ydata:
        subject_x_index = math.floor(event.xdata)
        subject_y_index = math.floor(event.ydata)
        if self.analysis_task == AnalysisTask.Analysis_Correlations:
            # graph (1)
            if (event.inaxes.get_xlabel() == self.SUBJECTS_X_LABEL
                    and event.inaxes.get_ylabel() == self.SUBJECTS_Y_LABEL):
                correlation = self.subj_subj_data[subject_x_index, subject_y_index]
                between1 = "subject {}".format(self.ids[subject_x_index])
                between2 = "subject {}".format(self.ids[subject_y_index])
            # graph (2)
            # elif event.inaxes.get_xlabel() == "" and event.inaxes.get_ylabel() == MEAN_Y_LABEL:
            #     correlation = self.subj_mean_data[subject_x_index]
            #     between1 = self.ids[subject_x_index]
            #     between2 = "mean activation"
            # graph (3)
            elif event.inaxes.get_xlabel() == "" and event.inaxes.get_ylabel() == CANONICAL_Y_LABEL:
                correlation = self.subj_canonical_data[subject_x_index]
                between1 = "subject {}".format(self.ids[subject_x_index])
                between2 = "canonical activation"
            # not a heat map location
            else:
                return
        elif self.analysis_task == AnalysisTask.Compare_Correlations:
            # graph (4)
            if (event.inaxes.get_xlabel() == self.SUBJECTS_X_LABEL
                    and event.inaxes.get_ylabel() == self.SUBJECTS_Y_LABEL):
                correlation = self.subj_subj_data[subject_y_index, subject_x_index]
                between1 = "subject {}".format(self.ids[subject_x_index])
                between2 = "subject {}".format(self.ids[subject_y_index])
            # not a heat map location
            else:
                return
        else:
            dialog_utils.print_error(constants.UNEXPECTED_EXCEPTION_MSG)
            return
        self.correlation_label.setText(
            "Value: {:.2f} (Correlation between {} and {})".format(correlation, between1, between2))
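# A hedged sketch of how this handler would be registered on the Matplotlib canvas
# created in __init__ (see below). mpl_connect is the standard Matplotlib API; the
# exact place PITECA performs this connection is an assumption.
#
#     self.canvas.mpl_connect('button_press_event', self.onClick)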
def onRunPredictClicked(self):
    """
    The function to be called when the user clicks the run-prediction button.
    Validates the input, then runs the prediction flow on the checked tasks.
    """
    inputFiles = self.ui.inputFilesLineEdit.text()
    outputDir = get_prediction_outputs_folder()
    tasks = self.findCheckedTasks()
    if not inputFiles:
        dialog_utils.print_error(PROVIDE_INPUT_FILES_MSG)
        return
    if not tasks:
        dialog_utils.print_error(SELECT_TASKS_MSG)
        return
    predictModel = PredictTabModel(inputFiles, outputDir, tasks)
    predictModel.run_prediction_flow(self.ui)
def onRunComparisonButtonClicked(self):
    """
    The function to be called when the user clicks the "Compare" button.
    Starts the analysis process according to the action selected in the UI.
    """
    predicted_files_str = self.ui.selectPredictedLineEdit.text()
    actual_files_str = self.ui.addActualLineEdit.text()
    if not predicted_files_str or not actual_files_str:
        dialog_utils.print_error(constants.PROVIDE_INPUT_MSG)
        return
    self.task = constants.Task[self.ui.taskComboBox.currentText()]
    subjects = self.__create_subjects(self.task, predicted_files_str, actual_files_str)
    if not subjects:
        return
    if len(subjects) > constants.MAX_SUBJECTS:
        dialog_utils.inform_user(
            "Too many files to process. Maximum number is {} files.".format(constants.MAX_SUBJECTS))
        return

    # Prepare additional analysis parameters
    analysis_task = None
    outputdir = get_analysis_results_folder()
    other_path = path_utils.get_canonical_path(self.task)
    if self.ui.comparisonCorrelationsRadioButton.isChecked():
        analysis_task = AnalysisTask.Compare_Correlations
    elif self.ui.comparisonSignificantRadioButton.isChecked():
        analysis_task = AnalysisTask.Compare_Significance
    else:
        dialog_utils.print_error(constants.SELECT_ACTION_MSG)
        return

    # Run the analysis on a worker thread while showing a modal progress dialog
    thread = AnalysisWorkingThread(analysis_task, subjects, self.task, outputdir, other_path)
    dlg = analysis_working_dlg_controller.AnalysisWorkingDlg()
    dlg.closeEvent = lambda event: self.wait_dlg_close_event(event, dlg, thread)
    dlg.setWindowModality(Qt.ApplicationModal)
    dlg.show()
    thread.progress_finished_sig.connect(
        lambda: self.__handle_results(analysis_task, dlg, thread.results, subjects))
    thread.exception_occurred_sig.connect(
        lambda value: self.__handle_unexpected_exception(dlg, thread, value))
    thread.start()
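# Hedged sketch (not the actual PITECA class body): AnalysisWorkingThread is expected
# to be a QThread subclass exposing the two signals connected above. Only the signal
# names come from this code; the class body and the PyQt5 import are assumptions.
#
# from PyQt5.QtCore import QThread, pyqtSignal
#
# class AnalysisWorkingThread(QThread):
#     progress_finished_sig = pyqtSignal()
#     exception_occurred_sig = pyqtSignal(str)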
def __create_subjects(self, task, predicted_files_str, actual_files_str=None):
    """
    Creates subjects with the minimal info required for analysis.

    :param task: the task chosen for the analysis
    :param predicted_files_str: the input string from the user
    :param actual_files_str: the input string from the user (optional)
    :return: a list of Subjects, or an empty value if the input is invalid
    """
    subjects = []
    predicted_files = path_utils.extract_filenames(predicted_files_str)
    for file in predicted_files:
        curr_subject = subject.Subject()
        curr_subject.subject_id = path_utils.get_id(file)
        if curr_subject.subject_id is None:
            dialog_utils.inform_user(constants.NAMING_CONVENTION_ERROR)
            return []
        curr_subject.predicted = {task: file}
        subjects.append(curr_subject)

    if actual_files_str:
        actual_files = path_utils.extract_filenames(actual_files_str)
        # Check that the predicted and actual files match
        predicted_ids = [subject.subject_id for subject in subjects]
        actual_ids = [path_utils.get_id(file) for file in actual_files]
        if not set(predicted_ids) == set(actual_ids):
            dialog_utils.print_error(
                "The files provided as the actual activation do not match the predicted files.")
            return None
        else:
            # Add the "actual" property to the matching subjects
            for file in actual_files:
                match_subject = next(
                    subject for subject in subjects
                    if subject.subject_id == path_utils.get_id(file))
                match_subject.actual = {task: file}

    # Assert only unique subject ids
    ids = [subject.subject_id for subject in subjects]
    if len(ids) != len(set(ids)):
        dialog_utils.print_error(constants.DUP_IDS)
        return None

    return subjects
def __init__(self, analysis_task, data, subjects, title, parent=None):
    super(GraphicDlg, self).__init__(parent)

    # a figure instance to plot on
    self.figure = plt.figure()

    # the Canvas Widget that displays the `figure`
    # it takes the `figure` instance as a parameter to __init__
    self.canvas = FigureCanvas(self.figure)

    # the Navigation widget
    # it takes the Canvas widget and a parent
    self.toolbar = NavigationToolbar(self.canvas, self)

    # save button
    self.save_button = QtWidgets.QPushButton('Save data')
    self.save_button.setMaximumWidth(max_width_button)
    self.save_button.clicked.connect(self.save_data)

    # set the layout
    self.layout = QVBoxLayout()
    self.layout.addWidget(self.toolbar)
    self.layout.addWidget(self.canvas)
    self.layout.addWidget(self.save_button)
    self.setLayout(self.layout)
    self.setWindowTitle("PITECA")
    icon = QtGui.QIcon()
    icon.addPixmap(QtGui.QPixmap(definitions.PITECA_ICON_PATH), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    self.setWindowIcon(icon)

    # Add results illustration
    self.ids = [subject.subject_id for subject in subjects]
    self.analysis_task = analysis_task
    self.data = data
    self.title = title
    self.named_data = {}

    # calculate the x tick labels font size
    num_of_chars = len(''.join(self.ids)) + len(self.ids)
    self.font_size = (14 / (math.ceil(num_of_chars / 29))) if num_of_chars > 29 else 7

    # Set graph attributes specifically by task
    if self.analysis_task == AnalysisTask.Analysis_Correlations:
        self.SUBJECTS_X_LABEL = "subjects"
        self.SUBJECTS_Y_LABEL = "subjects"
        self.subj_subj_data = data[0]  # 2 dims
        # self.subj_mean_data = data[1]  # 1 dim
        self.subj_canonical_data = data[2]  # 1 dim
        self.named_data = {
            'inter-subject predictions correlation': self.subj_subj_data,
            'subjects predictions correlations with canonical': self.subj_canonical_data
        }
        self.plot_heatmap()
    elif analysis_task == AnalysisTask.Compare_Correlations:
        self.SUBJECTS_X_LABEL = "subjects: Actual"
        self.SUBJECTS_Y_LABEL = "subjects: Predicted"
        self.subj_subj_data = data  # 2 dims
        self.named_data = {
            'inter-subject predicted-actual correlations': self.subj_subj_data,
        }
        self.plot_heatmap()

    if analysis_task == AnalysisTask.Analysis_Correlations or analysis_task == AnalysisTask.Compare_Correlations:
        if analysis_task == AnalysisTask.Compare_Correlations:
            mean_of_diagonal = np.mean(np.diagonal(self.subj_subj_data))
            self.mean_correlation_label = QtWidgets.QLabel(
                'Mean correlation: {:01.2f}'.format(mean_of_diagonal))
            self.layout.addWidget(self.mean_correlation_label)
        # a label to show the exact correlation value
        self.correlation_label = QtWidgets.QLabel('Click on an entry to see the exact correlation value')
        self.layout.addWidget(self.correlation_label)
    elif analysis_task == AnalysisTask.Compare_Significance:
        # self.data is a 2-dimensional array
        self.named_data = {
            'subjects predicted-actual positive significance iou ': self.data[0],
            'subjects predicted-actual negative significance iou ': self.data[1]
        }
        self.plot_barchart()
    else:
        dialog_utils.print_error("Unsupported analysis action")
        return

    if self.analysis_task in [AnalysisTask.Analysis_Correlations, AnalysisTask.Compare_Correlations]:
        self.plot_heatmap()
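# Hedged usage sketch: GraphicDlg is assumed to be created by the results handler
# (e.g. __handle_results in the comparison controller) roughly as follows. Only the
# constructor signature is taken from this file; the call site and the title string
# are assumptions.
#
#     dlg = GraphicDlg(analysis_task, thread.results, subjects, title="Correlations")
#     dlg.show()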