    def go_from_text(self):
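        """
        Handles the user pressing Enter inside the current-directory lineedit: validates the typed path,
        navigates into it if it is an existing directory, and updates the path history/index bookkeeping.
        Invalid input (a file or a non-existent path) raises a message box and restores the previous text.
        """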
        newpath = Path(self.le_current_dir.text())

        # Avoid writing into history if the Enter event is the current directory
        if newpath == self.path_history[self.path_index]:
            return

        if newpath.exists():
            if newpath.is_dir():
                self.path_change(newpath=newpath, current_index=self.path_index)
                try:
                    if self.path_history[self.path_index + 1] == newpath:
                        self.path_index += 1
                    else:
                        print("go_from_text; this should never print")
                except IndexError:
                    self.path_history.append(newpath)
                    self.path_index += 1

                self.dev_path_print("Pressed Enter into a new directory")
            else:
                robust_qmsg(self, title=self.errs["CannotEnterFile"][0], body=self.errs["CannotEnterFile"][1],
                            variables=[str(newpath)])

                # Reset the lineedit back to the text prior to the Enter event
                self.le_current_dir.setText(str(self.path_history[self.path_index]))
                return
        else:
            robust_qmsg(self, title=self.errs["PathDoesNotExist"][0], body=self.errs["PathDoesNotExist"][1],
                        variables=str(newpath))
            # Reset the lineedit back to the text prior to the Enter event
            self.le_current_dir.setText(str(self.path_history[self.path_index]))
            return
    def go_down(self, filepath_modelindex: QModelIndex):
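        """
        Handles a double-click on an item in the file view: directories are entered (with the path history
        and index updated accordingly), files are opened with the operating system's default application,
        and anything else is reported to the console.

        :param filepath_modelindex: the QModelIndex of the item that was double-clicked
        """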
        filepath = Path(self.model_file.filePath(filepath_modelindex))
        # User wants to open a directory
        if filepath.is_dir():
            self.path_change(newpath=filepath, current_index=self.path_index)
            try:
                if self.path_history[self.path_index + 1] == filepath:
                    self.path_index += 1
                else:
                    print("go_down; this should never print")
            except IndexError:
                self.path_history.append(filepath)
                self.path_index += 1
            self.dev_path_print("Double-clicked to go down into a directory")

        # User wants to open a file
        elif filepath.is_file():
            result = QDesktopServices.openUrl(QUrl.fromLocalFile(str(filepath)))
            self.dev_path_print("Attempted to open a file")
            if not result:
                robust_qmsg(self.parent(), title=self.errs["CannotEnterFile"][0],
                            body=self.errs["CannotEnterFile"][1], variables=[str(filepath)])
        # Something went wrong
        else:
            print(f"The filepath: {filepath} is not a directory that can be entered into")
    def get_directory_structure(self):
        """
        Returns the directory structure in preparation of running the import
        """
        dirnames = [le.text() for le in self.levels.values()]
        valid_dirs = []
        encountered_nonblank = False
        # Iterate backwards so that trailing blank levels are skipped before validation
        for name in reversed(dirnames):
            # Cannot have blank lines existing between the important directories
            if name == '' and encountered_nonblank:
                robust_qmsg(
                    self,
                    title=self.import_errs["InvalidStructure_Blanks"][0],
                    body=self.import_errs["InvalidStructure_Blanks"][1])
                return False, []
            elif name == '' and not encountered_nonblank:
                continue
            else:
                encountered_nonblank = True
                valid_dirs.append(name)

        # Sanity check against invalid user input
        if any(["Subject" not in valid_dirs, "Scan" not in valid_dirs]):
            robust_qmsg(
                self,
                title=self.import_errs["InvalidStructure_MinSubScan"][0],
                body=self.import_errs["InvalidStructure_MinSubScan"][1])
            return False, []

        valid_dirs = list(reversed(valid_dirs))
        # print(valid_dirs)
        return True, valid_dirs
    def alter_tsv(self):
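        """
        Merges the user-selected metadata columns into participants.tsv. Performs a final sanity check that the
        chosen subject columns actually share subjects, backs up the current dataframes, performs a left merge of
        the metadata subset into the participants dataframe, and writes both the backup (participants_orig.tsv)
        and the updated participants.tsv to the target directory.
        """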

        # Last Sanity Check - Does the choice of SUBJECT columns make sense?
        subs_from_meta = set(self.df_metadata[self.le_metadata_subjectcol.text()].unique())
        subs_from_parttsv = set(self.df_parttsv[self.le_parttsv_subjectcol.text()].unique())
        if len(subs_from_meta.intersection(subs_from_parttsv)) == 0:
            robust_qmsg(self, title=self.parent.exec_errs["ImminentBadMerge"][0],
                        body=self.parent.exec_errs["ImminentBadMerge"][1])
            return

        # Prepare certain variables in the event of a revert
        self.metadatafile_duringalter = self.le_metadatafile.text()
        self.df_backupmetadata: pd.DataFrame = self.df_metadata.copy()
        self.df_backupparttsv: pd.DataFrame = self.df_parttsv.copy()

        # Get the columns that are needed for merging, subset the metadata, then merge
        which_transfer_cols = self.get_current_parttsvcolnames() - set(self.default_tsvcols)
        meta_subset = self.df_metadata.loc[:, list(which_transfer_cols) + [self.le_metadata_subjectcol.text()]]
        merge_df = pd.merge(left=self.df_parttsv, right=meta_subset,
                            left_on=self.le_parttsv_subjectcol.text(),
                            right_on=self.le_metadata_subjectcol.text(),
                            how="left", suffixes=("_from_participants", "_from_meta"))
        merge_df.drop(self.le_metadata_subjectcol.text(), axis=1, inplace=True)

        # Save backup and new merge to their respective files
        self.df_backupparttsv.to_csv(Path(self.target_dir) / "participants_orig.tsv", sep="\t", index=False)
        merge_df.to_csv(Path(self.target_dir) / "participants.tsv", sep="\t", index=False)
        robust_qmsg(self, "information", "Successfully updated participants.tsv",
                    "participants.tsv has been updated with the indicated metadata columns. "
                    "A backup of the previous content has been saved as participants_orig.tsv")
        self.btn_altertsv.setEnabled(False)
        self.btn_revert.setEnabled(True)
    def run(self):
        """
        Worker entry point: copies the source directory tree into the backup directory and emits the done-copying
        signal, or raises an error message box if the copy could not be completed.
        """
        try:
            shutil.copytree(src=self.source_directory,
                            dst=self.backup_directory,
                            dirs_exist_ok=True)
            self.signals.signal_done_copying.emit()
        except (OSError, RuntimeError) as copytree_error:
            robust_qmsg(
                title=self.errs["BackDirNotMade"][0],
                body=self.errs["BackDirNotMade"][1],
                variables=[str(self.source_directory),
                           str(copytree_error)])
    def load_metadatafile(self, from_reset: bool = False):
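        """
        Loads the metadata file into a dataframe and refreshes the metadata column list widget.

        :param from_reset: if True, reload from the filepath recorded at the time of the last alteration rather
        than from the lineedit's current text
        """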
        if from_reset:
            meta_path = Path(self.metadatafile_duringalter).resolve()
        else:
            meta_path = Path(self.le_metadatafile.text()).resolve()
        if any([not meta_path.exists(), meta_path.is_dir()]):
            robust_qmsg(self, msg_type="warning", title="Could not load metadata",
                        body="You have indicated either a non-existent path or a directory")
            return

        self.df_metadata: pd.DataFrame = robust_read_csv(meta_path)
        self.lst_metadata.clear()
        self.lst_metadata.addItems(self.df_metadata.columns)
        self.default_metacols = self.df_metadata.columns.tolist()
        self.is_ready_merge()
    def get_run_aliases(self):
        """
        Retrieves a mapping of the run alias names and the user-specified preferred name
        @return: status, whether the operation was a success; run_aliases, the mapping
        """

        run_aliases = OrderedDict()

        # If the run aliases dict is empty, simply return the empty dict, as runs are not mandatory to outline
        if len(self.cmb_runaliases_dict) == 0:
            return True, run_aliases

        # First, make sure that every number is unique:
        current_orderset = [
            cmb.currentText() for cmb in self.cmb_runaliases_dict.values()
        ]
        if len(current_orderset) != len(set(current_orderset)):
            robust_qmsg(self,
                        title=self.import_errs["InvalidRunAliases"][0],
                        body=self.import_errs["InvalidRunAliases"][1])
            return False, run_aliases

        basename_keys = list(self.le_runaliases_dict.keys())
        aliases = list(le.text() for le in self.le_runaliases_dict.values())
        orders = list(cmb.currentText()
                      for cmb in self.cmb_runaliases_dict.values())

        if self.config["DeveloperMode"]:
            print(
                f"Inside get_run_aliases, the following variable values were in play prior to generating the "
                f"run aliases dict:\n"
                f"basename_keys: {basename_keys}\n"
                f"aliases: {aliases}\n"
                f"orders: {orders}")

        for num in range(1, len(orders) + 1):
            idx = orders.index(str(num))
            current_alias = aliases[idx]
            current_basename = basename_keys[idx]
            if current_alias == '':
                run_aliases[current_basename] = f"ASL_{num}"
            else:
                run_aliases[current_basename] = current_alias

        return True, run_aliases
    def remove_status_files(self):
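        """
        Deletes the .status files currently selected in the lock tree, then rebuilds the tree from the refreshed
        lock directory structure and informs the user that the re-run setup is complete.
        """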
        filepaths, treewidgetitems = self.return_filepaths()
        if self.parent.config["DeveloperMode"]:
            print(f"REMOVING THE FOLLOWING STATUS FILES:")
            pprint(filepaths)

        for filepath in filepaths:
            filepath.unlink(missing_ok=True)

        # Clear the tree
        self.lock_tree.clear()
        # Refresh the file structure
        self.directory_struct.clear()
        self.directory_struct["lock"] = self.get_path_directory_structure(self.root_dir / "lock")
        # Refresh the tree
        self.fill_tree(self.lock_tree.invisibleRootItem(), self.directory_struct)
        self.lock_tree.expandToDepth(2)
        self.lock_tree.itemChanged.connect(self.change_check_state)

        robust_qmsg(self.parent, msg_type="information", title="Re-run setup complete",
                    body=f"Successfully deleted the indicated .status files for the study:\n{str(self.root_dir)}")
    def end_rename_event(self):
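        """
        Finalizes a rename initiated through the inline editor: exits early if the name is unchanged or already
        taken, otherwise renames the directory or file (re-applying the original suffix to files renamed without
        one) and closes the editor.
        """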
        proposed_basename = self.editor.text()
        proposed_filepath: Path = self.orig_filepath.with_name(proposed_basename)
        if proposed_basename == self.orig_filepath.name:
            self.full_close_editor()
            return

        if proposed_filepath.exists():
            robust_qmsg(self, title=self.errs["DirectoryAlreadyExists"][0], body=self.errs["DirectoryAlreadyExists"][1],
                        variables=[str(proposed_basename), str(self.orig_filepath)])
            self.full_close_editor()
            return

        if self.orig_filepath.is_dir():
            try:
                self.orig_filepath.replace(proposed_filepath)
            except FileNotFoundError:
                robust_qmsg(self, title=self.errs["InvalidCharacterInPath"][0],
                            body=self.errs["InvalidCharacterInPath"][1], variables=[str(proposed_basename)])
            self.full_close_editor()
            return

        # If it is a file
        else:
            if proposed_filepath.suffix == "":
                proposed_filepath = proposed_filepath.with_suffix(self.orig_filepath.suffix)
            try:
                self.orig_filepath.replace(proposed_filepath)
            except FileNotFoundError:
                robust_qmsg(self, title=self.errs["InvalidCharacterInPath"][0],
                            body=self.errs["InvalidCharacterInPath"][1], variables=[str(proposed_basename)])
            self.full_close_editor()
            return
    def slot_cleanup_postterminate(self):
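        """
        Slot run after an import worker is terminated: clears any accumulated summaries and failed runs, and once
        the last worker has finished, re-enables the widgets, restores the cursor, and notifies the user about the
        analysis directory if one was already created.
        """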
        self.n_import_workers -= 1
        if len(self.import_summaries) > 0:
            self.import_summaries.clear()
        if len(self.failed_runs) > 0:
            self.failed_runs.clear()

        # Don't proceed until all importer workers are finished
        if self.n_import_workers > 0 or self.import_parms is None:
            return

        # Reset the widgets and cursor
        self.set_widgets_on_or_off(state=True)
        self.btn_terminate_importer.setEnabled(False)

        analysis_dir = Path(self.import_parms["RawDir"]).parent / "analysis"
        if analysis_dir.exists():
            robust_qmsg(self,
                        title=self.import_errs["CleanupImportPostTerm"][0],
                        body=self.import_errs["CleanupImportPostTerm"][1],
                        variables=[str(analysis_dir)])
        QApplication.restoreOverrideCursor()
    def revert(self):
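        """
        Restores participants.tsv from the participants_orig.tsv backup, reloads the participants and metadata
        dataframes, and resets the relevant buttons and state.
        """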
        if not (Path(self.target_dir) / "participants_orig.tsv").exists():
            robust_qmsg(self, title="No participants_orig.tsv file found",
                        body="Could not perform a revert without the expected backup file")
            return
        # Restore the old file: remove the altered participants.tsv and put the backup back in its place
        (Path(self.target_dir) / "participants.tsv").unlink()
        (Path(self.target_dir) / "participants_orig.tsv").rename(Path(self.target_dir) / "participants.tsv")

        # Load from the restored file
        self.load_parttsvfile(from_reset=True)
        self.load_metadatafile(from_reset=True)
        self.le_metadata_subjectcol.clear()

        # Disable buttons as necessary
        self.metadatafile_duringalter = ""
        self.btn_altertsv.setEnabled(False)
        self.btn_revert.setEnabled(False)
        self.btn_reset.setEnabled(False)
        self.is_ready_merge()

        robust_qmsg(self, "information", title="participants.tsv has been reset",
                    body="participants_orig.tsv was used to restore the previous iteration of participants.tsv")
    def alter_json_sidecars(self):
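        """
        Alters (or removes a field from) the json sidecars of the selected scan types, either for the subjects
        listed in a pre-configured file or for the subjects in the drag & drop list, and reports the overall
        outcome to the user.
        """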
        QApplication.setOverrideCursor(Qt.WaitCursor)

        # Ascertain which types of scans to search for regardless of method
        which_scans = []
        alter_sidecar_results = []
        for scan_type, widget in zip(["asl", "t1", "m0"], [self.chk_asl, self.chk_t1, self.chk_m0]):
            if widget.isChecked():
                which_scans.append(scan_type)
        del scan_type

        action = "remove" if self.cmb_actiontype.currentText() == "Remove a field" else "alter"

        # User wishes to alter sidecars using a pre-configured csv file
        if self.grp_fromfile.isChecked():
            start_msg = f"{str(dt.now())} - STARTING JSON SIDECAR ALTERATION"
            self.logger.info("#" * len(start_msg) + "\n" + start_msg)
            for scan_type in which_scans:
                res = alter_sidecars(root_dir=self.root_dir, subjects=self.le_fromfile.text(), which_scan=scan_type,
                                     action=action, logger=self.logger)
                alter_sidecar_results.append(res)
        # User wishes to alter sidecars using the drag & drop list
        else:
            # Get the list of subjects
            subs = []
            for idx in range(self.lst_subs.count()):
                sub_name = self.lst_subs.item(idx).text()
                if (self.root_dir / sub_name).exists():
                    subs.append(sub_name)
            if len(subs) == 0:
                QApplication.restoreOverrideCursor()
                robust_qmsg(self, title=self.parent.exec_errs["SubjectsNotFound"][0],
                            body=self.parent.exec_errs["SubjectsNotFound"][1])
                return

            start_msg = f"{str(dt.now())} - STARTING JSON SIDECAR ALTERATION"
            self.logger.info("#" * len(start_msg) + "\n" + start_msg)
            for scan_type in which_scans:
                res = alter_sidecars(root_dir=self.root_dir, subjects=subs, which_scan=scan_type,
                                     action=action, key=self.le_key.text(), value=interpret_value(self.le_value.text()),
                                     logger=self.logger)
                alter_sidecar_results.append(res)
        # Give a specific message depending on the results
        if all(alter_sidecar_results):
            robust_qmsg(self, msg_type="information", title="Finished json sidecar operation",
                        body="Completed the requested json sidecar operation on the indicated subjects")
        else:
            robust_qmsg(self, msg_type="information", title="Finished json sidecar operation with errors",
                        body="The requested json sidecar operation finished with some errors. These may be false, "
                             f"but the user is recommended to check the following log file:\n{self.log_file}")
        QApplication.restoreOverrideCursor()
        return
    def set_local_matlabroot(self):
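        """
        Asks the user for the directory containing the local MATLAB executable, records the command path in the
        config, and determines the MATLAB version either from the path itself or, failing that, by querying
        matlabroot through a subprocess call.
        """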
        glob_pat = "matlab.exe" if system() == "Windows" else "matlab"
        s, d = robust_getdir(
            self,
            "Select the path to the MATLAB bin directory",
            self.config["DefaultRootDir"],
            requirements={
                "rcontains": [glob_pat, self.mwin_errs["InvalidMATLABDir"]]
            })
        if not s:
            return

        # First, get the overall path to the matlab command
        try:
            QApplication.setOverrideCursor(Qt.WaitCursor)
            matlab_cmd_path = next(d.rglob(glob_pat))
        except StopIteration:
            QApplication.restoreOverrideCursor()
            robust_qmsg(self,
                        title=self.mwin_errs["InvalidMATLABDir"][0],
                        body=self.mwin_errs["InvalidMATLABDir"][1])
            return
        if matlab_cmd_path.name not in ["matlab", "matlab.exe"]:
            QApplication.restoreOverrideCursor()
            robust_qmsg(self,
                        title=self.mwin_errs["InvalidMATLABDir"][0],
                        body=self.mwin_errs["InvalidMATLABDir"][1])
            return

        # Then derive the version from it
        QApplication.restoreOverrideCursor()
        matlabver_regex = re.compile(r"R\d{4}[ab]")
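        # For illustration (hypothetical path, not taken from this project): a typical install path such as
        # "C:/Program Files/MATLAB/R2019a/bin/matlab.exe" would yield the match "R2019a" from this pattern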
        self.config["MATLAB_CMD_PATH"] = str(matlab_cmd_path)
        try:
            self.config["MATLAB_VER"] = matlabver_regex.search(
                str(matlab_cmd_path)).group()
        except AttributeError:
            # Rare case, the matlab command path does not contain the version. Use subprocess backup
            result = subprocess.run([
                f"{str(matlab_cmd_path)}", "-nosplash", "-nodesktop", "-batch",
                "matlabroot"
            ],
                                    capture_output=True,
                                    text=True)
            match = matlabver_regex.search(result.stdout)
            if result.returncode == 0 and match:
                self.config["MATLAB_VER"] = match.group()
            else:
                robust_qmsg(self,
                            title=self.mwin_errs["MissingMATLABVER"][0],
                            body=self.mwin_errs["MissingMATLABVER"][1],
                            variables=[str(matlab_cmd_path)])
                self.save_config()
                return
        QMessageBox.information(
            self, "MATLAB Version and command path successfully located",
            f"The path to launching MATLAB was registered as:\n{str(matlab_cmd_path)}\n"
            f"The version of MATLAB on this operating system is: {self.config['MATLAB_VER']}"
        )
        self.save_config()
        return
    def import_postprocessing(self):
        """
        Performs the bulk of the post-import work, especially if the import type was specified to be BIDS
        """
        print(
            "Clearing Import workers from memory, re-enabling widgets, and resetting current directory"
        )
        self.import_workers.clear()
        self.set_widgets_on_or_off(state=True)
        self.btn_terminate_importer.setEnabled(False)
        QApplication.restoreOverrideCursor()

        chdir(self.config["ScriptsDir"])
        analysis_dir = Path(self.import_parms["RawDir"]).parent / "analysis"
        if not analysis_dir.exists():
            robust_qmsg(self,
                        title=self.import_errs["StudyDirNeverMade"][0],
                        body=self.import_errs["StudyDirNeverMade"][1],
                        variables=[str(analysis_dir)])
            return

        # Concatenate the tmpImport_Converter_###.log files into a single log placed in the study directory
        # Also, remove the log files in the process
        logs = []
        log_files = sorted(
            Path(self.import_parms["RawDir"]).glob("tmpImport_Converter*.log"))
        for log_file in log_files:
            with open(log_file, "r") as log_reader:
                logs.append(log_reader.read())
            log_file.unlink(missing_ok=True)

        now_str = datetime.now().strftime("%a-%b-%d-%Y_%H-%M-%S")
        try:
            log_path = analysis_dir / "Logs" / "Import Logs" / f"Import_Log_{now_str}.log"
            log_path.parent.mkdir(parents=True, exist_ok=True)
            with open(log_path, "w") as log_writer:
                log_writer.write(f"\n{'#' * 50}\n".join(logs))
        except PermissionError:
            log_path = analysis_dir / "Logs" / "Import Logs" / f"Import_Log_{now_str}_backup.log"
            log_path.parent.mkdir(parents=True, exist_ok=True)
            with open(log_path, "w") as log_writer:
                log_writer.write("\n\n".join(logs))

        # Create the import summary
        create_import_summary(import_summaries=self.import_summaries,
                              config=self.import_parms)

        # If the settings is BIDS...
        if not self.chk_uselegacy.isChecked():
            # Ensure all M0 jsons have the appropriate "IntendedFor" field if this is in BIDS
            bids_m0_followup(analysis_dir=analysis_dir)

            # Create the template for the dataset description
            self.create_dataset_description_template(analysis_dir)

            # Create the "bidsignore" file
            with open(analysis_dir / ".bidsignore", 'w') as ignore_writer:
                to_ignore = [
                    "Import_Log_*.log\n", "Import_Failed*.txt\n",
                    "Import_Dataframe_*.tsv\n"
                ]
                ignore_writer.writelines(to_ignore)
                del to_ignore

        # If there were any failures, write them to disk now
        if len(self.failed_runs) > 0:
            try:
                with open(analysis_dir / "Import_Failed_Imports.txt",
                          "w") as failed_writer:
                    failed_writer.writelines(
                        [line + "\n" for line in self.failed_runs])
                robust_qmsg(self,
                            title=self.import_errs["ImportErrors"][0],
                            body=self.import_errs["ImportErrors"][1],
                            variables=[log_path.name,
                                       str(analysis_dir)])
            except FileNotFoundError:
                robust_qmsg(self,
                            title=self.import_errs["StudyDirNeverMade"][0],
                            body=self.import_errs["StudyDirNeverMade"][1],
                            variables=[str(analysis_dir)])
        else:
            # Finally, a Message confirming a successful import
            QMessageBox.information(
                self, "Import was a Success",
                f"You have successfully imported the DICOM dataset into NIFTI format.\n"
                f"The study directory is located at:\n{str(analysis_dir)}",
                QMessageBox.Ok)
    def specify_components(self):
        """
        Extracts the basenames at the indicated level, splits on the delimiter if indicated, and populates the left
        and/or right form layouts with the split basenames, allowing the user to start specifying the parent directory
        each component of a basename should be placed under
        """
        self.glob_path = None

        if self.le_rootdir.text() not in self.le_exampledir.text():
            return

        # We must determine the depth that this example directory exists at
        nlevels, path = 1, Path(self.le_exampledir.text())
        root_path = Path(self.le_rootdir.text())
        while path.parent != root_path:
            nlevels += 1
            path = path.parent

        sys_delimiter = "\\" if system() == "Windows" else "/"
        self.glob_path = list(
            root_path.glob(sys_delimiter.join(["*"] * nlevels)))
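        # e.g. with nlevels == 2 this globs root_path with the pattern "*/*" (or "*\*" on Windows),
        # i.e. every entry exactly two levels below the root directory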
        # Get the basenames
        self.basenames.clear()
        for path in self.glob_path:
            if path.is_dir() and path.name not in self.basenames:
                self.basenames[path.name] = {"left": "", "right": ""}
        if len(self.basenames) < 1:
            return

        delimiter, left_bases, right_bases = self.le_delimiter.text(), [], []
        try:
            if delimiter != "":
                for basename in self.basenames:
                    left, _, right = basename.partition(delimiter)
                    if left != "":
                        self.basenames[basename]["left"] = left
                        left_bases.append(left)
                    if right != "":
                        self.basenames[basename]["right"] = right
                        right_bases.append(right)
            else:
                for basename in self.basenames:
                    self.basenames[basename]["left"] = basename
                    left_bases.append(basename)
        except ValueError:
            robust_qmsg(self,
                        title=self.dehyb_errs["ImpossibleDirDepth"][0],
                        body=self.dehyb_errs["ImpossibleDirDepth"][1])
            del delimiter, path, nlevels
            return

        left_bases = set(left_bases)
        right_bases = set(right_bases)
        pprint(self.basenames)
        print(f"Left bases: {left_bases}\nRight Bases: {right_bases}")

        # Exit early if there was no luck getting components
        if len(left_bases) == 0 and len(right_bases) == 0:
            return

        # Clear first
        self.clear_components_from_layout("left")
        self.clear_components_from_layout("right")

        # Fill left bases first
        if self.grp_leftside.isChecked():
            for left_component in sorted(left_bases):
                self.add_components_to_layout(component=left_component,
                                              side="left")

        # Then fill right bases
        if self.grp_rightside.isChecked():
            for right_component in sorted(right_bases):
                self.add_components_to_layout(component=right_component,
                                              side="right")

        if self.config["DeveloperMode"]:
            print(f'Left components: {self.leftside_name2le}')
            print(f'Right components: {self.rightside_name2le}')

        # These will be important for accounting for the user switching directories between determining delimiters and
        # the main function
        self.documented_rootdir = self.le_rootdir.text()
        self.documented_exampledir = self.le_exampledir.text()
def startup():
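    """
    Entry point for the GUI: creates the QApplication, verifies the essential program directories, loads the
    master config (or builds a first-time default and probes for a local MATLAB installation), and finally
    launches the main window.
    """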
    app = QApplication(sys.argv)
    screen = app.primaryScreen()
    screen_size = screen.availableSize()
    project_dir = Path(__file__).resolve().parent.parent
    print(f"Launching script at: {Path(__file__)} ")
    print(f"Project Directory is: {project_dir}")

    # Get the appropriate default style based on the user's operating system
    app.setStyle("Fusion") if system() in ["Windows", "Linux"
                                           ] else app.setStyle("macintosh")

    # Ensure essential directories exist
    for essential_dir in ["JSON_LOGIC", "media", "External"]:
        if not (project_dir / essential_dir).exists():
            QMessageBox().warning(
                QWidget(), f"No {essential_dir} directory found",
                f"The program directory structure is compromised. "
                f"No {essential_dir} directory was located in {project_dir}",
                QMessageBox.Ok)
            sys.exit(1)

    # Check if the master config file exists; if it doesn't, the app will initialize one on the first startup
    if (project_dir / "JSON_LOGIC" /
            "ExploreASL_GUI_masterconfig.json").exists():
        print("Loading masterconfig file.")
        with open(project_dir / "JSON_LOGIC" /
                  "ExploreASL_GUI_masterconfig.json") as master_config_reader:
            master_config = load(master_config_reader)
        # Update the ProjectDir and ScriptsDir variables in the event the user moves the location of this folder
        # First, make sure the Startup.py is located in the src folder
        if str(project_dir) != master_config["ProjectDir"]:
            master_config["ProjectDir"] = str(project_dir)
            master_config["ScriptsDir"] = str(project_dir / "src")

    # Otherwise, this is a first time startup and additional things need to be checked
    else:
        master_config = {
            "ExploreASLRoot": "",  # The filepath to the ExploreASL directory
            "DefaultRootDir": str(Path.home()),  # The default root for the navigator to watch from
            "ScriptsDir": str(project_dir / "src"),  # The location of where this script is launched from
            "ProjectDir": str(project_dir),  # The location of the src main dir
            "Platform": f"{system()}",
            "ScreenSize": (screen_size.width(), screen_size.height()),  # Screen dimensions
            "DeveloperMode": True  # Whether to launch the app in developer mode or not
        }

        # TODO Okay, this is no longer sufficient in light of compatibility with the compiled version. Consider a custom
        #  QMessageBox, perhaps?
        # We must also check for the MATLAB version present on the machine
        desc = "Is a standard MATLAB program installed on this machine?"
        check_for_local = QMessageBox.question(
            QWidget(), "MATLAB Detection", desc,
            (QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel))
        if check_for_local == QMessageBox.Cancel:
            sys.exit(0)

        if check_for_local == QMessageBox.Yes:
            version, cmd_path = get_local_matlab()
            master_config["MATLAB_VER"] = version
            master_config["MATLAB_CMD_PATH"] = cmd_path
            if cmd_path is None:
                robust_qmsg(
                    None, "warning", "No MATLAB Command Found",
                    "The matlab command could not be located on this system. Please use the main window's "
                    "menu to manually specify where it is located if you wish to use a non-compiled ExploreASL"
                )
            elif cmd_path is not None and version is None:
                # This should never print. Once matlab is located properly, the matlabroot command will display R####
                robust_qmsg(None, "warning", "No MATLAB Version Found", [
                    "The matlab command was found at:\n",
                    "\nHowever, the version could not be determined."
                ], [cmd_path])
            else:
                robust_qmsg(None, "information",
                            "Local MATLAB Location & Version discerned", [
                                "Detected the matlab path to be:\n",
                                "\nDetected the matlab version to be: "
                            ], [cmd_path, version])
        else:
            body_txt = "See which applies to you:\n1) If you intend to use MATLAB at a later point in time, you " \
                       "will have the option to specify its location in the Main Window of this program." \
                       "\n\n2) If you do not intend use a MATLAB Installation, you will need to download the MATLAB " \
                       "Runtime as well as the compiled version of ExploreASL, then specify the filepaths to these" \
                       "when defining Study Parameters. At the current time, the compiled version only supports the " \
                       "2019a Runtime."
            robust_qmsg(None, "information",
                        "Instructions for non-MATLAB cases", body_txt)

        # Assuming the above was successful, dcm2niix may not have executable permission; add execute permissions
        dcm2niix_dir = project_dir / "External" / "DCM2NIIX" / f"DCM2NIIX_{system()}"
        dcm2niix_file = next(dcm2niix_dir.glob("dcm2niix*"))
        stat = oct(dcm2niix_file.stat().st_mode)
        if not stat.endswith("775"):
            dcm2niix_file.chmod(0o775)
        else:
            print(f"dcm2niix already has execute permissions")

    # If all was successful, launch the GUI
    app.setWindowIcon(QIcon(str(project_dir / "media" /
                                "ExploreASL_logo.ico")))
    chdir(project_dir / "src")

    # Memory cleanup
    del project_dir

    main_win = xASL_MainWin(master_config)
    main_win.show()
    sys.exit(app.exec_())
    def get_nth_level_dirs(self, dir_type: str, level: int):
        """
        :param dir_type: whether this is a subject, visit, run or scan
        :param level: which lineedit, in python index terms, emitted this signal
        """
        # Requirements to proceed
        if any([
                self.rawdir == "", not Path(self.rawdir).exists(),
                not Path(self.rawdir).is_dir()
        ]):
            return

        # Check if a reset is needed
        self.check_if_reset_needed()

        # If this was a clearing, the dir_type will be an empty string and the function should exit after any resetting
        # has been performed
        if dir_type == '':
            return

        try:
            delimiter = "\\" if system() == "Windows" else "/"
            glob_string = delimiter.join(["*"] * (level + 1))
            paths = [(str(direc), str(direc.name))
                     for direc in Path(self.rawdir).glob(glob_string)]
            directories, basenames = zip(*paths)

        except ValueError:
            robust_qmsg(self,
                        title=self.import_errs["ImpossibleDirDepth"][0],
                        body=self.import_errs["ImpossibleDirDepth"][1])
            # Clear the appropriate lineedit that called this function after the error message
            list(self.levels.values())[level].clear()
            return

        # Do not proceed if no directories were found and clear the lineedit that emitted the textChanged signal
        if len(directories) == 0:
            idx = list(self.levels.keys())[level]
            print(f"{idx=}")
            self.levels[idx].clear()
            return

        # Otherwise, make the appropriate adjustment depending on which label was dropped in
        if dir_type == "Subject":
            self.subject_regex = self.infer_regex(list(basenames))
            print(f"Subject regex: {self.subject_regex}")
            del directories, basenames

        elif dir_type == "Visit":
            self.visit_regex = self.infer_regex(list(basenames))
            print(f"Visit regex: {self.visit_regex}")
            del directories, basenames

        elif dir_type == "Run":
            self.run_regex = self.infer_regex(list(set(basenames)))
            print(f"Run regex: {self.run_regex}")
            self.reset_run_aliases(basenames=list(set(basenames)))
            del directories, basenames

        elif dir_type == "Scan":
            self.scan_regex = self.infer_regex(list(set(basenames)))
            print(f"Scan regex: {self.scan_regex}")
            self.reset_scan_alias_cmbs(basenames=sorted(set(basenames)))
            del directories, basenames

        elif dir_type == "Dummy":
            del directories, basenames
            return

        else:
            del directories, basenames
            print("Error. This should never print")
            return
    def load_exploreasl_data(self):
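        """
        Loads the ExploreASL Stats directory .tsv files for the selected atlas/PVC/statistic, merges in any
        ancillary metadata, reshapes the wide data into a long format with Anatomical Area and Side of the Brain
        columns, and prepares the plotting widgets for the newly loaded data.
        """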
        # Cautionary measures
        from src.xASL_GUI_Plotting import xASL_Plotting
        self.parent_cw: xASL_Plotting
        stats_dir = Path(self.parent_cw.le_analysis_dir.text()) / "Population" / "Stats"
        if any([not stats_dir.exists(), not stats_dir.is_dir(), len(list(stats_dir.glob("*.tsv"))) == 0]):
            robust_qmsg(self.parent_cw, title=self.parent_cw.plot_errs["BadStudyDir"][0],
                        body=self.parent_cw.plot_errs["BadStudyDir"][1])
            return
        print("Loading in Data")
        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        # First Section - Load in the ExploreASL Stats directory data
        atlas = self.atlas_guide[self.parent_cw.cmb_atlas_selection.currentText()]
        pvc = {"With PVC": "PVC2", "Without PVC": "PVC0"}[self.parent_cw.cmb_pvc_selection.currentText()]
        stat = {"Mean": "mean", "Median": "median",
                "Coefficient of Variation": "CoV"}[self.parent_cw.cmb_stats_selection.currentText()]

        # Clearing of appropriate widgets to accommodate new data
        self.parent_cw.lst_varview.clear()
        # Extract each as a dataframe and merge them
        pat_gm = f'{stat}_*_TotalGM*{pvc}.tsv'
        pat_wm = f'{stat}_*_DeepWM*{pvc}.tsv'
        pat_atlas = f'{stat}_*_{atlas}*{pvc}.tsv'
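        # e.g. selecting Mean and With PVC yields patterns such as 'mean_*_TotalGM*PVC2.tsv'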
        dfs = []
        for pattern in [pat_gm, pat_wm, pat_atlas]:
            try:
                file = next(stats_dir.glob(pattern))
            except StopIteration:
                continue
            df = pd.read_csv(file, sep='\t')
            df.drop(0, axis=0, inplace=True)  # First row is unnecessary
            df = df.loc[:, [col for col in df.columns if "Unnamed" not in col]]
            dfs.append(df)
        if len(dfs) == 0:
            robust_qmsg(self.parent_cw, title="No Relevant Dataframes Found",
                        body="Could not locate any of the indicated atlas/pvc/stat .tsv files in the Stats directory "
                             "of this study. Has the user run the Population Module? If not, please run that module "
                             "before re-attempting.")
            return
        df: pd.DataFrame = pd.concat(dfs, axis=1)
        df = df.T.drop_duplicates().T

        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        # Second Section - Fix the ExploreASL native data dtypes
        for col in df.columns:
            if col in self.dtype_guide.keys():
                df[col] = df[col].astype(self.dtype_guide[col])
            else:
                df[col] = df[col].astype("float64")
        self.loaded_wide_data = df
        self.backup_data = df.copy()

        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        # Third Section - If there is any ancillary data specified, load it in
        meta_path = Path(self.parent_cw.le_metadata.text())
        if all([meta_path.exists(), meta_path.is_file(), meta_path.suffix in [".tsv", ".csv", ".xlsx"]]):
            result = self.load_ancillary_data(df)
            if result is not None:
                self.loaded_wide_data = result
            # If the merging failed, default to just using the ExploreASL datasets. In a future update, add some
            # sort of user feedback that this went wrong
            else:
                self.loaded_wide_data = self.backup_data

        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        # Fourth Section - Convert the wide format data into a long format
        vars_to_keep_constant = [col for col in self.loaded_wide_data.columns if not any([col.endswith("_B"),
                                                                                          col.endswith("_L"),
                                                                                          col.endswith("_R")])]
        vars_to_melt = [col for col in self.loaded_wide_data.columns if col not in vars_to_keep_constant]
        self.loaded_long_data = self.loaded_wide_data.melt(id_vars=vars_to_keep_constant,
                                                           value_vars=vars_to_melt,
                                                           var_name="Atlas Location",
                                                           value_name="CBF")
        self.loaded_long_data["CBF"] = self.loaded_long_data["CBF"].astype("float64")
        atlas_location = self.loaded_long_data.pop("Atlas Location")
        atlas_loc_df: pd.DataFrame = atlas_location.str.extract("(.*)_(B|L|R)", expand=True)
        atlas_loc_df.rename(columns={0: "Anatomical Area", 1: "Side of the Brain"}, inplace=True)
        atlas_loc_df["Side of the Brain"] = atlas_loc_df["Side of the Brain"].apply(lambda x: {"B": "Bilateral",
                                                                                               "R": "Right",
                                                                                               "L": "Left"}[x])
        atlas_loc_df = atlas_loc_df.astype("category")
        self.loaded_long_data: pd.DataFrame = pd.concat([self.loaded_long_data, atlas_loc_df], axis=1)
        self.loaded_long_data = self.loaded_long_data.infer_objects()
        self.current_dtypes = self.loaded_long_data.dtypes
        self.current_dtypes = {col: str(str_name) for col, str_name in
                               zip(self.current_dtypes.index, self.current_dtypes.values)}
        self.parent_cw.lst_varview.addItems(self.loaded_long_data.columns.tolist())

        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        # Fifth Section - Subset the data accordingly if subsetting criteria are set
        self.parent_cw.subsetter.update_subsetable_fields_on_load(self.loaded_long_data)
        self.loaded_long_data: pd.DataFrame = self.parent_cw.subsetter.subset_data_on_load(self.loaded_long_data)

        # %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
        # Sixth Section - Housekeeping and Finishing touches
        # Alter this when Section 5 is completed; long_data is the "good copy" of the data that will be plotted
        self.long_data_orig = self.loaded_long_data.copy()  # THIS IS THE TARGET OF SUBSETTING
        self.long_data = self.loaded_long_data.copy()  # THIS IS OVERWRITTEN BY SUBSETTING THE ORIGINAL

        # Allow the dtype indicator to be aware of the newly loaded data if a legitimate covariates file was provided
        if all([meta_path.exists(), meta_path.is_file(), meta_path.suffix in [".tsv", ".csv", ".xlsx"]]):
            self.parent_cw.dtype_indicator.update_known_covariates(self.long_data)
            self.parent_cw.btn_indicate_dtype.setEnabled(True)
            for cmb in self.parent_cw.dtype_indicator.covariate_cols.values():
                cmb.activate()
                cmb.signal_sendupdateddtype.connect(self.update_datatype)
            print("Connected dtype indicator to subsetter")

        self.parent_cw.cmb_figuretypeselection.setEnabled(True)  # Data is loaded; figure selection settings enabled
        self.parent_cw.btn_subset_data.setEnabled(True)  # Data is loaded; subsetting is allowed

        # In case any of this was done again (data was already loaded once before), we must account for what may
        # have already been plotted or set; everything must be cleared. This should be as easy as setting the
        # figureselection to the first index, as plots & settings can only exist if its current index is non-zero,
        # and setting it to zero has the benefit of clearing everything else already
        if self.parent_cw.cmb_figuretypeselection.currentIndex() != 0:
            self.parent_cw.cmb_figuretypeselection.setCurrentIndex(0)
        print(f"DATAFRAME SHAPE UPON LOADING: {self.long_data.shape}")