class TestClinicalDataView:
    """
    Sets up the variables required for testing the Clinical Data view.
    """
    __test__ = False

    def __init__(self):
        # Locate and load the test DICOM files
        test_data_dir = Path.cwd().joinpath('test', 'testdata')
        dicom_files = find_DICOM_files(test_data_dir)  # DICOM test files
        # Common directory containing the DICOM files
        self.file_path = os.path.dirname(os.path.commonprefix(dicom_files))
        read_data_dict, file_names_dict = \
            ImageLoading.get_datasets(dicom_files)

        # Populate the patient dict container singleton
        self.patient_dict_container = PatientDictContainer()
        self.patient_dict_container.clear()
        self.patient_dict_container.set_initial_values(
            self.file_path, read_data_dict, file_names_dict)

        # Path where the clinical data SR will be written
        self.file_path = self.patient_dict_container.path
        self.file_path = Path(self.file_path).joinpath(
            "Clinical-Data-SR.dcm")

        # Test data to write
        self.data = [['123456789', 'Jim', 'Jimson']]
def __init__(self):
    # Load the test DICOM files and resolve their common directory
    test_dir = Path.cwd().joinpath('test', 'testdata')
    dicom_files = get_dicom_files(test_dir)  # DICOM test files
    common_dir = os.path.dirname(os.path.commonprefix(dicom_files))
    read_data_dict, file_names_dict = ImageLoading.get_datasets(dicom_files)

    # Populate the patient dict container singleton
    patient_dict_container = PatientDictContainer()
    patient_dict_container.clear()
    patient_dict_container.set_initial_values(common_dir, read_data_dict,
                                              file_names_dict)

    # Store ROI info when an RTSS is present; without this the
    # program would crash during the test
    if "rtss" in file_names_dict:
        dataset_rtss = dcmread(file_names_dict['rtss'])
        self.rois = ImageLoading.get_roi_info(dataset_rtss)
        patient_dict_container.set("rois", self.rois)

    # Open the main window with the DICOM tree panel selected
    self.main_window = MainWindow()
    self.main_window.right_panel.setCurrentWidget(
        self.main_window.dicom_tree)
    self.dicom_tree = self.main_window.dicom_tree
def __init__(self):
    # Load the test DICOM files and resolve their common directory
    test_dir = Path.cwd().joinpath('test', 'testdata')
    dicom_files = find_DICOM_files(test_dir)  # DICOM test files
    common_dir = os.path.dirname(os.path.commonprefix(dicom_files))
    read_data_dict, file_names_dict = ImageLoading.get_datasets(dicom_files)

    # Populate the patient dict container singleton
    patient_dict_container = PatientDictContainer()
    patient_dict_container.clear()
    patient_dict_container.set_initial_values(common_dir, read_data_dict,
                                              file_names_dict)

    # Store contour-related attributes when an RTSS is present
    # (otherwise the program will crash and the test will fail)
    if "rtss" in file_names_dict:
        dataset_rtss = dcmread(file_names_dict['rtss'])
        self.rois = ImageLoading.get_roi_info(dataset_rtss)
        dict_raw_contour_data, dict_numpoints = \
            ImageLoading.get_raw_contour_data(dataset_rtss)
        dict_pixluts = ImageLoading.get_pixluts(read_data_dict)
        patient_dict_container.set("rois", self.rois)
        patient_dict_container.set("raw_contour", dict_raw_contour_data)
        patient_dict_container.set("num_points", dict_numpoints)
        patient_dict_container.set("pixluts", dict_pixluts)

    # Open the main window
    self.main_window = MainWindow()
def test_save_radiomics_data():
    """
    Test for saving pyradiomics data to a DICOM SR file.
    """
    # Load the test DICOM files and resolve their common directory
    test_dir = Path.cwd().joinpath('test', 'testdata')
    dicom_files = find_DICOM_files(test_dir)  # DICOM test files
    common_dir = os.path.dirname(os.path.commonprefix(dicom_files))
    read_data_dict, file_names_dict = ImageLoading.get_datasets(dicom_files)

    # Populate the patient dict container singleton
    patient_dict_container = PatientDictContainer()
    patient_dict_container.clear()
    patient_dict_container.set_initial_values(common_dir, read_data_dict,
                                              file_names_dict)

    # Generate a pyradiomics DICOM SR and save it to disk
    sr_path = Path(patient_dict_container.path).joinpath(
        "PyRadiomics-SR.dcm")
    ds = patient_dict_container.dataset[0]
    dicom_sr = DICOMStructuredReport.generate_dicom_sr(sr_path, ds, "text",
                                                       "PYRADIOMICS")
    dicom_sr.save_as(sr_path)

    # Assert that the new SR exists
    assert os.path.isfile(sr_path)

    # Delete the created DICOM SR
    os.remove(sr_path)
def cleanup(self):
    """
    Clear loaded patient data and close the open viewer widgets.
    """
    # Reset the patient dict container singleton
    container = PatientDictContainer()
    container.clear()

    # Close the 3D VTK widget, then the fusion and PT/CT viewers
    self.three_dimension_view.close()
    self.cleanup_image_fusion()
    self.cleanup_pt_ct_viewer()
class TestSuv2Roi:
    """
    Sets up the OnkoDICOM main window for testing SUV2ROI
    functionality. Assumes there is test data containing PET CTAC
    files, in /test/pt-testdata/. Tests will all fail without this
    data.
    """
    __test__ = False

    def __init__(self):
        # Load the PET test DICOM files
        test_dir = Path.cwd().joinpath('test', 'pet-testdata')
        dicom_files = find_dicom_files(test_dir)  # DICOM test files
        common_dir = os.path.dirname(os.path.commonprefix(dicom_files))
        read_data_dict, file_names_dict = \
            ImageLoading.get_datasets(dicom_files)

        # Populate the patient dict container singleton
        self.patient_dict_container = PatientDictContainer()
        self.patient_dict_container.clear()
        self.patient_dict_container.set_initial_values(
            common_dir, read_data_dict, file_names_dict)

        # Initialised later by the tests themselves
        self.dicom_files = None
        self.suv_data = []

        # SUV2ROI object under test. The weight is not an actual
        # patient weight, it is just for testing purposes.
        self.suv2roi = SUV2ROI()
        self.suv2roi.patient_weight = 70000
class TestSuv2RoiGui:
    """
    Initialises an OnkoDICOM window for testing the SUV2ROI GUI.
    This uses files from the pet-testdata directory.
    """
    __test__ = False

    def __init__(self):
        # Load the PET test DICOM files
        test_dir = Path.cwd().joinpath('test', 'pet-testdata')
        dicom_files = find_DICOM_files(test_dir)  # DICOM test files
        common_dir = os.path.dirname(os.path.commonprefix(dicom_files))
        read_data_dict, file_names_dict = \
            ImageLoading.get_datasets(dicom_files)

        # Populate the patient dict container singleton
        self.patient_dict_container = PatientDictContainer()
        self.patient_dict_container.clear()
        self.patient_dict_container.set_initial_values(
            common_dir, read_data_dict, file_names_dict)

        # Set additional attributes in the patient dict container
        # (otherwise the program will crash and the test will fail)
        self.patient_dict_container.set("existing_rtss_files", [])
        if "rtss" in file_names_dict:
            dataset_rtss = dcmread(file_names_dict['rtss'])
            self.rois = ImageLoading.get_roi_info(dataset_rtss)
            dict_raw_contour_data, dict_numpoints = \
                ImageLoading.get_raw_contour_data(dataset_rtss)
            dict_pixluts = ImageLoading.get_pixluts(read_data_dict)
            self.patient_dict_container.set("rois", self.rois)
            self.patient_dict_container.set("raw_contour",
                                            dict_raw_contour_data)
            self.patient_dict_container.set("num_points", dict_numpoints)
            self.patient_dict_container.set("pixluts", dict_pixluts)
        else:
            # No RTSS in the dataset - load a temporary one instead
            img_loader = ImageLoader(dicom_files, None, None)
            img_loader.load_temp_rtss(common_dir, DummyProgressWindow,
                                      DummyProgressWindow)

        # Open the main window and record the initial structure and
        # ROI counts for later comparison
        self.main_window = MainWindow()
        self.initial_structure_count = \
            self.main_window.structures_tab.layout_content.count()
        self.initial_roi_count = len(self.main_window.structures_tab.rois)
def __init__(self):
    """
    Load the DICOM-RT test files, populate the patient dict
    container, and open the main window ready for testing.
    """
    # Build the test data path with os.path.join, which uses the
    # correct separator on every platform. The original code branched
    # on platform.system() and left desired_path unbound (NameError)
    # on any unrecognised platform.
    desired_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "testdata", "DICOM-RT-TEST")

    selected_files = find_DICOM_files(desired_path)  # DICOM test files
    # Common directory containing the DICOM files
    file_path = os.path.dirname(os.path.commonprefix(selected_files))
    read_data_dict, file_names_dict = ImageLoading.get_datasets(
        selected_files)

    # Create patient dict container object
    patient_dict_container = PatientDictContainer()
    patient_dict_container.clear()
    patient_dict_container.set_initial_values(file_path, read_data_dict,
                                              file_names_dict)

    # Set additional attributes in the patient dict container
    # (otherwise the program will crash and the test will fail)
    if "rtss" in file_names_dict:
        dataset_rtss = dcmread(file_names_dict['rtss'])
        self.rois = ImageLoading.get_roi_info(dataset_rtss)
        dict_raw_contour_data, dict_numpoints = \
            ImageLoading.get_raw_contour_data(dataset_rtss)
        dict_pixluts = ImageLoading.get_pixluts(read_data_dict)
        patient_dict_container.set("rois", self.rois)
        patient_dict_container.set("raw_contour", dict_raw_contour_data)
        patient_dict_container.set("num_points", dict_numpoints)
        patient_dict_container.set("pixluts", dict_pixluts)

    # Open the main window and record the currently displayed slice
    self.main_window = MainWindow()
    self.main_window.show()
    self.dicom_view = self.main_window.dicom_view
    self.new_polygons = {}
    slider_id = self.dicom_view.slider.value()
    self.curr_slice = self.dicom_view.patient_dict_container.get(
        "dict_uid")[slider_id]
class TestIso2Roi:
    """
    Sets up the OnkoDICOM main window for testing ISO2ROI
    functionality.
    """
    __test__ = False

    def __init__(self):
        # Load the test DICOM files
        test_dir = Path.cwd().joinpath('test', 'testdata')
        dicom_files = find_DICOM_files(test_dir)  # DICOM test files
        common_dir = os.path.dirname(os.path.commonprefix(dicom_files))
        read_data_dict, file_names_dict = \
            ImageLoading.get_datasets(dicom_files)

        # Populate the patient dict container singleton
        self.patient_dict_container = PatientDictContainer()
        self.patient_dict_container.clear()
        self.patient_dict_container.set_initial_values(
            common_dir, read_data_dict, file_names_dict)

        # Set additional attributes in the patient dict container
        # (otherwise the program will crash and the test will fail)
        if "rtss" in file_names_dict:
            dataset_rtss = dcmread(file_names_dict['rtss'])
            self.rois = ImageLoading.get_roi_info(dataset_rtss)
            dict_raw_contour_data, dict_numpoints = \
                ImageLoading.get_raw_contour_data(dataset_rtss)
            dict_pixluts = ImageLoading.get_pixluts(read_data_dict)
            self.patient_dict_container.set("rois", self.rois)
            self.patient_dict_container.set("raw_contour",
                                            dict_raw_contour_data)
            self.patient_dict_container.set("num_points", dict_numpoints)
            self.patient_dict_container.set("pixluts", dict_pixluts)

            # Record the location of the RTSS file
            file_paths = self.patient_dict_container.filepaths
            self.patient_dict_container.set("file_rtss",
                                            file_paths['rtss'])

        # ISO2ROI object under test
        self.iso2roi = ISO2ROI()
class TestIsodosesTab:
    """
    Sets up the OnkoDICOM main window for testing the isodoses tab.
    """
    __test__ = False

    def __init__(self):
        # Load the test DICOM files
        test_dir = Path.cwd().joinpath('test', 'testdata')
        dicom_files = find_DICOM_files(test_dir)  # DICOM test files
        common_dir = os.path.dirname(os.path.commonprefix(dicom_files))
        read_data_dict, file_names_dict = \
            ImageLoading.get_datasets(dicom_files)

        # Populate the patient dict container singleton
        self.patient_dict_container = PatientDictContainer()
        self.patient_dict_container.clear()
        self.patient_dict_container.set_initial_values(
            common_dir, read_data_dict, file_names_dict)

        # Set additional attributes in the patient dict container
        # (otherwise the program will crash and the test will fail)
        if "rtss" in file_names_dict:
            dataset_rtss = dcmread(file_names_dict['rtss'])
            self.rois = ImageLoading.get_roi_info(dataset_rtss)
            dict_raw_contour_data, dict_numpoints = \
                ImageLoading.get_raw_contour_data(dataset_rtss)
            dict_pixluts = ImageLoading.get_pixluts(read_data_dict)
            self.patient_dict_container.set("rois", self.rois)
            self.patient_dict_container.set("raw_contour",
                                            dict_raw_contour_data)
            self.patient_dict_container.set("num_points", dict_numpoints)
            self.patient_dict_container.set("pixluts", dict_pixluts)

        # Open the main window
        self.main_window = MainWindow()
class TestDvh2RtDose:
    """
    Sets up the data and variables needed for testing the DVH2RTDOSE
    functionality.
    """
    __test__ = False

    def __init__(self):
        self.dvh_data = None

        # Load the test DICOM files
        test_dir = Path.cwd().joinpath('test', 'testdata')
        dicom_files = find_DICOM_files(test_dir)
        common_dir = os.path.dirname(os.path.commonprefix(dicom_files))
        read_data_dict, file_names_dict = \
            ImageLoading.get_datasets(dicom_files)

        # Populate the patient dict container singleton
        self.patient_dict_container = PatientDictContainer()
        self.patient_dict_container.clear()
        self.patient_dict_container.set_initial_values(
            common_dir, read_data_dict, file_names_dict)
class BatchProcessPyRad2PyRadSR(BatchProcess):
    """
    This class handles batch processing for the PyRad2PyRad-SR
    process. Inherits from the BatchProcess class.
    """
    # Allowed classes for PyRad2PyRad-SR
    allowed_classes = {
        # RT Structure Set
        "1.2.840.10008.5.1.4.1.1.481.3": {
            "name": "rtss",
            "sliceable": False
        },
        # RT Dose
        "1.2.840.10008.5.1.4.1.1.481.2": {
            "name": "rtdose",
            "sliceable": False
        },
    }

    def __init__(self, progress_callback, interrupt_flag, patient_files):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells
                               the function to stop loading.
        :param patient_files: List of patient files.
        """
        # Call the parent class
        super(BatchProcessPyRad2PyRadSR, self).__init__(progress_callback,
                                                        interrupt_flag,
                                                        patient_files)

        # Set class variables. Was "'rtss'.split()" - a plain list
        # literal is clearer and equivalent.
        self.patient_dict_container = PatientDictContainer()
        self.required_classes = ['rtss']
        self.ready = self.load_images(patient_files, self.required_classes)
        self.output_path = ""

    def start(self):
        """
        Goes through the steps of the PyRad2Pyrad-SR conversion.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if not self.ready:
            self.summary = "SKIP"
            return False

        rtss_path = self.patient_dict_container.filepaths.get('rtss')
        patient_id = self.patient_dict_container.dataset.get('rtss').PatientID
        patient_id = Radiomics.clean_patient_id(patient_id)
        patient_path = self.patient_dict_container.path
        # patient_id is already cleaned above; no need to clean twice
        file_name = patient_id + '.nrrd'
        patient_nrrd_folder_path = patient_path + '/nrrd/'
        patient_nrrd_file_path = patient_nrrd_folder_path + file_name
        output_csv_path = patient_path + '/CSV/'

        # Create the working folders if they do not already exist
        os.makedirs(patient_nrrd_folder_path, exist_ok=True)
        os.makedirs(output_csv_path, exist_ok=True)

        self.progress_callback.emit(("Converting dicom to nrrd..", 25))
        # Convert dicom files to nrrd for pyradiomics processing
        Radiomics.convert_to_nrrd(patient_path, patient_nrrd_file_path)

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Location of folder where converted masks are saved
        mask_folder_path = patient_nrrd_folder_path + 'structures'
        os.makedirs(mask_folder_path, exist_ok=True)

        self.progress_callback.emit(("Converting ROIs to nrrd..", 45))
        # Convert ROIs to nrrd
        Radiomics.convert_rois_to_nrrd(patient_path, rtss_path,
                                       mask_folder_path)

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        self.progress_callback.emit(("Running pyradiomics..", 70))
        # Run pyradiomics, convert to dataframe
        radiomics_df = Radiomics.get_radiomics_df(patient_path, patient_id,
                                                  patient_nrrd_file_path,
                                                  mask_folder_path)

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if radiomics_df is None:
            self.summary = "PYRAD_NO_DF"
            return False

        # Convert the dataframe to CSV file
        self.progress_callback.emit(("Converting to CSV..", 85))
        Radiomics.convert_df_to_csv(radiomics_df, output_csv_path,
                                    patient_id)

        # Convert resulting CSV to DICOM-SR
        self.progress_callback.emit(("Exporting to DICOM-SR..", 90))
        self.export_to_sr(output_csv_path, patient_id)

        # Delete CSV file and NRRD folder
        shutil.rmtree(patient_nrrd_folder_path)
        os.remove(output_csv_path + 'Pyradiomics_' + patient_id + '.csv')

        return True

    def export_to_sr(self, csv_path, patient_hash):
        """
        Save CSV data into DICOM SR. Reads in CSV data and saves it
        to a DICOM SR file.
        :param csv_path: the path that the CSV has been saved to.
        :param patient_hash: the patient's hash as a string.
        """
        # Get CSV data. (The original compared the joined Path
        # against "" - that check could never trigger and has been
        # removed.)
        file_path = Path(csv_path).joinpath('Pyradiomics_' + patient_hash
                                            + ".csv")
        with open(file_path, newline="") as stream:
            data = list(csv.reader(stream))

        # Write raw CSV data to DICOM SR: each item followed by a
        # comma, each row terminated with a newline
        text = ""
        for line in data:
            for item in line:
                text += str(item) + ","
            text += "\n"

        # Create and save DICOM SR file
        file_path = self.patient_dict_container.path
        file_path = Path(file_path).joinpath("Pyradiomics-SR.dcm")
        ds = next(iter(self.patient_dict_container.dataset.values()))
        dicom_sr = DICOMStructuredReport.generate_dicom_sr(
            file_path, ds, text, "PYRADIOMICS")
        dicom_sr.save_as(file_path)

        # Update patient dict container
        self.patient_dict_container.dataset['sr-pyrad'] = dicom_sr
        self.patient_dict_container.filepaths['sr-pyrad'] = file_path
class BatchProcessClinicalDataSR2CSV(BatchProcess):
    """
    This class handles batch processing for the Clinical Data 2 CSV
    process. Inherits from the BatchProcessing class.
    """
    # Allowed classes for ClinicalDataSR2CSV
    allowed_classes = {
        # Comprehensive SR
        "1.2.840.10008.5.1.4.1.1.88.33": {
            "name": "sr",
            "sliceable": False
        }
    }

    def __init__(self, progress_callback, interrupt_flag, patient_files,
                 output_path):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells
                               the function to stop loading.
        :param patient_files: List of patient files.
        :param output_path: Path of the output CSV file.
        """
        # Call the parent class
        super(BatchProcessClinicalDataSR2CSV,
              self).__init__(progress_callback, interrupt_flag,
                             patient_files)

        # Set class variables
        self.patient_dict_container = PatientDictContainer()
        self.required_classes = ['sr']
        self.ready = self.load_images(patient_files, self.required_classes)
        self.output_path = output_path

    def start(self):
        """
        Goes through the steps of the ClinicalData-SR2CSV conversion.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if not self.ready:
            self.summary = "SKIP"
            return False

        # See if SR contains clinical data
        self.progress_callback.emit(("Checking SR file...", 20))
        cd_sr = self.find_clinical_data_sr()

        if cd_sr is None:
            self.summary = "CD_NO_SR"
            return False

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Read in clinical data from SR
        self.progress_callback.emit(("Reading clinical data...", 50))
        data_dict = self.read_clinical_data_from_sr(cd_sr)

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Write clinical data to CSV
        self.progress_callback.emit(("Writing clinical data to CSV...", 80))
        self.write_to_csv(data_dict)
        return True

    def find_clinical_data_sr(self):
        """
        Searches the patient dict container for any SR files
        containing clinical data. Returns the first SR with clinical
        data found.
        :return: ds, SR dataset containing clinical data, or None if
                 nothing found.
        """
        datasets = self.patient_dict_container.dataset
        for ds in datasets.values():
            # Check for SR files
            if ds.SOPClassUID != '1.2.840.10008.5.1.4.1.1.88.33':
                continue
            # Check to see if it is a clinical data SR. getattr is
            # used because SeriesDescription is optional in DICOM and
            # accessing it directly raised AttributeError when absent.
            if getattr(ds, 'SeriesDescription', None) == "CLINICAL-DATA":
                return ds
        return None

    def read_clinical_data_from_sr(self, sr_cd):
        """
        Reads clinical data from the found SR file.
        :param sr_cd: the clinical data SR dataset.
        :return: dictionary of clinical data, where keys are
                 attributes and values are data.
        """
        data = sr_cd.ContentSequence[0].TextValue

        data_dict = {}
        for row in data.split("\n"):
            if row == '':
                continue
            # Split on the first colon only, so values containing
            # colons are preserved and colon-free rows no longer
            # raise IndexError (attributes are assumed to contain no
            # colons). The leading character of the value (the space
            # after the colon) is stripped, as before.
            attrib, _, value = row.partition(":")
            data_dict[attrib] = value[1:]
        return data_dict

    def write_to_csv(self, data_dict):
        """
        Append data to the clinical data CSV file. Create it if it
        doesn't exist. Assumes that all data dicts have the same keys
        in the same order and that SR files were generated by
        OnkoDICOM. Data will still write, but be jumbled if this is
        not the case. It is recommended that functionality to make
        data writing consistent is implemented in the future, however
        this requires that SR files generated by OnkoDICOM are made
        to be far more structured than they currently are.
        :param data_dict: dictionary of clinical data, where keys are
                          attributes and values are data.
        """
        # Put keys and values into separate lists
        attribs = list(data_dict.keys())
        values = list(data_dict.values())

        # File path
        path = Path(self.output_path).joinpath("ClinicalData.csv")

        # Write the header only when the file does not yet exist
        write_header = not os.path.exists(path)

        # Write to CSV
        with open(path, 'a', newline="") as stream:
            writer = csv.writer(stream)
            if write_header:
                writer.writerow(attribs)
            writer.writerow(values)
class BatchProcessROINameCleaning(BatchProcess):
    """
    This class handles batch processing for the ROI Name Cleaning
    process. Inherits from the BatchProcess class.
    """
    # Allowed classes for ROI Name Cleaning
    allowed_classes = {
        # RT Structure Set
        "1.2.840.10008.5.1.4.1.1.481.3": {
            "name": "rtss",
            "sliceable": False
        }
    }

    def __init__(self, progress_callback, interrupt_flag, roi_options):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells
                               the function to stop loading.
        :param roi_options: Dictionary of ROI names and what is to be
                            done to them.
        """
        # Call the parent class
        super(BatchProcessROINameCleaning,
              self).__init__(progress_callback, interrupt_flag,
                             roi_options)

        # Set class variables
        self.patient_dict_container = PatientDictContainer()
        self.required_classes = ['rtss']
        self.roi_options = roi_options

    def start(self):
        """
        Goes through the steps of the ROI Name Cleaning process.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self.interrupt_flag.is_set():
            # TODO: convert print to logging
            print("Stopped Batch ROI Name Cleaning")
            self.patient_dict_container.clear()
            self.summary = "Batch ROI Name Cleaning was interrupted."
            return False

        self.summary = "==Batch ROI Name Cleaning==\n"

        # NOTE(review): progress is advanced by the number of datasets
        # attached to each ROI name scaled by len(roi_options)/100;
        # this only sums to 100 when each ROI has one dataset - verify
        # the intended weighting.
        step = len(self.roi_options) / 100
        progress = 0

        # Process every ROI name in turn
        for roi in self.roi_options:
            # Stop loading
            if self.interrupt_flag.is_set():
                # TODO: convert print to logging
                print("Stopped Batch ROI Name Cleaning")
                self.patient_dict_container.clear()
                self.summary = "Batch ROI Name Cleaning was interrupted."
                return False

            self.summary += "ROI: " + roi + "\n"
            progress += len(self.roi_options[roi]) / step
            self.progress_callback.emit(("Cleaning ROIs...", progress))

            # Append dataset locations to summary
            self.summary += "Dataset(s): "
            for info in self.roi_options[roi]:
                self.summary += info[2] + ", "
            self.summary += "\n"

            # Apply the requested action to each dataset for this ROI
            for info in self.roi_options[roi]:
                if info[0] == 0:
                    # Ignore
                    self.summary += "Process: Ignored\n\n"
                elif info[0] == 1:
                    # Rename
                    self.rename(info[2], roi, info[1])
                    self.summary += "Process: Renamed from '" + roi \
                                    + "' to '" + info[1] + "'\n\n"
                elif info[0] == 2:
                    # Delete
                    rtss = ROI.delete_roi(dcmread(info[2]), roi)
                    rtss.save_as(info[2])
                    self.summary += "Process: Deleted\n\n"

        return True

    def rename(self, dataset, old_name, new_name):
        """
        Rename an ROI in an RTSS.
        :param dataset: file path of the RT Struct to work on.
        :param old_name: old ROI name to change.
        :param new_name: name to change the ROI to.
        """
        # Load dataset
        rtss = dcmread(dataset)

        # Look up the ROI number matching the old name
        roi_id = None
        for sequence in rtss.StructureSetROISequence:
            if sequence.ROIName == old_name:
                roi_id = sequence.ROINumber
                break

        # Nothing to rename if the ROI was not found
        if not roi_id:
            return

        # Rename the ROI and save the dataset back to disk
        rtss = ROI.rename_roi(rtss, roi_id, new_name)
        rtss.save_as(dataset)
class BatchProcessCSV2ClinicalDataSR(BatchProcess):
    """
    This class handles batch processing for the CSV2ClinicalData-SR
    process. Inherits from the BatchProcess class.
    """
    # Allowed classes for CSV2ClinicalDataSR
    allowed_classes = {
        # CT Image
        "1.2.840.10008.5.1.4.1.1.2": {
            "name": "ct",
            "sliceable": True
        },
        # PET Image
        "1.2.840.10008.5.1.4.1.1.128": {
            "name": "pet",
            "sliceable": True
        },
        # RT Dose
        "1.2.840.10008.5.1.4.1.1.481.2": {
            "name": "rtdose",
            "sliceable": False
        }
    }

    def __init__(self, progress_callback, interrupt_flag, patient_files,
                 input_path):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells
                               the function to stop loading.
        :param patient_files: List of patient files.
        :param input_path: Path of the input CSV file.
        """
        # Call the parent class
        super(BatchProcessCSV2ClinicalDataSR,
              self).__init__(progress_callback, interrupt_flag,
                             patient_files)

        # Set class variables
        self.patient_dict_container = PatientDictContainer()
        self.required_classes = ['ct', 'rtdose']
        self.required_classes_2 = ['pet', 'rtdose']
        # Only one of either CT or PET is needed (plus RT Dose); try
        # CT first, then fall back to PET
        self.ready = (
            self.load_images(patient_files, self.required_classes)
            or self.load_images(patient_files, self.required_classes_2))
        self.input_path = input_path

    def start(self):
        """
        Goes through the steps of the CSV2ClinicalData-SR conversion.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if not self.ready:
            self.summary = "SKIP"
            # Was a bare "return" (None); return False to honour the
            # documented True/False contract
            return False

        # Import CSV data
        self.progress_callback.emit(("Importing CSV data...", 60))
        data_dict = self.import_clinical_data()

        if data_dict is None:
            self.summary = "CSV_NO_PATIENT"
            # Was a bare "return" (None); see above
            return False

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Save clinical data to an SR
        self.progress_callback.emit(
            ("Exporting Clinical Data to DICOM-SR...", 90))
        self.save_clinical_data(data_dict)
        return True

    def import_clinical_data(self):
        """
        Attempt to import clinical data from the CSV stored in the
        program's settings database.
        :return: dictionary of clinical data, or None if the CSV does
                 not exist or does not contain this patient.
        """
        # Check that the clinical data CSV exists
        if not self.input_path or not os.path.exists(self.input_path):
            return None

        # Current patient's ID. Use the container held by this
        # instance rather than constructing a fresh
        # PatientDictContainer() as the original did.
        patient_id = self.patient_dict_container.dataset[0].PatientID

        with open(self.input_path, newline="") as stream:
            data = list(csv.reader(stream))

        # Find the row matching this patient's ID
        row_num = None
        for i, row in enumerate(data):
            if row[0] == patient_id:
                row_num = i
                break

        # Return if patient's data not in the CSV file
        if row_num is None:
            return None

        # Map CSV headings to this patient's values; zip stops at the
        # shorter list, so a short row cannot raise IndexError
        return dict(zip(data[0], data[row_num]))

    def save_clinical_data(self, data_dict):
        """
        Saves clinical data to a DICOM-SR file. Overwrites any
        existing clinical data SR files in the dataset.
        :param data_dict: dictionary of clinical data, where keys are
                          attributes and values are data.
        """
        # Create string from clinical data dictionary
        text = ""
        for key in data_dict:
            text += str(key) + ": " + str(data_dict[key]) + "\n"

        # Create and save DICOM SR file
        file_path = self.patient_dict_container.path
        file_path = Path(file_path).joinpath("Clinical-Data-SR.dcm")
        ds = self.patient_dict_container.dataset[0]
        dicom_sr = DICOMStructuredReport.generate_dicom_sr(
            file_path, ds, text, "CLINICAL-DATA")
        dicom_sr.save_as(file_path)
class BatchProcessISO2ROI(BatchProcess):
    """
    This class handles batch processing for the ISO2ROI process.
    Inherits from the BatchProcess class.
    """
    # Allowed classes for ISO2ROI
    allowed_classes = {
        # CT Image
        "1.2.840.10008.5.1.4.1.1.2": {
            "name": "ct",
            "sliceable": True
        },
        # RT Structure Set
        "1.2.840.10008.5.1.4.1.1.481.3": {
            "name": "rtss",
            "sliceable": False
        },
        # RT Dose
        "1.2.840.10008.5.1.4.1.1.481.2": {
            "name": "rtdose",
            "sliceable": False
        },
        # RT Plan
        "1.2.840.10008.5.1.4.1.1.481.5": {
            "name": "rtplan",
            "sliceable": False
        }
    }

    def __init__(self, progress_callback, interrupt_flag, patient_files):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells
                               the function to stop loading.
        :param patient_files: List of patient files.
        """
        # Call the parent class
        super(BatchProcessISO2ROI, self).__init__(progress_callback,
                                                  interrupt_flag,
                                                  patient_files)

        # Set class variables
        self.patient_dict_container = PatientDictContainer()
        self.required_classes = ('ct', 'rtdose', 'rtplan')
        self.ready = self.load_images(patient_files, self.required_classes)

    def _interrupted(self):
        """
        Check the interrupt flag; if it is set, clear the patient
        dict container and record the interruption. Extracted because
        the identical sequence appeared four times in start().
        :return: True if processing was interrupted, False otherwise.
        """
        if self.interrupt_flag.is_set():
            # TODO: convert print to logging
            print("Stopped ISO2ROI")
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return True
        return False

    def start(self):
        """
        Goes through the steps of the ISO2ROI conversion.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self._interrupted():
            return False

        if not self.ready:
            self.summary = "SKIP"
            return False

        # Update progress
        self.progress_callback.emit(("Setting up...", 30))

        # Initialise
        InitialModel.create_initial_model_batch()

        # Stop loading
        if self._interrupted():
            return False

        # Check if the dataset is complete
        self.progress_callback.emit(("Checking dataset...", 40))
        dataset_complete = ImageLoading.is_dataset_dicom_rt(
            self.patient_dict_container.dataset)

        # Create ISO2ROI object
        iso2roi = ISO2ROI()
        self.progress_callback.emit(("Performing ISO2ROI... ", 50))

        # Stop loading
        if self._interrupted():
            return False

        if not dataset_complete:
            # Check if the RT struct file is missing. If yes, create
            # one and add its data to the patient dict container.
            if not self.patient_dict_container.get("file_rtss"):
                self.progress_callback.emit(("Generating RT Struct", 55))
                self.create_new_rtstruct(self.progress_callback)

        # Get isodose levels to turn into ROIs
        isodose_levels = \
            iso2roi.get_iso_levels(data_path('batch_isodoseRoi.csv'))

        # Stop loading
        if self._interrupted():
            return False

        # Calculate boundaries
        self.progress_callback.emit(("Calculating boundaries...", 60))
        boundaries = iso2roi.calculate_isodose_boundaries(isodose_levels)

        # Return if boundaries could not be calculated
        if not boundaries:
            # TODO: convert print to logging
            print("Boundaries could not be calculated.")
            self.summary = "ISO_NO_RX_DOSE"
            return False

        # Generate ROIs
        self.progress_callback.emit(("Generating ROIs...", 80))
        iso2roi.generate_roi(boundaries, self.progress_callback)

        # Save new RTSS
        self.progress_callback.emit(("Saving RT Struct...", 90))
        self.save_rtss()
        return True
def load(self, interrupt_flag, progress_callback):
    """
    Load the selected DICOM files into the PatientDictContainer
    singleton, optionally calculating DVH data along the way.

    :param interrupt_flag: A threading.Event() object that tells the
        function to stop loading.
    :param progress_callback: A signal that receives the current
        progress of the loading.
    :return: True when loading finished, False when interrupted.
    """
    progress_callback.emit(("Creating datasets...", 0))
    try:
        # Gets the common root folder.
        path = os.path.dirname(os.path.commonprefix(self.selected_files))
        read_data_dict, file_names_dict = \
            ImageLoading.get_datasets(self.selected_files)
    except ImageLoading.NotAllowedClassError:
        # Re-raised so the caller can report the unsupported file type.
        raise ImageLoading.NotAllowedClassError

    # Populate the initial values in the PatientDictContainer singleton.
    patient_dict_container = PatientDictContainer()
    patient_dict_container.clear()
    patient_dict_container.set_initial_values(path, read_data_dict,
                                              file_names_dict)

    # As there is no way to interrupt a QRunnable, this method must check
    # after every step whether or not the interrupt flag has been set, in
    # which case it will interrupt this method after the currently
    # processing function has finished running. It's not very pretty, and
    # the thread will still run some functions for, in some cases, up to
    # a couple seconds after the close button on the Progress Window has
    # been clicked, however it's the best solution I could come up with.
    # If you have a cleaner alternative, please make your contribution.
    if interrupt_flag.is_set():
        print("stopped")
        return False

    if 'rtss' in file_names_dict and 'rtdose' in file_names_dict:
        # Ask the parent window whether DVHs should be calculated, then
        # busy-wait for the answer.
        # NOTE(review): spin-wait blocks this thread until the signal
        # handler sets self.advised_calc_dvh — presumably the handler
        # runs on another thread; confirm before refactoring.
        self.parent_window.signal_advise_calc_dvh.connect(
            self.update_calc_dvh)
        self.signal_request_calc_dvh.emit()
        while not self.advised_calc_dvh:
            pass

    if 'rtss' in file_names_dict:
        dataset_rtss = dcmread(file_names_dict['rtss'])

        progress_callback.emit(("Getting ROI info...", 10))
        rois = ImageLoading.get_roi_info(dataset_rtss)

        if interrupt_flag.is_set():  # Stop loading.
            print("stopped")
            return False

        progress_callback.emit(("Getting contour data...", 30))
        dict_raw_contour_data, dict_numpoints = \
            ImageLoading.get_raw_contour_data(dataset_rtss)

        # Determine which ROIs are one slice thick
        dict_thickness = ImageLoading.get_thickness_dict(dataset_rtss,
                                                         read_data_dict)

        if interrupt_flag.is_set():  # Stop loading.
            print("stopped")
            return False

        progress_callback.emit(("Getting pixel LUTs...", 50))
        dict_pixluts = ImageLoading.get_pixluts(read_data_dict)

        if interrupt_flag.is_set():  # Stop loading.
            print("stopped")
            return False

        # Add RTSS values to PatientDictContainer
        patient_dict_container.set("rois", rois)
        patient_dict_container.set("raw_contour", dict_raw_contour_data)
        patient_dict_container.set("num_points", dict_numpoints)
        patient_dict_container.set("pixluts", dict_pixluts)

        if 'rtdose' in file_names_dict and self.calc_dvh:
            dataset_rtdose = dcmread(file_names_dict['rtdose'])

            # Spawn-based platforms (i.e Windows and MacOS) have a large
            # overhead when creating a new process, which ends up making
            # multiprocessing on these platforms more expensive than
            # linear calculation. As such, multiprocessing is only
            # available on Linux until a better solution is found.
            fork_safe_platforms = ['Linux']
            if platform.system() in fork_safe_platforms:
                progress_callback.emit(("Calculating DVHs...", 60))
                raw_dvh = ImageLoading.multi_calc_dvh(dataset_rtss,
                                                      dataset_rtdose,
                                                      rois,
                                                      dict_thickness)
            else:
                progress_callback.emit(
                    ("Calculating DVHs... (This may take a while)", 60))
                raw_dvh = ImageLoading.calc_dvhs(dataset_rtss,
                                                 dataset_rtdose, rois,
                                                 dict_thickness,
                                                 interrupt_flag)

            if interrupt_flag.is_set():  # Stop loading.
                print("stopped")
                return False

            progress_callback.emit(("Converging to zero...", 80))
            dvh_x_y = ImageLoading.converge_to_0_dvh(raw_dvh)

            if interrupt_flag.is_set():  # Stop loading.
                print("stopped")
                return False

            # Add DVH values to PatientDictContainer
            patient_dict_container.set("raw_dvh", raw_dvh)
            patient_dict_container.set("dvh_x_y", dvh_x_y)
            patient_dict_container.set("dvh_outdated", False)
            return True
        else:
            return True

    return True
class BatchProcessDVH2CSV(BatchProcess):
    """
    This class handles batch processing for the DVH2CSV process.
    Inherits from the BatchProcess class.
    """
    # Allowed classes for DVH2CSV
    allowed_classes = {
        # RT Structure Set
        "1.2.840.10008.5.1.4.1.1.481.3": {
            "name": "rtss",
            "sliceable": False
        },
        # RT Dose
        "1.2.840.10008.5.1.4.1.1.481.2": {
            "name": "rtdose",
            "sliceable": False
        },
    }

    def __init__(self, progress_callback, interrupt_flag, patient_files,
                 output_path):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells the
                               function to stop loading.
        :param patient_files: List of patient files.
        :param output_path: output of the resulting .csv file.
        """
        # Call the parent class
        super(BatchProcessDVH2CSV, self).__init__(progress_callback,
                                                  interrupt_flag,
                                                  patient_files)

        # Set class variables
        self.patient_dict_container = PatientDictContainer()
        self.required_classes = ('rtss', 'rtdose')
        self.ready = self.load_images(patient_files, self.required_classes)
        self.output_path = output_path
        self.filename = "DVHs_.csv"

    def start(self):
        """
        Goes through the steps of the DVH2CSV conversion.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if not self.ready:
            self.summary = "SKIP"
            return False

        # Check if the dataset is complete
        self.progress_callback.emit(("Checking dataset...", 40))

        # Attempt to get DVH data from RT Dose
        self.progress_callback.emit(
            ("Attempting to get DVH from RTDOSE...", 50))
        raw_dvh = CalculateDVHs.rtdose2dvh()

        # If the RT Dose already holds complete DVH data there is no
        # need to recalculate it.
        dvh_outdated = True
        if bool(raw_dvh):
            incomplete = raw_dvh["diff"]
            raw_dvh.pop("diff")
            if not incomplete:
                dvh_outdated = False
                self.progress_callback.emit(("DVH data in RT Dose.", 80))
        else:
            # NOTE(review): reaching this branch implies raw_dvh is
            # empty/falsy, so pop("diff") would raise — presumably
            # rtdose2dvh() always includes a "diff" entry; confirm
            # against CalculateDVHs.rtdose2dvh.
            raw_dvh.pop("diff")

        if dvh_outdated:
            # Calculate DVH if not in RT Dose
            self.progress_callback.emit(("Calculating DVH...", 60))
            read_data_dict = self.patient_dict_container.dataset
            dataset_rtss = self.patient_dict_container.dataset['rtss']
            dataset_rtdose = self.patient_dict_container.dataset['rtdose']
            rois = self.patient_dict_container.get("rois")
            try:
                dict_thickness = \
                    ImageLoading.get_thickness_dict(dataset_rtss,
                                                    read_data_dict)
                raw_dvh = ImageLoading.calc_dvhs(dataset_rtss,
                                                 dataset_rtdose, rois,
                                                 dict_thickness,
                                                 self.interrupt_flag)
            except TypeError:
                self.summary = "DVH_TYPE_ERROR"
                return False

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Export DVH to CSV
        self.progress_callback.emit(("Exporting DVH to CSV...", 90))

        # Get path to save to
        path = self.output_path + '/CSV/'

        # Get patient ID
        patient_id = self.patient_dict_container.dataset['rtss'].PatientID

        # Make CSV directory (including missing parents) if it doesn't
        # exist. os.mkdir would fail when output_path itself is missing.
        if not os.path.isdir(path):
            os.makedirs(path, exist_ok=True)

        # Save the DVH to a CSV file
        self.progress_callback.emit(("Exporting DVH to RT Dose...", 95))
        self.dvh2csv(raw_dvh, path, self.filename, patient_id)

        # Save the DVH to the RT Dose
        CalculateDVHs.dvh2rtdose(raw_dvh)
        return True

    def dvh2csv(self, dict_dvh, path, csv_name, patient_id):
        """
        Export DVH data to a CSV file, appending when the file already
        exists.
        :param dict_dvh: A dictionary of DVH {ROINumber: DVH}
        :param path: Target path of CSV export
        :param csv_name: CSV file name
        :param patient_id: Patient Identifier
        """
        # full path of the target csv file
        tar_path = path + csv_name
        # Only write the header row the first time the file is created
        create_header = not os.path.isfile(tar_path)

        dvh_csv_list = []
        csv_header = ['Patient ID', 'ROI', 'Volume (mL)']

        # Highest dose-bin index sampled across all ROIs; determines how
        # many dose columns the header needs.
        max_roi_dose = 0

        for roi_number in dict_dvh:
            dvh = dict_dvh[roi_number]
            dvh_roi_list = [patient_id, dvh.name, dvh.volume]

            # Sample the relative-volume curve every 10 bins.
            # (The previous implementation reused the outer loop
            # variable `i` here, shadowing the ROI key.)
            dose = dvh.relative_volume.counts
            for dose_idx in range(0, len(dose), 10):
                dvh_roi_list.append(dose[dose_idx])

                # Track the largest sampled bin index so the header
                # covers every column written below
                if dose_idx > max_roi_dose:
                    max_roi_dose = dose_idx

            dvh_csv_list.append(dvh_roi_list)

        for i in range(0, max_roi_dose + 1, 10):
            csv_header.append(str(i) + 'cGy')

        # Convert the list into pandas dataframe, with 2 digit rounding.
        pddf_csv = pd.DataFrame(dvh_csv_list, columns=csv_header).round(2)
        # Fill empty blocks with 0.0 (shorter ROIs have fewer columns)
        pddf_csv.fillna(0.0, inplace=True)
        pddf_csv.set_index('Patient ID', inplace=True)
        # Convert and export pandas dataframe to CSV file
        pddf_csv.to_csv(tar_path, mode='a', header=create_header)

    def set_filename(self, name):
        # Fall back to the default name when an empty string is given
        if name != '':
            self.filename = name
        else:
            self.filename = "DVHs_.csv"
class BatchProcessPyRad2CSV(BatchProcess):
    """
    This class handles batch processing for the PyRadCSV process.
    Inherits from the BatchProcess class.
    """
    # Allowed classes for PyRadCSV
    allowed_classes = {
        # RT Structure Set
        "1.2.840.10008.5.1.4.1.1.481.3": {
            "name": "rtss",
            "sliceable": False
        },
        # RT Dose
        "1.2.840.10008.5.1.4.1.1.481.2": {
            "name": "rtdose",
            "sliceable": False
        },
    }

    def __init__(self, progress_callback, interrupt_flag, patient_files,
                 output_path):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells the
                               function to stop loading.
        :param patient_files: List of patient files.
        :param output_path: output of the resulting .csv file.
        """
        # Call the parent class
        super(BatchProcessPyRad2CSV, self).__init__(progress_callback,
                                                    interrupt_flag,
                                                    patient_files)

        # Set class variables
        self.patient_dict_container = PatientDictContainer()
        # List literal for consistency with the sibling batch classes
        # (was the equivalent but obscure 'rtss'.split())
        self.required_classes = ['rtss']
        self.ready = self.load_images(patient_files, self.required_classes)
        self.output_path = output_path
        self.filename = "Pyradiomics_.csv"

    def start(self):
        """
        Goes through the steps of the PyRadCSV conversion.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if not self.ready:
            self.summary = "SKIP"
            return False

        rtss_path = self.patient_dict_container.filepaths.get('rtss')
        patient_id = self.patient_dict_container.dataset.get('rtss').PatientID
        patient_id = Radiomics.clean_patient_id(patient_id)
        patient_path = self.patient_dict_container.path
        file_name = Radiomics.clean_patient_id(patient_id) + '.nrrd'
        patient_nrrd_folder_path = patient_path + '/nrrd/'
        patient_nrrd_file_path = patient_nrrd_folder_path + file_name

        output_csv_path = self.output_path.joinpath('CSV')

        # Create working folders if they do not exist.
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists()/os.makedirs() pairs.
        os.makedirs(patient_nrrd_folder_path, exist_ok=True)
        os.makedirs(output_csv_path, exist_ok=True)

        self.progress_callback.emit(("Converting dicom to nrrd..", 25))

        # Convert dicom files to nrrd for pyradiomics processing
        Radiomics.convert_to_nrrd(patient_path, patient_nrrd_file_path)

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Location of folder where converted masks saved
        mask_folder_path = patient_nrrd_folder_path + 'structures'
        os.makedirs(mask_folder_path, exist_ok=True)

        self.progress_callback.emit(("Converting ROIs to nrrd..", 45))

        # Convert ROIs to nrrd
        Radiomics.convert_rois_to_nrrd(
            patient_path, rtss_path, mask_folder_path)

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        self.progress_callback.emit(("Running pyradiomics..", 70))

        # Run pyradiomics, convert to dataframe
        radiomics_df = Radiomics.get_radiomics_df(
            patient_path, patient_id, patient_nrrd_file_path,
            mask_folder_path)

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if radiomics_df is None:
            self.summary = "PYRAD_NO_DF"
            return False

        # Convert the dataframe to CSV file
        self.progress_callback.emit(("Converting to CSV..", 90))
        self.convert_df_to_csv(radiomics_df, output_csv_path)
        return True

    def set_filename(self, name):
        # Fall back to the default name when an empty string is given
        if name != '':
            self.filename = name
        else:
            self.filename = "Pyradiomics_.csv"

    def convert_df_to_csv(self, radiomics_df, csv_path):
        """
        Export dataframe as a csv file, appending when the file already
        exists.
        :param radiomics_df: dataframe containing radiomics data.
        :param csv_path: output folder path.
        """
        # Create the output folder if it does not exist
        os.makedirs(csv_path, exist_ok=True)

        target_path = csv_path.joinpath(self.filename)
        # Only write the header row the first time the file is created
        create_header = not os.path.isfile(target_path)

        # Export dataframe as csv
        radiomics_df.to_csv(target_path, mode='a', header=create_header)
def load_images(cls, patient_files, required_classes):
    """
    Loads required datasets for the selected patient.
    :param patient_files: dictionary of classes and patient files.
    :param required_classes: list of classes required for the
                             selected/current process.
    :return: True if all required datasets found, false otherwise.
    """
    files = []
    found_classes = set()

    # Collect the files of every allowed class and record which of the
    # required modalities are present
    for class_id, series_list in patient_files.items():
        # If the item is an allowed class
        if class_id in cls.allowed_classes:
            # Add every series' files to the files list
            for series in series_list:
                files.extend(series.get_files())

            # Get the modality name; record it when it is required
            # (set.add is idempotent, so no membership pre-check needed)
            modality_name = cls.allowed_classes.get(class_id).get('name')
            if modality_name in required_classes:
                found_classes.add(modality_name)

    # Get the difference between required classes and found classes
    class_diff = set(required_classes).difference(found_classes)

    # If the dataset is missing required files, pass on it
    if len(class_diff) > 0:
        print("Skipping dataset. Missing required file(s) {}".format(
            class_diff))
        return False

    # Try to get the datasets from the selected files
    try:
        # Convert paths to a common file system representation
        files = [Path(file).as_posix() for file in files]
        read_data_dict, file_names_dict = cls.get_datasets(files)
        path = os.path.dirname(
            os.path.commonprefix(list(file_names_dict.values())))
    # Otherwise raise an exception (OnkoDICOM does not support the
    # selected file type)
    except ImageLoading.NotAllowedClassError:
        raise ImageLoading.NotAllowedClassError

    # Populate the initial values in the PatientDictContainer
    patient_dict_container = PatientDictContainer()
    patient_dict_container.clear()
    patient_dict_container.set_initial_values(path, read_data_dict,
                                              file_names_dict)

    # If an RT Struct is included, set relevant values in the
    # PatientDictContainer
    if 'rtss' in file_names_dict:
        dataset_rtss = dcmread(file_names_dict['rtss'])
        rois = ImageLoading.get_roi_info(dataset_rtss)
        dict_raw_contour_data, dict_numpoints = \
            ImageLoading.get_raw_contour_data(dataset_rtss)
        dict_pixluts = ImageLoading.get_pixluts(read_data_dict)

        # Add RT Struct values to PatientDictContainer
        patient_dict_container.set("rois", rois)
        patient_dict_container.set("raw_contour", dict_raw_contour_data)
        patient_dict_container.set("num_points", dict_numpoints)
        patient_dict_container.set("pixluts", dict_pixluts)

    return True
class BatchProcessROIName2FMAID(BatchProcess):
    """
    This class handles batch processing for the ROI Name to FMA ID
    process. Inherits from the BatchProcess class.
    """
    # Allowed classes for ROI Name Cleaning
    allowed_classes = {
        # RT Structure Set
        "1.2.840.10008.5.1.4.1.1.481.3": {
            "name": "rtss",
            "sliceable": False
        }
    }

    def __init__(self, progress_callback, interrupt_flag, patient_files):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells the
                               function to stop loading.
        :param patient_files: List of patient files.
        """
        # Call the parent class
        super(BatchProcessROIName2FMAID, self).__init__(progress_callback,
                                                        interrupt_flag,
                                                        patient_files)

        # Set class variables
        self.required_classes = ['rtss']
        self.organ_names = []
        self.fma_ids = {}
        self.ready = self.load_images(patient_files, self.required_classes)
        self.patient_dict_container = PatientDictContainer()

    def start(self):
        """
        Goes through the steps of the ROI Name Cleaning process.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Lookup ROI names in Organ List
        self.progress_callback.emit(("Reading ROIs...", 40))
        roi_names = self.find_roi_names()

        # Return false if RTSS has no ROIs
        if not roi_names:
            self.summary = "FMA_NO_ROI"
            return False

        # Convert ROI name to FMA ID
        rtss = self.patient_dict_container.dataset['rtss']
        total = 0
        progress = 40
        # Spread the 40->90 progress range evenly across the ROIs
        step = (90 - 40) / len(roi_names)
        for name in roi_names:
            self.progress_callback.emit(("Renaming ROIs...", progress))
            progress += step
            rtss = self.rename(rtss, name, self.fma_ids[name])
            total += 1

        rtss.save_as(self.patient_dict_container.filepaths['rtss'])

        # Set the summary to be the number of ROIs modified and return
        self.summary = "FMA_ID_" + str(total)
        return True

    def find_roi_names(self):
        """
        Return a list of ROI names in the RTSS that are standard organ
        names.
        :return: list of ROI names.
        """
        # Get organ names and FMA IDs if they have not been populated
        if not self.organ_names:
            # Get standard organ names. The 'with' block closes the
            # file; no explicit close() is needed.
            with open(data_path('organName.csv'), 'r') as f:
                next(f)  # Skip the header row of the CSV
                for row in csv.reader(f):
                    self.organ_names.append(row[0])
                    self.fma_ids[row[0]] = row[1]

        rtss = self.patient_dict_container.dataset['rtss']
        rois = []

        # Collect each ROI name that is a standard organ name
        for structure_set_roi in rtss.StructureSetROISequence:
            roi_name = structure_set_roi.ROIName
            if roi_name in self.organ_names:
                rois.append(roi_name)

        return rois

    def rename(self, rtss, old_name, new_name):
        """
        Rename an ROI in an RTSS.
        :param rtss: RTSS dataset.
        :param old_name: old ROI name to change.
        :param new_name: name to change the ROI to.
        :return: the new RTSS (unchanged if old_name was not found).
        """
        # Find ROI with old name
        roi_id = None
        for sequence in rtss.StructureSetROISequence:
            if sequence.ROIName == old_name:
                roi_id = sequence.ROINumber
                break

        # Return the dataset unchanged if the ROI was not found.
        # (Previously this returned None, which clobbered the caller's
        # RTSS reference and would crash the next iteration.)
        if roi_id is None:
            return rtss

        # Change name of ROI to new name
        return ROI.rename_roi(rtss, roi_id, new_name)
def cleanup(self):
    """Reset the shared PatientDictContainer between patients."""
    container = PatientDictContainer()
    container.clear()
class BatchProcessSUV2ROI(BatchProcess):
    """
    This class handles batch processing for the SUV2ROI process.
    Inherits from the BatchProcess class.
    """
    # Allowed classes for SUV2ROI
    allowed_classes = {
        # PET Image
        "1.2.840.10008.5.1.4.1.1.128": {
            "name": "pet",
            "sliceable": True
        },
        # RT Structure Set
        "1.2.840.10008.5.1.4.1.1.481.3": {
            "name": "rtss",
            "sliceable": False
        }
    }

    def __init__(self, progress_callback, interrupt_flag, patient_files,
                 patient_weight):
        """
        Class initialiser function.
        :param progress_callback: A signal that receives the current
                                  progress of the loading.
        :param interrupt_flag: A threading.Event() object that tells the
                               function to stop loading.
        :param patient_files: List of patient files.
        :param patient_weight: Weight of the patient in grams.
        """
        # Call the parent class
        super(BatchProcessSUV2ROI, self).__init__(progress_callback,
                                                  interrupt_flag,
                                                  patient_files)

        # Set class variables
        self.patient_dict_container = PatientDictContainer()
        self.required_classes = ['pet']
        self.ready = self.load_images(patient_files, self.required_classes)
        self.patient_weight = patient_weight

    def start(self):
        """
        Goes through the steps of the SUV2ROI conversion.
        :return: True if successful, False if not.
        """
        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if not self.ready:
            self.summary = "SKIP"
            return False

        # Update progress
        self.progress_callback.emit(("Setting up...", 30))

        # Initialise
        InitialModel.create_initial_model_batch()

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Check if the dataset is complete
        self.progress_callback.emit(("Checking dataset...", 40))
        dataset_complete = ImageLoading.is_dataset_dicom_rt(
            self.patient_dict_container.dataset)

        # Create SUV2ROI object
        suv2roi = SUV2ROI()
        suv2roi.set_patient_weight(self.patient_weight)
        self.progress_callback.emit(("Performing SUV2ROI... ", 50))

        # Stop loading
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        if not dataset_complete:
            # Check if RT struct file is missing. If yes, create one and
            # add its data to the patient dict container. Otherwise
            # return
            if not self.patient_dict_container.get("file_rtss"):
                self.progress_callback.emit(("Generating RT Struct", 55))
                self.create_new_rtstruct(self.progress_callback)

        # Calculate boundaries
        self.progress_callback.emit(("Calculating Boundaries", 60))
        contour_data = suv2roi.calculate_contours()

        if not contour_data:
            self.summary = "SUV_" + suv2roi.failure_reason
            return False

        # Stop loading. Clear the container here too, for consistency
        # with every other interrupt check (previously omitted).
        if self.interrupt_flag.is_set():
            self.patient_dict_container.clear()
            self.summary = "INTERRUPT"
            return False

        # Generate ROIs
        self.progress_callback.emit(("Generating ROIs...", 80))
        suv2roi.generate_ROI(contour_data, self.progress_callback)

        # Save new RTSS
        self.progress_callback.emit(("Saving RT Struct...", 90))
        self.save_rtss()
        return True