Example 1
def main(args):
    # ---------- SETTINGS ----------
    nameInputRelPath = 'LumenMasks_WorkData'

    nameInputFiles = 'grndtru*.npz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputMasksPath = workDirsManager.getNameExistPath(BaseDataPath, nameInputRelPath)

    listInputMasksFiles = findFilesDirAndCheck(InputMasksPath, nameInputFiles)


    list_ratio_back_foregrnd_class = []

    for i, in_mask_file in enumerate(listInputMasksFiles):
        print("\nInput: \'%s\'..." % (basename(in_mask_file)))

        mask_array  = FileReader.getImageArray(in_mask_file)

        if (args.masksToRegionInterest):

            (num_foregrnd_class, num_backgrnd_class) = compute_balance_classes_with_exclusion(mask_array)
        else:
            (num_foregrnd_class, num_backgrnd_class) = compute_balance_classes(mask_array)

        ratio_back_foregrnd_class = num_backgrnd_class / num_foregrnd_class

        list_ratio_back_foregrnd_class.append(ratio_back_foregrnd_class)

        print("Number of voxels of foreground masks: \'%s\', and background masks: \'%s\'..." %(num_foregrnd_class, num_backgrnd_class))
        print("Balance classes background / foreground masks: \'%s\'..." %(ratio_back_foregrnd_class))
    # endfor


    average_ratio_back_foregrnd_class = sum(list_ratio_back_foregrnd_class) / len(list_ratio_back_foregrnd_class)

    print("Average balance classes negative / positive: \'%s\'..." % (average_ratio_back_foregrnd_class))
def main(args):
    # ---------- SETTINGS ----------
    nameInputRelPath = '<input_dir>'
    nameOutputRelPath = '<output_dir>'

    nameInputFiles = '*.nii.gz'

    def nameOutputFiles(in_name):
        return in_name.replace('<suffix_ini>', '<suffix_end>')

    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(BASEDIR)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                 nameInputRelPath)
    OutputPath = workDirsManager.getNameNewPath(BaseDataPath,
                                                nameOutputRelPath)

    listInputFiles = findFilesDirAndCheck(InputPath, nameInputFiles)

    for i, in_file in enumerate(listInputFiles):
        print("\nInput: \'%s\'..." % (basename(in_file)))

        in_array = FileReader.getImageArray(in_file)

        # ...
        # write the processing code here and assign the result to 'out_array'
        # ...

        out_array = None

        out_file = joinpathnames(OutputPath,
                                 nameOutputFiles(basename(in_file)))
        print("Output: \'%s\', of dims \'%s\'..." %
              (basename(out_file), str(out_array.shape)))

        FileReader.writeImageArray(out_file, out_array)
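
The block above is a template: out_array must be assigned before the write at the end. As an illustration only (not part of the template), the placeholder could be filled with a simple voxel-wise operation such as clipping intensities to a window; the window values here are arbitrary assumptions:

import numpy as np

# hypothetical fill-in for the placeholder: clip intensities to a fixed window
out_array = np.clip(in_array, -1000, 400).astype(np.int16)

Example 3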
def main(args):
    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'RawReferResults'
    nameInputReferImagesRelPath = 'Images_Full'
    nameOutputImagesRelPath = 'RawReferResults_Orig_Full'

    nameInputImagesFiles = '*.nii.gz'
    nameInputReferImagesFiles = '*.nii.gz'
    # prefixPatternInputFiles = 'vol[0-9][0-9]_*'
    nameBoundingBoxes = 'found_boundingBoxes_original.npy'
    nameOutputImagesFiles = lambda in_name: filenamenoextension(in_name) + '.nii.gz'
    # ---------- SETTINGS ----------


    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath, nameInputImagesRelPath)
    InputReferImagesPath = workDirsManager.getNameExistPath(BaseDataPath, nameInputReferImagesRelPath)
    OutputImagesPath = workDirsManager.getNameNewPath(BaseDataPath, nameOutputImagesRelPath)

    listInputImagesFiles = findFilesDirAndCheck(InputImagesPath, nameInputImagesFiles)
    listInputReferImagesFiles = findFilesDirAndCheck(InputReferImagesPath, nameInputReferImagesFiles)

    dict_bounding_boxes = readDictionary(joinpathnames(BaseDataPath, nameBoundingBoxes))



    for i, in_image_file in enumerate(listInputImagesFiles):
        print("\nInput: \'%s\'..." % (basename(in_image_file)))

        in_referimage_file = findFileWithSamePrefix(basename(in_image_file), listInputReferImagesFiles,
                                                    prefix_pattern='vol[0-9][0-9]_')
        print("Refer image file: \'%s\'..." % (basename(in_referimage_file)))
        bounding_box = dict_bounding_boxes[filenamenoextension(in_referimage_file)]

        cropped_image_array = FileReader.getImageArray(in_image_file)
        print("Input cropped image size: \'%s\'..." %(str(cropped_image_array.shape)))

        # step 1: invert the image along the axial direction
        cropped_image_array = FlippingImages.compute(cropped_image_array, axis=0)
        # step 2: extend the cropped image to the full original size
        full_image_shape = FileReader.getImageSize(in_referimage_file)
        full_image_array = ExtendImages.compute3D(cropped_image_array, bounding_box, full_image_shape)
        print("Output full image size: \'%s\'..." % (str(full_image_array.shape)))

        out_image_file = joinpathnames(OutputImagesPath, nameOutputImagesFiles(in_image_file))
        print("Output: \'%s\', of dims \'%s\'..." %(basename(out_image_file), str(full_image_array.shape)))

        FileReader.writeImageArray(out_image_file, full_image_array)
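
ExtendImages.compute3D is defined elsewhere in the codebase. A plausible minimal equivalent, assuming the bounding box is stored as ((z0, z1), (x0, x1), (y0, y1)) index pairs:

import numpy as np

def extend_image_3D(cropped_array, bounding_box, out_shape, background_value=0):
    # place the cropped array back at its bounding-box location inside
    # a full-size array filled with the background value
    ((z0, z1), (x0, x1), (y0, y1)) = bounding_box
    out_array = np.full(out_shape, background_value, dtype=cropped_array.dtype)
    out_array[z0:z1, x0:x1, y0:y1] = cropped_array
    return out_array

Example 4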
def main(args):
    # First, set up the session on the selected device(s): CPU or GPU
    set_session_in_selected_device(use_GPU_device=True,
                                   type_GPU_installed=args.typeGPUinstalled)

    # ---------- SETTINGS ----------
    nameModelsRelPath = args.modelsdir

    # Get the file list:
    nameImagesFiles = 'images*' + getFileExtension(FORMATTRAINDATA)
    nameGroundTruthFiles = 'grndtru*' + getFileExtension(FORMATTRAINDATA)
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    TrainingDataPath = workDirsManager.getNameExistPath(
        workDirsManager.getNameTrainingDataPath())
    if args.use_restartModel:
        ModelsPath = workDirsManager.getNameExistPath(args.basedir,
                                                      nameModelsRelPath)
    else:
        ModelsPath = workDirsManager.getNameUpdatePath(args.basedir,
                                                       nameModelsRelPath)

    listTrainImagesFiles = findFilesDir(TrainingDataPath, nameImagesFiles)
    listTrainGroundTruthFiles = findFilesDir(TrainingDataPath,
                                             nameGroundTruthFiles)

    if args.useValidationData:
        ValidationDataPath = workDirsManager.getNameExistPath(
            workDirsManager.getNameValidationDataPath())

        listValidImagesFiles = findFilesDir(ValidationDataPath,
                                            nameImagesFiles)
        listValidGroundTruthFiles = findFilesDir(ValidationDataPath,
                                                 nameGroundTruthFiles)

        if not listValidImagesFiles or not listValidGroundTruthFiles:
            use_validation_data = False
            message = "No validation data used for training the model..."
            CatchWarningException(message)
        else:
            use_validation_data = True
    else:
        use_validation_data = False

    # BUILDING MODEL
    # ----------------------------------------------
    print("_" * 30)
    print("Building model...")
    print("_" * 30)

    if args.use_restartModel:
        initial_epoch = args.epoch_restart
        args.num_epochs += initial_epoch
    else:
        initial_epoch = 0

    if TYPE_DNNLIBRARY_USED == 'Keras':
        if (not args.use_restartModel) or (args.use_restartModel
                                           and args.restart_only_weights):
            model_constructor = DICTAVAILMODELS3D(
                IMAGES_DIMS_Z_X_Y,
                tailored_build_model=args.tailored_build_model,
                num_layers=args.num_layers,
                num_featmaps_base=args.num_featmaps_base,
                type_network=args.type_network,
                type_activate_hidden=args.type_activate_hidden,
                type_activate_output=args.type_activate_output,
                type_padding_convol=args.type_padding_convol,
                is_disable_convol_pooling_lastlayer=args.disable_convol_pooling_lastlayer,
                isuse_dropout=args.isUse_dropout,
                isuse_batchnormalize=args.isUse_batchnormalize)
            optimizer = DICTAVAILOPTIMIZERS(args.optimizer, lr=args.learn_rate)
            loss_fun = DICTAVAILLOSSFUNS(
                args.lossfun, is_masks_exclude=args.masksToRegionInterest).loss
            metrics = [
                DICTAVAILMETRICFUNS(imetrics,
                                    is_masks_exclude=args.masksToRegionInterest
                                    ).get_renamed_compute()
                for imetrics in args.listmetrics
            ]
            model = model_constructor.get_model()
            # compile model
            model.compile(optimizer=optimizer, loss=loss_fun, metrics=metrics)
            # output model summary
            model.summary()

            if args.use_restartModel:
                print("Loading saved weights and restarting...")
                modelSavedPath = joinpathnames(
                    ModelsPath, 'model_' + args.restart_modelFile + '.hdf5')
                print("Restarting from file: \'%s\'..." % (modelSavedPath))
                model.load_weights(modelSavedPath)

        else:  # args.use_restartModel and not args.restart_only_weights
            print(
                "Loading full model: weights, optimizer, loss, metrics ... and restarting..."
            )
            modelSavedPath = joinpathnames(
                ModelsPath, 'model_' + args.restart_modelFile + '.hdf5')
            print("Restarting from file: \'%s\'..." % (modelSavedPath))

            loss_fun = DICTAVAILLOSSFUNS(
                args.lossfun, is_masks_exclude=args.masksToRegionInterest).loss
            metrics = [
                DICTAVAILMETRICFUNS(imetrics,
                                    is_masks_exclude=args.masksToRegionInterest
                                    ).get_renamed_compute()
                for imetrics in args.listmetrics
            ]
            custom_objects = dict(
                map(lambda fun: (fun.__name__, fun), [loss_fun] + metrics))
            # load and compile model
            model = NeuralNetwork.get_load_saved_model(
                modelSavedPath, custom_objects=custom_objects)

        # Callbacks:
        callbacks_list = []
        callbacks_list.append(
            RecordLossHistory(ModelsPath, [
                DICTAVAILMETRICFUNS(imetrics,
                                    is_masks_exclude=args.masksToRegionInterest
                                    ).get_renamed_compute()
                for imetrics in args.listmetrics
            ]))
        filename = joinpathnames(
            ModelsPath, 'model_{epoch:02d}_{loss:.5f}_{val_loss:.5f}.hdf5')
        callbacks_list.append(
            callbacks.ModelCheckpoint(filename, monitor='loss', verbose=0))
        # callbacks_list.append(callbacks.EarlyStopping(monitor='val_loss', patience=10, mode='max'))

        # output model summary
        model.summary()

    elif TYPE_DNNLIBRARY_USED == 'Pytorch':
        if (not args.use_restartModel) or (args.use_restartModel
                                           and args.restart_only_weights):
            model_net = DICTAVAILMODELS3D(IMAGES_DIMS_Z_X_Y)
            optimizer = DICTAVAILOPTIMIZERS(args.optimizer,
                                            model_net.parameters(),
                                            lr=args.learn_rate)
            loss_fun = DICTAVAILLOSSFUNS(
                args.lossfun, is_masks_exclude=args.masksToRegionInterest)
            trainer = Trainer(model_net, optimizer, loss_fun)

            if args.use_restartModel:
                print("Loading saved weights and restarting...")
                modelSavedPath = joinpathnames(
                    ModelsPath, 'model_' + args.restart_modelFile + '.pt')
                print("Restarting from file: \'%s\'..." % (modelSavedPath))
                trainer.load_model_only_weights(modelSavedPath)

        else:  # args.use_restartModel and not args.restart_only_weights
            print(
                "Loading full model: weights, optimizer, loss, metrics ... and restarting..."
            )
            modelSavedPath = joinpathnames(
                ModelsPath, 'model_' + args.restart_modelFile + '.pt')
            print("Restarting from file: \'%s\'..." % (modelSavedPath))
            trainer = Trainer.load_model_full(modelSavedPath)

        trainer.setup_losshistory_filepath(
            ModelsPath, isexists_lossfile=args.use_restartModel)
        trainer.setup_validate_model(freq_validate_model=FREQVALIDATEMODEL)
        trainer.setup_savemodel_filepath(
            ModelsPath,
            type_save_models='full_model',
            freq_save_intermodels=FREQSAVEINTERMODELS)

        # output model summary
        #trainer.get_summary_model()
    # ----------------------------------------------

    # LOADING DATA
    # ----------------------------------------------
    print("-" * 30)
    print("Loading data...")
    print("-" * 30)

    print("Load Training data...")
    if (args.slidingWindowImages or args.transformationImages
            or args.elasticDeformationImages):
        print(
            "Generate Training images with Batch Generator of Training data..."
        )
        (train_xData, train_yData) = LoadDataManager.loadData_ListFiles(
            listTrainImagesFiles, listTrainGroundTruthFiles)
        train_images_generator = getImagesDataGenerator3D(
            args.slidingWindowImages, args.prop_overlap_Z_X_Y,
            args.transformationImages, args.elasticDeformationImages)
        train_batch_data_generator = TrainingBatchDataGenerator(
            IMAGES_DIMS_Z_X_Y,
            train_xData,
            train_yData,
            train_images_generator,
            batch_size=args.batch_size,
            shuffle=SHUFFLETRAINDATA)
        print("Number volumes: %s. Total Data batches generated: %s..." %
              (len(listTrainImagesFiles), len(train_batch_data_generator)))
    else:
        (train_xData, train_yData
         ) = LoadDataManagerInBatches(IMAGES_DIMS_Z_X_Y).loadData_ListFiles(
             listTrainImagesFiles, listTrainGroundTruthFiles)
        print("Number volumes: %s. Total Data batches generated: %s..." %
              (len(listTrainImagesFiles), len(train_xData)))

    if use_validation_data:
        print("Load Validation data...")
        if (args.slidingWindowImages or args.transformationImages
                or args.elasticDeformationImages):
            print(
                "Generate Validation images with Batch Generator of Validation data..."
            )
            args.transformationImages = args.transformationImages and args.useTransformOnValidationData
            args.elasticDeformationImages = args.elasticDeformationImages and args.useTransformOnValidationData
            (valid_xData, valid_yData) = LoadDataManager.loadData_ListFiles(
                listValidImagesFiles, listValidGroundTruthFiles)
            valid_images_generator = getImagesDataGenerator3D(
                args.slidingWindowImages, args.prop_overlap_Z_X_Y,
                args.transformationImages, args.elasticDeformationImages)
            valid_batch_data_generator = TrainingBatchDataGenerator(
                IMAGES_DIMS_Z_X_Y,
                valid_xData,
                valid_yData,
                valid_images_generator,
                batch_size=args.batch_size,
                shuffle=SHUFFLETRAINDATA)
            validation_data = valid_batch_data_generator
            print("Number volumes: %s. Total Data batches generated: %s..." %
                  (len(listValidImagesFiles), len(valid_batch_data_generator)))
        else:
            (valid_xData, valid_yData) = LoadDataManagerInBatches(
                IMAGES_DIMS_Z_X_Y).loadData_ListFiles(
                    listValidImagesFiles, listValidGroundTruthFiles)
            validation_data = (valid_xData, valid_yData)
            print("Number volumes: %s. Total Data batches generated: %s..." %
                  (len(listValidImagesFiles), len(valid_xData)))
    else:
        validation_data = None

    # TRAINING MODEL
    # ----------------------------------------------
    print("-" * 30)
    print("Training model...")
    print("-" * 30)

    if TYPE_DNNLIBRARY_USED == 'Keras':
        if (args.slidingWindowImages or args.transformationImages
                or args.elasticDeformationImages):
            model.fit_generator(train_batch_data_generator,
                                epochs=args.num_epochs,
                                steps_per_epoch=args.max_steps_epoch,
                                verbose=1,
                                callbacks=callbacks_list,
                                validation_data=validation_data,
                                shuffle=SHUFFLETRAINDATA,
                                initial_epoch=initial_epoch)
        else:
            model.fit(train_xData,
                      train_yData,
                      batch_size=args.batch_size,
                      epochs=args.num_epochs,
                      steps_per_epoch=args.max_steps_epoch,
                      verbose=1,
                      callbacks=callbacks_list,
                      validation_data=validation_data,
                      shuffle=SHUFFLETRAINDATA,
                      initial_epoch=initial_epoch)

    elif TYPE_DNNLIBRARY_USED == 'Pytorch':
        trainer.train(train_batch_data_generator,
                      num_epochs=args.num_epochs,
                      max_steps_epoch=args.max_steps_epoch,
                      valid_data_generator=validation_data,
                      initial_epoch=initial_epoch)
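
All the scripts in this collection expose main(args). A minimal sketch of the kind of argparse driver that could feed this training script; the argument names are taken from the attributes used above, only a few of the many arguments are shown, and the defaults are assumptions:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('basedir', type=str)
    parser.add_argument('--modelsdir', type=str, default='Models')
    parser.add_argument('--typeGPUinstalled', type=str, default='single_GPU')  # assumed default
    parser.add_argument('--num_epochs', type=int, default=1000)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--use_restartModel', action='store_true')
    args = parser.parse_args()
    main(args)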
Example 5
def main(args):
    # ---------- SETTINGS ----------
    nameInputRelPath = 'RawImages'

    nameInputFiles = '*.dcm'

    nameOrigVoxelSize_FileNpy = 'original_voxelSize.npy'
    nameOrigVoxelSize_FileCsv = 'original_voxelSize.csv'
    nameRescaleFactors_FileNpy = 'rescaleFactors_images_0.6x0.6x0.6.npy'
    nameRescaleFactors_FileCsv = 'rescaleFactors_images_0.6x0.6x0.6.csv'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(BASEDIR)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                 nameInputRelPath)

    listInputFiles = findFilesDirAndCheck(InputPath, nameInputFiles)

    dict_voxelSizes = OrderedDict()

    for i, in_file in enumerate(listInputFiles):
        print("\nInput: \'%s\'..." % (basename(in_file)))

        voxel_size = DICOMreader.getVoxelSize(in_file)
        print("Voxel Size: \'%s\'..." % (str(voxel_size)))

        dict_voxelSizes[filenamenoextension(in_file)] = voxel_size
    #endfor

    # Save dictionary in file
    nameoutfile = joinpathnames(BaseDataPath, nameOrigVoxelSize_FileNpy)
    saveDictionary(nameoutfile, dict_voxelSizes)
    nameoutfile = joinpathnames(BaseDataPath, nameOrigVoxelSize_FileCsv)
    saveDictionary_csv(nameoutfile, dict_voxelSizes)

    data = np.array(list(dict_voxelSizes.values()))
    mean = np.mean(data, axis=0)
    print("Mean value: \'%s\'..." % (mean))
    median = np.median(data, axis=0)
    print("Median value: \'%s\'..." % (median))

    if args.fixedRescaleRes:
        final_rescale_res = args.fixedRescaleRes
    else:
        # if no fixed resolution is specified, take the median over the dataset
        final_rescale_res = median
    print("Final aimed resolution: \'%s\'..." % (str(final_rescale_res)))

    dict_rescaleFactors = OrderedDict()

    for key, value in dict_voxelSizes.items():
        print("\nKey: \'%s\'..." % (key))

        rescale_factor = tuple(np.array(value) / np.array(final_rescale_res))
        print("Computed rescale factor: \'%s\'..." % (str(rescale_factor)))

        dict_rescaleFactors[key] = rescale_factor
    #endfor

    # Save dictionary in file
    nameoutfile = joinpathnames(BaseDataPath, nameRescaleFactors_FileNpy)
    saveDictionary(nameoutfile, dict_rescaleFactors)
    nameoutfile = joinpathnames(BaseDataPath, nameRescaleFactors_FileCsv)
    saveDictionary_csv(nameoutfile, dict_rescaleFactors)
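
saveDictionary and saveDictionary_csv come from the project's file utilities; minimal sketches of equivalents, assuming the dictionary values are tuples as above:

import csv
import numpy as np

def save_dictionary_npy(filename, in_dict):
    # np.save serializes a plain dict as a 0-d object array;
    # read it back with np.load(filename, allow_pickle=True).item()
    np.save(filename, in_dict)

def save_dictionary_csv(filename, in_dict):
    with open(filename, 'w') as fout:
        writer = csv.writer(fout)
        for key, value in in_dict.items():
            writer.writerow([key] + list(value))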
Example 6
def main(args):

    # ---------- SETTINGS ----------
    nameRawImagesRelPath = 'RawImages'
    nameComputeMasksRelPath = 'ProbNnetoutMasks'

    # Get the file list:
    nameImagesFiles = '*.dcm'
    nameOutMasksFiles = lambda in_name: filenamenoextension(
        in_name) + '_probnnetout.nii.gz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameRawImagesRelPath)
    ComputeMasksPath = workDirsManager.getNameNewPath(BaseDataPath,
                                                      nameComputeMasksRelPath)

    listImagesFiles = findFilesDir(InputImagesPath, nameImagesFiles)
    nbImagesFiles = len(listImagesFiles)

    # Retrieve training model
    modelConstructor = DICTAVAILNETWORKS3D(IMAGES_DIMS_Z_X_Y, args.model)
    modelConstructor.type_padding = 'valid'

    if args.size_out_nnet is None:
        args.size_out_nnet = modelConstructor.get_size_output_full_Unet()

    print(
        "For input images of size: %s, the network outputs images of size: %s..."
        % (IMAGES_DIMS_Z_X_Y, args.size_out_nnet))

    for images_file in listImagesFiles:

        print('\'%s\'...' % (images_file))

        images_array = FileReader.getImageArray(images_file)

        if (args.invertImageAxial):
            images_array = FlippingImages.compute(images_array, axis=0)

        print("Compute masks proportion output...")

        if (args.slidingWindowImages):

            images_reconstructor = SlidingWindowReconstructorImages3D(
                IMAGES_DIMS_Z_X_Y,
                images_array.shape,
                args.prop_overlap_Z_X_Y,
                size_outUnet_sample=args.size_out_nnet)
        else:
            images_reconstructor = SlidingWindowReconstructorImages3D(
                IMAGES_DIMS_Z_X_Y,
                images_array.shape, (0.0, 0.0, 0.0),
                size_outUnet_sample=args.size_out_nnet)

        masks_probValidConvNnet_output_array = images_reconstructor.get_filtering_map_array()

        out_masksFilename = joinpathnames(ComputeMasksPath,
                                          nameOutMasksFiles(images_file))

        FileReader.writeImageArray(out_masksFilename,
                                   masks_probValidConvNnet_output_array)
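
The filtering map returned by get_filtering_map_array is not shown here; conceptually it is a per-voxel weight accumulated over all sliding-window patches. A rough sketch of that idea, assuming a regular grid of patches:

import numpy as np

def compute_overlap_count_map(full_shape, patch_shape, step):
    # count how many sliding-window patches cover each voxel, so that
    # overlapping patch predictions can later be averaged (weight = 1 / count)
    count_map = np.zeros(full_shape, dtype=np.float32)
    (pz, px, py) = patch_shape
    (sz, sx, sy) = step
    for z in range(0, full_shape[0] - pz + 1, sz):
        for x in range(0, full_shape[1] - px + 1, sx):
            for y in range(0, full_shape[2] - py + 1, sy):
                count_map[z:z + pz, x:x + px, y:y + py] += 1.0
    return count_map

Example 7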
def main(args):
    # ---------- SETTINGS ----------
    nameInputMasksRelPath  = 'Myrian-opfronted_3b_i5'
    nameCentrelinesRelPath = 'Myrian-opfronted_3b_i5'
    namePredictMasksFiles  = '*thres0-5.nii.gz'
    nameInputMasksFiles    = '*outerwall.nii.gz'
    nameCentrelinesFiles   = '*centrelines_smoothed.nii.gz'

    # template search files
    tempSearchInputFiles  = 'av[0-9]*'

    # create file to save FROC values
    temp_outfilename  = 'res_completeness_voleakage.txt'
    # ---------- SETTINGS ----------


    workDirsManager      = WorkDirsManager(args.basedir)
    BaseDataPath         = workDirsManager.getNameBaseDataPath()
    InputPredictMasksPath= workDirsManager.getNameExistPath(args.basedir, args.predictionsdir)
    InputMasksPath       = workDirsManager.getNameExistPath(BaseDataPath, nameInputMasksRelPath)
    CentrelinesPath      = workDirsManager.getNameExistPath(BaseDataPath, nameCentrelinesRelPath)

    listPredictMasksFiles    = findFilesDir(InputPredictMasksPath,namePredictMasksFiles)
    listGrndTruthMasksFiles  = findFilesDir(InputMasksPath,       nameInputMasksFiles)
    listCentrelinesFiles     = findFilesDir(CentrelinesPath,      nameCentrelinesFiles)

    nbPredictionsFiles    = len(listPredictMasksFiles)
    nbGrndTruthMasksFiles = len(listGrndTruthMasksFiles)
    nbCentrelinesFiles    = len(listCentrelinesFiles)

    # Run checkers
    if (nbPredictionsFiles == 0):
        message = "0 Predictions found in dir \'%s\'" %(InputPredictMasksPath)
        CatchErrorException(message)
    if (nbGrndTruthMasksFiles == 0):
        message = "0 Ground-truth Masks found in dir \'%s\'" %(InputMasksPath)
        CatchErrorException(message)
    if (nbGrndTruthMasksFiles != nbCentrelinesFiles):
        message = "num Ground-truth Masks %i not equal to num Centrelines %i" %(nbGrndTruthMasksFiles, nbCentrelinesFiles)
        CatchErrorException(message)


    out_filename = joinpathnames(InputPredictMasksPath, temp_outfilename)
    fout = open(out_filename, 'w')

    strheader = '/case/ /completeness/ /volume_leakage/ /dice_coeff/' +'\n'
    fout.write(strheader)


    completeness_list  = []
    volumeleakage_list = []
    dicecoeff_list     = []


    for i, predict_masks_file in enumerate(listPredictMasksFiles):

        print('\'%s\'...' %(predict_masks_file))

        name_prefix_case = getExtractSubstringPattern(basename(predict_masks_file),
                                                      tempSearchInputFiles)

        for iterfile_1, iterfile_2 in zip(listGrndTruthMasksFiles,
                                          listCentrelinesFiles):
            if name_prefix_case in iterfile_1:
                grndtruth_masks_file = iterfile_1
                centrelines_file     = iterfile_2
                break
        #endfor
        print("assigned to \'%s\' and \'%s\'..." %(basename(grndtruth_masks_file), basename(centrelines_file)))

        predict_masks_array   = FileReader.getImageArray(predict_masks_file)
        grndtruth_masks_array = FileReader.getImageArray(grndtruth_masks_file)
        centrelines_array     = FileReader.getImageArray(centrelines_file)


        dicecoeff = DiceCoefficient().compute_np(grndtruth_masks_array, predict_masks_array)

        completeness = AirwayCompleteness().compute_np(centrelines_array, predict_masks_array) * 100

        volumeleakage = AirwayVolumeLeakage().compute_np(grndtruth_masks_array, predict_masks_array) * 100


        completeness_list .append(completeness)
        volumeleakage_list.append(volumeleakage)
        dicecoeff_list   .append(dicecoeff)

        strdata = '\'%s\' %0.3f %0.3f %0.6f\n'%(name_prefix_case, completeness, volumeleakage, dicecoeff)
        fout.write(strdata)

        print("Computed Dice coefficient: \'%s\'..." %(dicecoeff))
        print("Computed Completeness: \'%s\'..." %(completeness))
        print("Computed Volume Leakage: \'%s\'..." % (volumeleakage))
    #endfor


    # completeness_mean = np.mean(completeness_list)
    # volumeleakage_mean= np.mean(volumeleakage_list)
    # dicecoeff_mean    = np.mean(dicecoeff_list)
    #
    # strdata = str(name_prefix_case) + ' ' + str(completeness_mean) + ' ' + str(volumeleakage_mean) + ' ' + str(dicecoeff_mean) +'\n'
    # fout.write(strdata)
    #
    # print("Mean Dice coefficient: %s..." % (dicecoeff))
    # print("Mean Completeness: %s..." % (completeness))
    # print("Mean Volume Leakage 1: %s..." % (volumeleakage))

    fout.close()
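
DiceCoefficient, AirwayCompleteness and AirwayVolumeLeakage are project classes. Sketches of their usual definitions in the airway-segmentation literature, assuming binary 0/1 numpy arrays (an assumption; the project classes may differ in detail):

import numpy as np

def dice_coefficient(grndtruth, predict):
    # Dice = 2 |A n B| / (|A| + |B|)
    intersection = np.sum(grndtruth * predict)
    return 2.0 * intersection / (np.sum(grndtruth) + np.sum(predict))

def airway_completeness(centrelines, predict):
    # fraction of ground-truth centreline voxels captured by the prediction
    return np.sum(centrelines * predict) / float(np.sum(centrelines))

def airway_volume_leakage(grndtruth, predict):
    # predicted volume outside the ground truth, relative to ground-truth volume
    return np.sum(predict * (1 - grndtruth)) / float(np.sum(grndtruth))

Example 8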
def main(args):
    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'Images_Full'
    nameInputGroundTruthRelPath = 'Airways_DistTrans_Full'
    nameInputRoiMasksRelPath = 'Airways_DilatedMasks_Full'
    nameOutputImagesRelPath = 'Images_WorkData_2'
    nameOutputGroundTruthRelPath = 'LumenDistTransClipped_WorkData'

    nameInputImagesFiles = '*.nii.gz'
    nameInputGroundTruthFiles = '*dist_clipdila_normal_power2.nii.gz'
    nameInputRoiMasksFiles = '*_dilated10.nii.gz'

    nameRescaleFactors = 'rescaleFactors_images.npy'
    nameCropBoundingBoxes = 'cropBoundingBoxes_images.npy'

    nameOutputImageFiles = 'images-%0.2i_dim%s' + getFileExtension(
        FORMATTRAINDATA)
    nameOutputGroundTruthFiles = 'grndtru-%0.2i_dim%s' + getFileExtension(
        FORMATTRAINDATA)

    if (args.saveVisualizeProcData):
        nameVisualOutputRelPath = 'VisualizeWorkData'
        nameOutputVisualFiles = lambda filename: basename(filename).replace(
            '.npz', '.nii.gz')
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameInputImagesRelPath)
    InputGroundTruthPath = workDirsManager.getNameExistPath(
        BaseDataPath, nameInputGroundTruthRelPath)
    OutputImagesPath = workDirsManager.getNameNewPath(BaseDataPath,
                                                      nameOutputImagesRelPath)
    OutputGroundTruthPath = workDirsManager.getNameNewPath(
        BaseDataPath, nameOutputGroundTruthRelPath)

    if (args.saveVisualizeProcData):
        VisualOutputPath = workDirsManager.getNameNewPath(
            BaseDataPath, nameVisualOutputRelPath)

    listInputImagesFiles = findFilesDirAndCheck(InputImagesPath,
                                                nameInputImagesFiles)
    listInputGroundTruthFiles = findFilesDirAndCheck(
        InputGroundTruthPath, nameInputGroundTruthFiles)

    if (len(listInputImagesFiles) != len(listInputGroundTruthFiles)):
        message = 'num files in dir 1 \'%i\' not equal to num files in dir 2 \'%i\'...' % (
            len(listInputImagesFiles), len(listInputGroundTruthFiles))
        CatchErrorException(message)

    if (args.masksToRegionInterest):
        InputRoiMasksPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputRoiMasksRelPath)
        listInputRoiMaskFiles = findFilesDirAndCheck(InputRoiMasksPath,
                                                     nameInputRoiMasksFiles)

    if (args.rescaleImages):
        dict_rescaleFactors = readDictionary(
            joinpathnames(BaseDataPath, nameRescaleFactors))

    if (args.cropImages):
        dict_cropBoundingBoxes = readDictionary(
            joinpathnames(BaseDataPath, nameCropBoundingBoxes))

    # START ANALYSIS
    # ------------------------------
    print("-" * 30)
    print("Preprocessing...")
    print("-" * 30)

    for i, (in_image_file, in_grndtru_file) in enumerate(
            zip(listInputImagesFiles, listInputGroundTruthFiles)):
        print("\nInput: \'%s\'..." % (basename(in_image_file)))
        print("And: \'%s\'..." % (basename(in_grndtru_file)))

        (image_array, grndtru_array) = FileReader.get2ImageArraysAndCheck(
            in_image_file, in_grndtru_file)
        print("Original dims : \'%s\'..." % (str(image_array.shape)))

        if (args.isClassificationData):
            print("Convert to binary masks (0, 1)...")
            grndtru_array = OperationBinaryMasks.process_masks(grndtru_array)

        if (args.masksToRegionInterest):
            print("Mask input to RoI: lungs...")
            in_roimask_file = listInputRoiMaskFiles[i]
            print("RoI mask (lungs) file: \'%s\'..." %
                  (basename(in_roimask_file)))

            roimask_array = FileReader.getImageArray(in_roimask_file)
            grndtru_array = OperationBinaryMasks.apply_mask_exclude_voxels(
                grndtru_array, roimask_array)

        if (args.rescaleImages):
            rescale_factor = dict_rescaleFactors[filenamenoextension(
                in_image_file)]
            print("Rescale image with a factor: \'%s\'..." %
                  (str(rescale_factor)))

            image_array = RescaleImages.compute3D(image_array, rescale_factor)
            grndtru_array = RescaleImages.compute3D(grndtru_array,
                                                    rescale_factor)
            print("Final dims: %s..." % (str(image_array.shape)))

        if (args.cropImages):
            crop_bounding_box = dict_cropBoundingBoxes[filenamenoextension(
                in_image_file)]
            print("Crop image to bounding-box: \'%s\'..." %
                  (str(crop_bounding_box)))

            image_array = CropImages.compute3D(image_array, crop_bounding_box)
            grndtru_array = CropImages.compute3D(grndtru_array,
                                                 crop_bounding_box)
            print("Final dims: %s..." % (str(image_array.shape)))

        # if (args.extendSizeImages):
        #     print("Extend images to fixed size \'%s\':..." %(str(CROPSIZEBOUNDINGBOX)))
        #     size_new_image = (image_array.shape[0], CROPSIZEBOUNDINGBOX[0], CROPSIZEBOUNDINGBOX[1])
        #     backgr_val_images = -1000
        #     backgr_val_masks = -1 if args.masksToRegionInterest else 0
        #     bounding_box = dict_bounding_boxes[filenamenoextension(in_image_file)]
        #
        #     image_array = ExtendImages.compute3D(image_array, bounding_box, size_new_image, background_value=backgr_val_images)
        #     grndtru_array = ExtendImages.compute3D(grndtru_array, bounding_box, size_new_image, background_value=backgr_val_masks)
        #     print("Final dims: %s..." % (str(image_array.shape)))

        out_image_file = joinpathnames(
            OutputImagesPath,
            nameOutputImageFiles % (i + 1, tuple2str(image_array.shape)))
        out_grndtru_file = joinpathnames(
            OutputGroundTruthPath, nameOutputGroundTruthFiles %
            (i + 1, tuple2str(grndtru_array.shape)))
        print("Output: \'%s\', of dims \'%s\'..." % (basename(out_image_file),
                                                     (image_array.shape)))
        print("And: \'%s\', of dims \'%s\'..." %
              (basename(out_grndtru_file), str(grndtru_array.shape)))

        FileReader.writeImageArray(out_image_file, image_array)
        FileReader.writeImageArray(out_grndtru_file, grndtru_array)

        if (args.saveVisualizeProcData):
            print("Saving working data to visualize...")
            out_image_file = joinpathnames(
                VisualOutputPath, nameOutputVisualFiles(out_image_file))
            out_grndtru_file = joinpathnames(
                VisualOutputPath, nameOutputVisualFiles(out_grndtru_file))

            FileReader.writeImageArray(out_image_file, image_array)
            FileReader.writeImageArray(out_grndtru_file, grndtru_array)
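
CropImages.compute3D is the counterpart of the extend operation sketched earlier; assuming the same ((z0, z1), (x0, x1), (y0, y1)) bounding-box format, it reduces to a slice:

def crop_image_3D(image_array, crop_bounding_box):
    # slice the bounding box out of the full array
    ((z0, z1), (x0, x1), (y0, y1)) = crop_bounding_box
    return image_array[z0:z1, x0:x1, y0:y1]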
Example 9
def main(args):
    # ---------- SETTINGS ----------
    max_size_bounding_box = (0, 0, 0)
    min_size_bounding_box = (1.0e+03, 1.0e+03, 1.0e+03)
    voxels_buffer_border = (0, 0, 0, 0)

    nameInputRelPath = 'Images_Rescaled_Full'
    nameRoiMaskRelPath = 'Lungs_Rescaled_Full'

    nameInputFiles = '*.nii.gz'
    nameRoiMasksFiles = '*.nii.gz'

    nameCropBoundingBoxes_FileNpy = 'cropBoundingBoxes_352x240_rescaled0.6x0.6x0.6_images.npy'
    nameCropBoundingBoxes_FileCsv = 'cropBoundingBoxes_352x240_rescaled0.6x0.6x0.6_images.csv'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                 nameInputRelPath)
    RoiMasksPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                    nameRoiMaskRelPath)

    listInputImageFiles = findFilesDirAndCheck(InputPath, nameInputFiles)
    listRoiMaskFiles = findFilesDirAndCheck(RoiMasksPath, nameRoiMasksFiles)

    dict_cropBoundingBoxes = OrderedDict()

    for in_file, roimask_file in zip(listInputImageFiles, listRoiMaskFiles):
        print("\nInput: \'%s\'..." % (basename(in_file)))
        print("RoI mask (lungs) file: \'%s\'..." % (basename(in_file)))

        roimask_array = FileReader.getImageArray(roimask_file)

        bounding_box = BoundingBoxes.compute_bounding_box_contain_masks_with_border_effects_2D(
            roimask_array, voxels_buffer_border=voxels_buffer_border)

        size_bounding_box = BoundingBoxes.compute_size_bounding_box(
            bounding_box)
        print("Bounding-box: \'%s\', of size: \'%s\'" %
              (bounding_box, size_bounding_box))

        max_size_bounding_box = BoundingBoxes.compute_max_size_bounding_box(
            size_bounding_box, max_size_bounding_box)

        # Compute new bounding-box, of fixed size 'args.cropSizeBoundingBox', and with same center as original 'bounding_box'
        processed_bounding_box = BoundingBoxes.compute_bounding_box_centered_bounding_box_2D(
            bounding_box, args.cropSizeBoundingBox, roimask_array.shape)

        size_processed_bounding_box = BoundingBoxes.compute_size_bounding_box(
            processed_bounding_box)
        print("Processed bounding-box: \'%s\', of size: \'%s\'" %
              (processed_bounding_box, size_processed_bounding_box))

        dict_cropBoundingBoxes[filenamenoextension(
            in_file)] = processed_bounding_box
    #endfor

    print("max size bounding-box found: \'%s\'; set size bounding-box \'%s\'" %
          (max_size_bounding_box, args.cropSizeBoundingBox))

    # Save dictionary in file
    nameoutfile = joinpathnames(BaseDataPath, nameCropBoundingBoxes_FileNpy)
    saveDictionary(nameoutfile, dict_cropBoundingBoxes)
    nameoutfile = joinpathnames(BaseDataPath, nameCropBoundingBoxes_FileCsv)
    saveDictionary_csv(nameoutfile, dict_cropBoundingBoxes)
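
compute_bounding_box_centered_bounding_box_2D recenters a fixed-size (x, y) box on the original one and keeps it inside the image. A minimal per-axis sketch of that logic:

def center_fixed_size_interval(orig_begin, orig_end, fixed_size, max_coord):
    # center an interval of length 'fixed_size' on (orig_begin, orig_end),
    # then shift it so it stays within [0, max_coord]
    center = (orig_begin + orig_end) // 2
    begin = center - fixed_size // 2
    begin = max(0, min(begin, max_coord - fixed_size))
    return (begin, begin + fixed_size)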
Example 10
def main(args):
    # ---------- SETTINGS ----------
    nameInputPredictionsRelPath = args.predictionsdir
    nameInputReferMasksRelPath = 'Airways_Full'
    nameInputRoiMasksRelPath = 'Lungs_Full'
    nameInputCentrelinesRelPath = 'Centrelines_Full'
    nameOutputPredictionsRelPath = nameInputPredictionsRelPath

    nameInputPredictionsFiles = 'predict-probmaps_*.nii.gz'
    nameInputReferMasksFiles = '*_lumen.nii.gz'
    nameInputRoiMasksFiles = '*_lungs.nii.gz'
    nameInputCentrelinesFiles = '*_centrelines.nii.gz'
    # prefixPatternInputFiles = 'av[0-9][0-9]*'

    if (args.calcMasksThresholding):
        suffixPostProcessThreshold = '_thres%s' % (str(
            args.thresholdValue).replace('.', '-'))
        if (args.attachTracheaToCalcMasks):
            suffixPostProcessThreshold += '_withtrachea'
        else:
            suffixPostProcessThreshold += '_notrachea'
    else:
        suffixPostProcessThreshold = ''

    nameAccuracyPredictFiles = 'predict_accuracy_tests%s.txt' % (
        suffixPostProcessThreshold)

    def nameOutputFiles(in_name, in_acc):
        out_name = filenamenoextension(in_name).replace(
            'predict-probmaps',
            'predict-binmasks') + '_acc%2.0f' % (np.round(100 * in_acc))
        return out_name + '%s.nii.gz' % (suffixPostProcessThreshold)

    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPredictionsPath = workDirsManager.getNameExistPath(
        args.basedir, nameInputPredictionsRelPath)
    InputReferenceMasksPath = workDirsManager.getNameExistPath(
        BaseDataPath, nameInputReferMasksRelPath)
    OutputPredictionsPath = workDirsManager.getNameNewPath(
        args.basedir, nameOutputPredictionsRelPath)

    listInputPredictionsFiles = findFilesDirAndCheck(
        InputPredictionsPath, nameInputPredictionsFiles)
    listInputReferenceMasksFiles = findFilesDirAndCheck(
        InputReferenceMasksPath, nameInputReferMasksFiles)

    if (args.masksToRegionInterest):
        InputRoiMasksPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputRoiMasksRelPath)
        listInputRoiMasksFiles = findFilesDirAndCheck(InputRoiMasksPath,
                                                      nameInputRoiMasksFiles)

        if (args.attachTracheaToCalcMasks):

            def compute_trachea_masks(refermask_array, roimask_array):
                return np.where(roimask_array == 1, 0, refermask_array)

    listPostProcessMetrics = OrderedDict()
    list_isUseCenlineFiles = []
    for imetrics in args.listPostprocessMetrics:
        listPostProcessMetrics[imetrics] = DICTAVAILMETRICFUNS(
            imetrics).compute_np_safememory
        list_isUseCenlineFiles.append(
            DICTAVAILMETRICFUNS(imetrics)._is_cenline_grndtru)
    #endfor
    isuse_centreline_files = any(list_isUseCenlineFiles)

    if isuse_centreline_files:
        InputCentrelinesPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputCentrelinesRelPath)
        listInputCentrelinesFiles = findFilesDirAndCheck(
            InputCentrelinesPath, nameInputCentrelinesFiles)

    out_predictAccuracyFilename = joinpathnames(InputPredictionsPath,
                                                nameAccuracyPredictFiles)
    fout = open(out_predictAccuracyFilename, 'w')
    strheader = '/case/ ' + ' '.join(
        ['/%s/' % (key)
         for (key, _) in listPostProcessMetrics.items()]) + '\n'
    fout.write(strheader)

    for i, in_prediction_file in enumerate(listInputPredictionsFiles):
        print("\nInput: \'%s\'..." % (basename(in_prediction_file)))

        in_refermask_file = findFileWithSamePrefix(
            basename(in_prediction_file).replace('predict-probmaps', ''),
            listInputReferenceMasksFiles,
            prefix_pattern='vol[0-9][0-9]_')
        print("Refer mask file: \'%s\'..." % (basename(in_refermask_file)))

        prediction_array = FileReader.getImageArray(in_prediction_file)
        refermask_array = FileReader.getImageArray(in_refermask_file)
        print("Predictions of size: %s..." % (str(prediction_array.shape)))

        if (args.calcMasksThresholding):
            print(
                "Compute prediction masks by thresholding probability maps to value %s..."
                % (args.thresholdValue))
            prediction_array = ThresholdImages.compute(prediction_array,
                                                       args.thresholdValue)

        if isuse_centreline_files:
            in_centreline_file = findFileWithSamePrefix(
                basename(in_prediction_file).replace('predict-probmaps', ''),
                listInputCentrelinesFiles,
                prefix_pattern='vol[0-9][0-9]_')
            print("Centrelines file: \'%s\'..." %
                  (basename(in_centreline_file)))
            centrelines_array = FileReader.getImageArray(in_centreline_file)

        if (args.masksToRegionInterest):
            in_roimask_file = findFileWithSamePrefix(
                basename(in_prediction_file).replace('predict-probmaps', ''),
                listInputRoiMasksFiles,
                prefix_pattern='vol[0-9][0-9]_')
            print("RoI mask (lungs) file: \'%s\'..." %
                  (basename(in_roimask_file)))
            roimask_array = FileReader.getImageArray(in_roimask_file)

            if (args.attachTracheaToCalcMasks):
                print("Attach trachea mask to computed prediction masks...")
                trachea_masks_array = compute_trachea_masks(
                    refermask_array, roimask_array)
                prediction_array = OperationBinaryMasks.join_two_binmasks_one_image(
                    prediction_array, trachea_masks_array)
            else:
                prediction_array = OperationBinaryMasks.apply_mask_exclude_voxels_fillzero(
                    prediction_array, roimask_array)
                refermask_array = OperationBinaryMasks.apply_mask_exclude_voxels_fillzero(
                    refermask_array, roimask_array)
                if isuse_centreline_files:
                    centrelines_array = OperationBinaryMasks.apply_mask_exclude_voxels_fillzero(
                        centrelines_array, roimask_array)

        # ---------- COMPUTE POST PROCESSING MEASURES ----------
        list_postprocess_measures = OrderedDict()
        for ind, (key, value) in enumerate(listPostProcessMetrics.items()):
            if list_isUseCenlineFiles[ind]:
                acc_value = value(centrelines_array, prediction_array)
            else:
                acc_value = value(refermask_array, prediction_array)
            list_postprocess_measures[key] = acc_value
        # endfor
        main_postprocess_accuracy = list(list_postprocess_measures.values())[0]

        # print list accuracies on screen and in file
        prefix_casename = getSubstringPatternFilename(
            basename(in_prediction_file), substr_pattern='vol[0-9][0-9]_')[:-1]
        strdata = '\'%s\'' % (prefix_casename)
        for (key, value) in list_postprocess_measures.items():
            print("Metric \'%s\': %s..." % (key, value))
            strdata += ' %s' % (str(value))
        #endfor
        strdata += '\n'
        fout.write(strdata)
        # ---------- COMPUTE POST PROCESSING MEASURES ----------

        out_file = joinpathnames(
            OutputPredictionsPath,
            nameOutputFiles(basename(in_prediction_file),
                            main_postprocess_accuracy))
        print("Output: \'%s\', of dims \'%s\'..." %
              (basename(out_file), str(prediction_array.shape)))

        FileReader.writeImageArray(out_file, prediction_array)
    #endfor

    #close list accuracies file
    fout.close()
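
ThresholdImages.compute is not shown; a one-liner equivalent, assuming probability maps in [0, 1]:

import numpy as np

def threshold_images(probmap_array, threshold_value):
    # binarize a probability map: 1 where above the threshold, else 0
    return np.where(probmap_array > threshold_value, 1, 0).astype(np.uint8)

Example 11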
def main(args):
    # ---------- SETTINGS ----------
    nameOrigImagesDataRelPath = 'Images_WorkData'
    nameOrigMasksDataRelPath = 'LumenDistTrans_WorkData'

    nameOriginImagesFiles = 'images*' + getFileExtension(FORMATTRAINDATA)
    nameOriginMasksFiles = 'grndtru*' + getFileExtension(FORMATTRAINDATA)
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)

    OrigImagesDataPath = workDirsManager.getNameExistPath(
        workDirsManager.getNameBaseDataPath(), nameOrigImagesDataRelPath)
    OrigGroundTruthDataPath = workDirsManager.getNameExistPath(
        workDirsManager.getNameBaseDataPath(), nameOrigMasksDataRelPath)
    TrainingDataPath = workDirsManager.getNameNewPath(
        workDirsManager.getNameTrainingDataPath())
    ValidationDataPath = workDirsManager.getNameNewPath(
        workDirsManager.getNameValidationDataPath())
    TestingDataPath = workDirsManager.getNameNewPath(
        workDirsManager.getNameTestingDataPath())

    listImagesFiles = findFilesDir(OrigImagesDataPath, nameOriginImagesFiles)
    listGroundTruthFiles = findFilesDir(OrigGroundTruthDataPath,
                                        nameOriginMasksFiles)

    numImagesFiles = len(listImagesFiles)
    numGroundTruthFiles = len(listGroundTruthFiles)

    if (numImagesFiles != numGroundTruthFiles):
        message = "num image files \'%s\' not equal to num ground-truth files \'%s\'..." % (
            numImagesFiles, numGroundTruthFiles)
        CatchErrorException(message)

    if (args.distribute_fixed_names):
        print("Split dataset with Fixed Names...")
        names_repeated = find_element_repeated_two_indexes_names(
            NAME_IMAGES_TRAINING, NAME_IMAGES_VALIDATION)
        names_repeated += find_element_repeated_two_indexes_names(
            NAME_IMAGES_TRAINING, NAME_IMAGES_TESTING)
        names_repeated += find_element_repeated_two_indexes_names(
            NAME_IMAGES_VALIDATION, NAME_IMAGES_TESTING)

        if names_repeated:
            message = "found names repeated in list Training / Validation / Testing names: %s" % (
                names_repeated)
            CatchErrorException(message)

        indexesTraining = find_indexes_names_images_files(
            NAME_IMAGES_TRAINING, listImagesFiles)
        indexesValidation = find_indexes_names_images_files(
            NAME_IMAGES_VALIDATION, listImagesFiles)
        indexesTesting = find_indexes_names_images_files(
            NAME_IMAGES_TESTING, listImagesFiles)
        print(
            "Training (%s files)/ Validation (%s files)/ Testing (%s files)..."
            % (len(indexesTraining), len(indexesValidation),
               len(indexesTesting)))
    else:
        numTrainingFiles = int(args.prop_data_training * numImagesFiles)
        numValidationFiles = int(args.prop_data_validation * numImagesFiles)
        numTestingFiles = int(args.prop_data_testing * numImagesFiles)
        print(
            "Training (%s files)/ Validation (%s files)/ Testing (%s files)..."
            % (numTrainingFiles, numValidationFiles, numTestingFiles))
        if (args.distribute_random):
            print("Split dataset Randomly...")
            indexesAllFiles = np.random.choice(range(numImagesFiles),
                                               size=numImagesFiles,
                                               replace=False)
        else:
            print("Split dataset In Order...")
            indexesAllFiles = range(numImagesFiles)

        indexesTraining = indexesAllFiles[0:numTrainingFiles]
        indexesValidation = indexesAllFiles[numTrainingFiles:numTrainingFiles +
                                            numValidationFiles]
        indexesTesting = indexesAllFiles[numTrainingFiles +
                                         numValidationFiles::]

    print("Files assigned to Training Data: \'%s\'" %
          ([basename(listImagesFiles[index]) for index in indexesTraining]))
    print("Files assigned to Validation Data: \'%s\'" %
          ([basename(listImagesFiles[index]) for index in indexesValidation]))
    print("Files assigned to Testing Data: \'%s\'" %
          ([basename(listImagesFiles[index]) for index in indexesTesting]))

    # ******************** TRAINING DATA ********************
    for index in indexesTraining:
        makelink(
            listImagesFiles[index],
            joinpathnames(TrainingDataPath, basename(listImagesFiles[index])))
        makelink(
            listGroundTruthFiles[index],
            joinpathnames(TrainingDataPath,
                          basename(listGroundTruthFiles[index])))
    #endfor
    # ******************** TRAINING DATA ********************

    # ******************** VALIDATION DATA ********************
    for index in indexesValidation:
        makelink(
            listImagesFiles[index],
            joinpathnames(ValidationDataPath,
                          basename(listImagesFiles[index])))
        makelink(
            listGroundTruthFiles[index],
            joinpathnames(ValidationDataPath,
                          basename(listGroundTruthFiles[index])))
    #endfor
    # ******************** VALIDATION DATA ********************

    # ******************** TESTING DATA ********************
    for index in indexesTesting:
        makelink(
            listImagesFiles[index],
            joinpathnames(TestingDataPath, basename(listImagesFiles[index])))
        makelink(
            listGroundTruthFiles[index],
            joinpathnames(TestingDataPath,
                          basename(listGroundTruthFiles[index])))
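
makelink comes from the project's file utilities; a minimal sketch using symbolic links, replacing a stale link if one exists:

import os

def makelink(src_file, link_file):
    # create a symbolic link to the source file, replacing a stale one
    if os.path.lexists(link_file):
        os.remove(link_file)
    os.symlink(src_file, link_file)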
Example 12
def main(args):
    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'RawAirways'
    nameInputRoiMasksRelPath = 'RawLungs'
    nameReferenceImgRelPath = 'RawImages'
    nameOutputImagesRelPath = 'Airways_Rescaled_0.6x0.6x0.6_Full'
    nameOutputRoiMasksRelPath = 'Lungs_Rescaled_0.6x0.6x0.6_Full'

    nameInputImagesFiles = '*surface0.dcm'
    nameInputRoiMasksFiles = '*.dcm'
    nameReferenceImgFiles = '*.dcm'
    # prefixPatternInputFiles = 'av[0-9][0-9]*'

    nameRescaleFactors = 'rescaleFactors_images_0.6x0.6x0.6.npy'

    def nameOutputImagesFiles(in_name):
        in_name = in_name.replace('surface0', 'lumen')
        in_name = in_name.replace('surface1', 'outwall')
        #in_name = in_name.replace('-result','_noopfront')
        #in_name = in_name.replace('-centrelines','_centrelines')
        return filenamenoextension(in_name) + '.nii.gz'

    nameOutputRoiMasksFiles = lambda in_name: filenamenoextension(
        in_name).replace('-lungs', '_lungs') + '.nii.gz'
    nameOutputImagesMaskedToRoiFiles = lambda in_name: filenamenoextension(
        nameOutputImagesFiles(in_name)) + '_maskedToLungs.nii.gz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameInputImagesRelPath)
    ReferenceImgPath = workDirsManager.getNameExistPath(
        BaseDataPath, nameReferenceImgRelPath)
    OutputImagesPath = workDirsManager.getNameNewPath(BaseDataPath,
                                                      nameOutputImagesRelPath)

    listInputImagesFiles = findFilesDirAndCheck(InputImagesPath,
                                                nameInputImagesFiles)
    listReferenceImgFiles = findFilesDirAndCheck(ReferenceImgPath,
                                                 nameReferenceImgFiles)

    if (args.masksToRegionInterest):
        InputRoiMasksPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputRoiMasksRelPath)
        OutputRoiMasksPath = workDirsManager.getNameNewPath(
            BaseDataPath, nameOutputRoiMasksRelPath)

        listInputRoiMasksFiles = findFilesDirAndCheck(InputRoiMasksPath,
                                                      nameInputRoiMasksFiles)

    if (args.rescaleImages):
        dict_rescaleFactors = readDictionary(
            joinpathnames(BaseDataPath, nameRescaleFactors))

    for i, in_image_file in enumerate(listInputImagesFiles):
        print("\nInput: \'%s\'..." % (basename(in_image_file)))

        image_array = FileReader.getImageArray(in_image_file)

        if (args.isClassificationData):
            print("Convert to binary masks (0, 1)...")
            image_array = OperationBinaryMasks.process_masks(image_array)

        if (args.masksToRegionInterest):
            print("Mask input to RoI: lungs...")
            in_roimask_file = findFileWithSamePrefix(basename(in_image_file),
                                                     listInputRoiMasksFiles)
            print("RoI mask (lungs) file: \'%s\'..." %
                  (basename(in_roimask_file)))

            roimask_array = FileReader.getImageArray(in_roimask_file)

            if (args.isClassificationData):
                print("Convert to binary masks (0, 1)...")
                roimask_array = OperationBinaryMasks.process_masks(
                    roimask_array)

            # Image masked to RoI: exclude voxels not contained in lungs
            image_maskedToRoi_array = OperationBinaryMasks.apply_mask_exclude_voxels_fillzero(
                image_array, roimask_array)

        if (args.rescaleImages):
            in_referimg_file = findFileWithSamePrefix(basename(in_image_file),
                                                      listReferenceImgFiles)
            rescale_factor = dict_rescaleFactors[filenamenoextension(
                in_referimg_file)]
            print("Rescale image with a factor: \'%s\'..." %
                  (str(rescale_factor)))

            image_array = RescaleImages.compute3D(image_array,
                                                  rescale_factor,
                                                  is_binary_mask=True)
            print("Final dims: %s..." % (str(image_array.shape)))

            if (args.masksToRegionInterest):
                roimask_array = RescaleImages.compute3D(roimask_array,
                                                        rescale_factor,
                                                        is_binary_mask=True)
                image_maskedToRoi_array = RescaleImages.compute3D(
                    image_maskedToRoi_array,
                    rescale_factor,
                    is_binary_mask=True)

        out_file = joinpathnames(
            OutputImagesPath, nameOutputImagesFiles(basename(in_image_file)))
        print("Output: \'%s\', of dims \'%s\'..." %
              (basename(out_file), str(image_array.shape)))

        FileReader.writeImageArray(out_file, image_array)

        if (args.masksToRegionInterest):
            out_roimask_file = joinpathnames(
                OutputRoiMasksPath,
                nameOutputRoiMasksFiles(basename(in_roimask_file)))
            out_maskedToRoi_file = joinpathnames(
                OutputImagesPath,
                nameOutputImagesMaskedToRoiFiles(basename(in_image_file)))

            FileReader.writeImageArray(out_roimask_file, roimask_array)
            FileReader.writeImageArray(out_maskedToRoi_file,
                                       image_maskedToRoi_array)
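
RescaleImages.compute3D is not shown; a minimal sketch with scipy, where is_binary_mask switches to nearest-neighbour interpolation so that masks stay binary:

from scipy.ndimage import zoom

def rescale_image_3D(image_array, rescale_factor, is_binary_mask=False):
    # cubic spline interpolation for intensity images; order=0
    # (nearest neighbour) keeps binary masks binary
    order = 0 if is_binary_mask else 3
    return zoom(image_array, rescale_factor, order=order)

Example 13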
def main(args):
    # First, set up the session on the selected device(s): CPU or GPU
    set_session_in_selected_device(use_GPU_device=True,
                                   type_GPU_installed=args.typeGPUinstalled)

    # ---------- SETTINGS ----------
    nameInputRoiMasksRelPath = 'Lungs_Full'
    nameReferenceImgRelPath = 'Images_Full'

    # Get the file list:
    nameImagesFiles = 'images*' + getFileExtension(FORMATTRAINDATA)
    nameGroundTruthFiles = 'grndtru*' + getFileExtension(FORMATTRAINDATA)

    nameInputRoiMasksFiles = '*_lungs.nii.gz'
    nameReferenceImgFiles = '*.nii.gz'

    nameRescaleFactors = 'rescaleFactors_images.npy'
    nameCropBoundingBoxes = 'cropBoundingBoxes_images.npy'

    nameOutputPredictionFiles = 'predict-probmaps_%s.nii.gz'

    if (args.saveFeatMapsLayers):
        nameOutputFeatureMapsDirs = 'featureMaps-%s_lay_%s'
        nameOutputFeatureMapsFiles = 'featmaps-%s_lay_%s_map%0.2i.nii.gz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    TestingDataPath = workDirsManager.getNameExistPath(
        workDirsManager.getNameDataPath(args.typedata))
    ReferenceImgPath = workDirsManager.getNameExistPath(
        BaseDataPath, nameReferenceImgRelPath)
    ModelsPath = workDirsManager.getNameExistPath(args.basedir, args.modelsdir)
    OutputPredictionPath = workDirsManager.getNameNewPath(
        args.basedir, args.predictionsdir)

    listTestImagesFiles = findFilesDir(TestingDataPath, nameImagesFiles)
    listTestGroundTruthFiles = findFilesDir(TestingDataPath,
                                            nameGroundTruthFiles)
    listReferenceImgsFiles = findFilesDirAndCheck(ReferenceImgPath,
                                                  nameReferenceImgFiles)

    if (args.masksToRegionInterest):
        InputRoiMasksPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputRoiMasksRelPath)
        listInputRoiMasksFiles = findFilesDirAndCheck(InputRoiMasksPath,
                                                      nameInputRoiMasksFiles)

    if (args.rescaleImages):
        dict_rescaleFactors = readDictionary(
            joinpathnames(BaseDataPath, nameRescaleFactors))

    if (args.cropImages):
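        # maps each reference-image filename (no extension) to its crop bounding-box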
        dict_cropBoundingBoxes = readDictionary(
            joinpathnames(BaseDataPath, nameCropBoundingBoxes))

    test_images_generator = getImagesDataGenerator3D(
        args.slidingWindowImages, args.prop_overlap_Z_X_Y,
        args.transformationImages, args.elasticDeformationImages)
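
    # the reconstructor reassembles the sliding-window patches into a
    # full-size volume; 'isfilterImages' and 'prop_valid_outUnet' control
    # optional filtering of the predicted probability maps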

    images_reconstructor = getImagesReconstructor3D(
        args.slidingWindowImages,
        args.prop_overlap_Z_X_Y,
        use_TransformationImages=False,
        isfilterImages=args.filterPredictProbMaps,
        prop_valid_outUnet=args.prop_valid_outUnet)

    # LOADING MODEL
    # ----------------------------------------------
    print("-" * 30)
    print("Loading saved model...")
    print("-" * 30)

    if TYPE_DNNLIBRARY_USED == 'Keras':
        print(
            "Loading full model: weights, optimizer, loss, metrics ... and restarting..."
        )
        modelSavedPath = joinpathnames(
            ModelsPath, 'model_' + args.prediction_modelFile + '.hdf5')
        print("Restarting from file: \'%s\'..." % (modelSavedPath))

        loss_fun = DICTAVAILLOSSFUNS(
            args.lossfun, is_masks_exclude=args.masksToRegionInterest).loss
        metrics = [
            DICTAVAILMETRICFUNS(imetrics,
                                is_masks_exclude=args.masksToRegionInterest).
            get_renamed_compute() for imetrics in args.listmetrics
        ]
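        # Keras needs the custom loss/metric functions passed as
        # 'custom_objects' when deserializing the saved model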
        custom_objects = dict(
            map(lambda fun: (fun.__name__, fun), [loss_fun] + metrics))
        # load and compile model
        model = NeuralNetwork.get_load_saved_model(
            modelSavedPath, custom_objects=custom_objects)

        # output model summary
        model.summary()

    elif TYPE_DNNLIBRARY_USED == 'Pytorch':
        print(
            "Loading full model: weights, optimizer, loss, metrics ... and restarting..."
        )
        modelSavedPath = joinpathnames(
            ModelsPath, 'model_' + args.prediction_modelFile + '.pt')
        print("Restarting from file: \'%s\'..." % (modelSavedPath))
        # load and compile model
        trainer = Trainer.load_model_full(modelSavedPath)

        # output model summary
        trainer.get_summary_model()

    if (args.saveFeatMapsLayers):
        if TYPE_DNNLIBRARY_USED == 'Keras':
            visual_model_params = VisualModelParams(model, IMAGES_DIMS_Z_X_Y)
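            # map the local feature-map index to the absolute one when only
            # a subset starting at 'firstSaveFeatMapsLayers' is saved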
            if args.firstSaveFeatMapsLayers:
                get_index_featmap = lambda i: args.firstSaveFeatMapsLayers + i
            else:
                get_index_featmap = lambda i: i

        elif TYPE_DNNLIBRARY_USED == 'Pytorch':
            message = 'Visualizing model feature maps is not yet implemented for Pytorch...'
            CatchErrorException(message)
    # ----------------------------------------------

    # START ANALYSIS
    # ----------------------------------------------
    print("-" * 30)
    print("Predicting model...")
    print("-" * 30)

    for ifile, test_xData_file in enumerate(listTestImagesFiles):
        print("\nInput: \'%s\'..." % (basename(test_xData_file)))

        # COMPUTE PREDICTION
        # ------------------------------------------
        print("Loading data...")
        if (args.slidingWindowImages or args.transformationImages):
            if TYPE_DNNLIBRARY_USED == 'Keras':
                test_xData = LoadDataManagerInBatches_DataGenerator(
                    IMAGES_DIMS_Z_X_Y,
                    test_images_generator).loadData_1File(test_xData_file,
                                                          shuffle_images=False)
            elif TYPE_DNNLIBRARY_USED == 'Pytorch':
                test_xData = LoadDataManager.loadData_1File(test_xData_file)
                test_batch_data_generator = TrainingBatchDataGenerator(
                    IMAGES_DIMS_Z_X_Y, [test_xData], [test_xData],
                    test_images_generator,
                    batch_size=1,
                    shuffle=False)
                (test_yData, test_xData) = DataSampleGenerator(
                    IMAGES_DIMS_Z_X_Y, [test_xData], [test_xData],
                    test_images_generator).get_full_data()
        else:
            test_xData = LoadDataManagerInBatches(
                IMAGES_DIMS_Z_X_Y).loadData_1File(test_xData_file)
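            # add a batch dimension, as expected by 'model.predict'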
            test_xData = np.expand_dims(test_xData, axis=0)

        print("Total Data batches generated: %s..." % (len(test_xData)))

        print("Evaluate model...")
        if TYPE_DNNLIBRARY_USED == 'Keras':
            predict_yData = model.predict(test_xData, batch_size=1)
        elif TYPE_DNNLIBRARY_USED == 'Pytorch':
            predict_yData = trainer.predict(test_batch_data_generator)

        if (args.saveFeatMapsLayers):
            print("Compute feature maps of evaluated model...")
            featuremaps_data = visual_model_params.get_feature_maps(
                test_xData,
                args.nameSaveModelLayer,
                max_num_feat_maps=args.maxNumSaveFeatMapsLayers,
                first_feat_maps=args.firstSaveFeatMapsLayers)
        # ------------------------------------------

        # RECONSTRUCT FULL-SIZE PREDICTION
        # ------------------------------------------
        print("Reconstruct prediction to full size...")
        # Assign original images and masks files
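        # parse the case index from the working filename (prefix 'images')
        # to pick the matching full-size reference image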
        index_refer_img = getIndexOriginImagesFile(basename(test_xData_file),
                                                   beginString='images',
                                                   firstIndex='01')
        reference_img_file = listReferenceImgsFiles[index_refer_img]
        print("Reference image file: \'%s\'..." %
              (basename(reference_img_file)))

        # init reconstructor with the image size of the current working file
        predict_fullsize_shape = FileReader.getImageSize(test_xData_file)
        images_reconstructor.complete_init_data(predict_fullsize_shape)

        prediction_array = images_reconstructor.compute(predict_yData)

        if (args.saveFeatMapsLayers):
            featuremaps_array = images_reconstructor.compute(featuremaps_data)

        # reconstruct from cropped / rescaled images
        reference_img_shape = FileReader.getImageSize(reference_img_file)

        if (args.cropImages):
            crop_bounding_box = dict_cropBoundingBoxes[filenamenoextension(
                reference_img_file)]
            print(
                "Predicted data are cropped. Extend array size to original. Bounding-box: \'%s\'..."
                % (str(crop_bounding_box)))

            prediction_array = ExtendImages.compute3D(prediction_array,
                                                      crop_bounding_box,
                                                      reference_img_shape)
            print("Final dims: %s..." % (str(prediction_array.shape)))

        if (args.masksToRegionInterest):
            print("Mask predictions to RoI: lungs...")
            in_roimask_file = listInputRoiMasksFiles[index_refer_img]
            print("RoI mask (lungs) file: \'%s\'..." %
                  (basename(in_roimask_file)))

            roimask_array = FileReader.getImageArray(in_roimask_file)
            prediction_array = OperationBinaryMasks.reverse_mask_exclude_voxels_fillzero(
                prediction_array, roimask_array)

        if (args.saveFeatMapsLayers):
            print("Reconstruct predicted feature maps to full size...")
            if (args.cropImages):
                num_featmaps = featuremaps_array.shape[-1]
                featuremaps_shape = list(reference_img_shape) + [num_featmaps]
                featuremaps_array = ExtendImages.compute3D(
                    featuremaps_array, crop_bounding_box, featuremaps_shape)

            if (args.masksToRegionInterest):
                featuremaps_array = OperationBinaryMasks.reverse_mask_exclude_voxels_fillzero(
                    featuremaps_array, roimask_array)
        # ------------------------------------------

        out_prediction_file = joinpathnames(
            OutputPredictionPath, nameOutputPredictionFiles %
            (filenamenoextension(reference_img_file)))
        print("Output: \'%s\', of dims \'%s\'..." %
              (basename(out_prediction_file), prediction_array.shape))

        FileReader.writeImageArray(out_prediction_file, prediction_array)

        if (args.saveFeatMapsLayers):
            nameOutputFeatureMapsRelPath = nameOutputFeatureMapsDirs % (
                filenamenoextension(reference_img_file),
                args.nameSaveModelLayer)
            OutputFeatureMapsPath = workDirsManager.getNameNewPath(
                OutputPredictionPath, nameOutputFeatureMapsRelPath)

            num_featmaps = featuremaps_array.shape[-1]
            for ifeatmap in range(num_featmaps):
                out_featuremaps_file = joinpathnames(
                    OutputFeatureMapsPath, nameOutputFeatureMapsFiles %
                    (filenamenoextension(reference_img_file),
                     args.nameSaveModelLayer, get_index_featmap(ifeatmap) + 1))
                print("Output: \'%s\', of dims \'%s\'..." %
                      (basename(out_featuremaps_file),
                       featuremaps_array[..., ifeatmap].shape))

                FileReader.writeImageArray(out_featuremaps_file,
                                           featuremaps_array[..., ifeatmap])
Example 14
def main(args):
    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'ProcImages'
    nameInputMasksRelPath = 'ProcMasks'

    namePredictMasksFiles = 'predict_binmasks*thres0-5_withtraquea.nii.gz'
    nameInputImagesFiles = '*.nii.gz'
    nameInputMasksFiles = '*outerwall*traquea.nii.gz'

    # pattern to extract the case prefix from filenames
    tempSearchInputFiles = 'av[0-9]*'

    # template for the output movie filenames
    temp_outfilename = '%s_video.gif'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPredictMasksPath = workDirsManager.getNameExistPath(
        args.basedir, args.predictionsdir)
    InputImagesPath = workDirsManager.getNameExistPath(args.basedir,
                                                       nameInputImagesRelPath)
    InputMasksPath = workDirsManager.getNameExistPath(args.basedir,
                                                      nameInputMasksRelPath)
    OutputPath = workDirsManager.getNameNewPath(args.basedir, 'movies_results')

    listPredictMasksFiles = findFilesDir(InputPredictMasksPath,
                                         namePredictMasksFiles)
    listImagesCTFiles = findFilesDir(InputImagesPath, nameInputImagesFiles)
    listGrndTruthMasksFiles = findFilesDir(InputMasksPath, nameInputMasksFiles)

    nbPredictionsFiles = len(listPredictMasksFiles)
    nbImagesCTFiles = len(listImagesCTFiles)
    nbGrndTruthMasksFiles = len(listGrndTruthMasksFiles)

    # Run checkers
    if (nbPredictionsFiles == 0):
        message = "0 Predictions found in dir \'%s\'" % (InputPredictMasksPath)
        CatchErrorException(message)
    if (nbImagesCTFiles == 0):
        message = "0 Images CT found in dir \'%s\'" % (InputImagesPath)
        CatchErrorException(message)
    if (nbGrndTruthMasksFiles == 0):
        message = "0 Ground-truth Masks found in dir \'%s\'" % (InputMasksPath)
        CatchErrorException(message)

    for i, predict_masks_file in enumerate(listPredictMasksFiles):

        print('\'%s\'...' % (predict_masks_file))

        name_prefix_case = getExtractSubstringPattern(
            basename(predict_masks_file), tempSearchInputFiles)

        for iterfile_1, iterfile_2 in zip(listImagesCTFiles,
                                          listGrndTruthMasksFiles):
            if name_prefix_case in iterfile_1:
                images_CT_file = iterfile_1
                grndtruth_masks_file = iterfile_2
                break
        #endfor
        print("assigned to '%s' and '%s'..." %
              (basename(images_CT_file), basename(grndtruth_masks_file)))

        predict_masks_array = FileReader.getImageArray(predict_masks_file)
        images_CT_array = FileReader.getImageArray(images_CT_file)
        grndtruth_masks_array = FileReader.getImageArray(grndtruth_masks_file)

        if (args.invertImageAxial):
            predict_masks_array = FlippingImages.compute(predict_masks_array,
                                                         axis=0)
            images_CT_array = FlippingImages.compute(images_CT_array, axis=0)
            grndtruth_masks_array = FlippingImages.compute(
                grndtruth_masks_array, axis=0)

        print("Rendering animations...")
        list_frames = []

        for islice in range(images_CT_array.shape[0]):

            images_CT_slice = images_CT_array[islice, :, :]
            grndtruth_masks_slice = grndtruth_masks_array[islice, :, :]
            predict_masks_slice = predict_masks_array[islice, :, :]
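
            # min-max normalize the CT slice to [0, 1] for display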

            frame_image_CT = (
                images_CT_slice - np.min(images_CT_slice)
            ) / float(np.max(images_CT_slice) - np.min(images_CT_slice))

            frame_new = np.zeros(
                (images_CT_slice.shape[0], images_CT_slice.shape[1], 3),
                dtype=np.uint8)
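
            # replicate the normalized slice across the 3 RGB channels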

            frame_new[:, :, :] = 255 * frame_image_CT[:, :, None]

            if (TYPE_ANIMATION == '1'):

                index_frame_TP_mask = np.argwhere(grndtruth_masks_slice *
                                                  predict_masks_slice)
                index_frame_FN_mask = np.argwhere(grndtruth_masks_slice *
                                                  (1.0 - predict_masks_slice))
                index_frame_FP_mask = np.argwhere(
                    (1.0 - grndtruth_masks_slice) * predict_masks_slice)

                # paint True Positives, False Negatives and False Positives in yellow, blue and red colour, respectively
                for index in index_frame_TP_mask:
                    frame_new[tuple(index)] = YELLOW_COLOR
                for index in index_frame_FN_mask:
                    frame_new[tuple(index)] = BLUE_COLOR
                for index in index_frame_FP_mask:
                    frame_new[tuple(index)] = RED_COLOR

                is_valid_frame = len(index_frame_TP_mask) > 0 or len(
                    index_frame_FN_mask) > 0 or len(index_frame_FP_mask) > 0

            elif (TYPE_ANIMATION == '2'):
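                # find_boundaries returns a boolean mask; np.argwhere turns
                # it into pixel coordinates that can be painted one by one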

                index_frame_predict_bound_mask = np.argwhere(
                    skimage.segmentation.find_boundaries(predict_masks_slice))
                index_frame_grndtruth_bound_mask = np.argwhere(
                    skimage.segmentation.find_boundaries(
                        grndtruth_masks_slice))

                # draw boundaries of prediction / ground-truth masks with green / red colour, respectively
                for index in index_frame_predict_bound_mask:
                    frame_new[tuple(index)] = GREEN_COLOR
                for index in index_frame_grndtruth_bound_mask:
                    frame_new[tuple(index)] = RED_COLOR

                is_valid_frame = len(
                    index_frame_predict_bound_mask) > 0 or len(
                        index_frame_grndtruth_bound_mask) > 0

            # skip frames that contain neither predictions nor ground-truth masks
            if is_valid_frame:
                list_frames.append(frame_new)

        if len(list_frames) > 0:
            print("Good movie...")
            outfilename = joinpathnames(OutputPath,
                                        temp_outfilename % (name_prefix_case))
            imageio.mimsave(outfilename, list_frames, fps=20)
Example 15
def main(args):
    # ---------- SETTINGS ----------
    nameInputImagesFullRelPath    = 'RawImages_Full'
    nameInputImagesCroppedRelPath = 'RawImages_Cropped'

    nameImagesFiles = '*.dcm'
    #test_range_boundbox = ((16, 352), (109, 433), (45, 460))
    _eps = 1.0e-06
    _alpha_relax = 0.6
    _z_min_top = 15
    _z_numtest = 10

    nameTempOutResFile = 'temp_found_boundBoxes_vol16.csv'
    nameOutResultFileNPY = 'found_boundBoxes_vol16.npy'
    nameOutResultFileCSV = 'found_boundBoxes_vol16.csv'
    # ---------- SETTINGS ----------


    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputFullImagesPath = workDirsManager.getNameExistPath(BaseDataPath, nameInputImagesFullRelPath)
    InputCroppedImagesPath = workDirsManager.getNameExistPath(BaseDataPath, nameInputImagesCroppedRelPath)

    listInputFullImagesFiles = findFilesDirAndCheck(InputFullImagesPath, nameImagesFiles)
    listInputCroppedImagesFiles = findFilesDirAndCheck(InputCroppedImagesPath, nameImagesFiles)

    dict_found_boundingBoxes = {}

    nameTempOutResFile = joinpathnames(BaseDataPath, nameTempOutResFile)

    fout = open(nameTempOutResFile, 'w')


    for in_full_image_file, in_cropped_image_file in zip(listInputFullImagesFiles, listInputCroppedImagesFiles):
        print("\nInput: \'%s\'..." %(basename(in_full_image_file)))
        print("And: \'%s\'..." %(basename(in_cropped_image_file)))

        full_image_array = FileReader.getImageArray(in_full_image_file)
        cropped_image_array = FileReader.getImageArray(in_cropped_image_file)
        cropped_image_array = FlippingImages.compute(cropped_image_array, axis=0)

        full_image_shape = np.array(full_image_array.shape)
        cropped_image_shape = np.array(cropped_image_array.shape)
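        # candidate search range for the bounding box, relaxed by
        # '_alpha_relax' and constrained near the top of the volume in z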
        test_range_boundbox = compute_test_range_boundbox(full_image_shape,
                                                          cropped_image_shape,
                                                          alpha_relax=_alpha_relax,
                                                          z_min_top=_z_min_top,
                                                          z_numtest=_z_numtest)

        test_range_boundbox_shape = BoundingBoxes.compute_size_bounding_box(test_range_boundbox)
        if (np.any(np.array(test_range_boundbox_shape) < np.array(cropped_image_array.shape))):
            message = 'size of test range of bounding boxes smaller than cropped image: \'%s\' < \'%s\'...' %(test_range_boundbox_shape,
                                                                                                              cropped_image_array.shape)
            CatchErrorException(message)
        else:
            test_range_boundbox_shape = np.array(test_range_boundbox_shape)

        (num_test_boundbox, num_tests_total) = compute_num_tests_boundbox(test_range_boundbox_shape,
                                                                          cropped_image_array)
        print("size full image: \'%s\'..." %(full_image_shape))
        print("size cropped image: \'%s\'..." %(cropped_image_shape))
        print("test range bounding boxes: \'%s\'..." %(test_range_boundbox))
        print("size test range bounding boxes: \'%s\'..." %(test_range_boundbox_shape))
        print("num test bounding boxes: \'%s\'..." %(num_test_boundbox))
        print("num tests total: \'%s\'..." %(num_tests_total))


        flag_found_boundbox = False
        min_sum_test_res = 1.0e+10
        found_boundbox = None
        counter = 1
        for k in range(num_test_boundbox[0]):
            (z0, zm) = get_limits_test_boundbox(test_range_boundbox[0], cropped_image_shape[0], k, option='start_end')
            for j in range(num_test_boundbox[1]):
                (y0, ym) = get_limits_test_boundbox(test_range_boundbox[1], cropped_image_shape[1], j, option='start_begin')
                for i in range(num_test_boundbox[2]):
                    (x0, xm) = get_limits_test_boundbox(test_range_boundbox[2], cropped_image_shape[2], i, option='start_begin')
                    #print("test \"%s\" of \"%s\"..." %(counter, num_tests_total))
                    #counter = counter + 1
                    test_bounding_box = ((z0,zm),(y0,ym),(x0,xm))
                    #print("test bounding box: %s..." %(test_bounding_box))
                    test_res_matrix = full_image_array[test_bounding_box[0][0]:test_bounding_box[0][1],
                                                       test_bounding_box[1][0]:test_bounding_box[1][1],
                                                       test_bounding_box[2][0]:test_bounding_box[2][1]] - cropped_image_array
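                    # residual: sum of absolute voxel differences; zero only
                    # when the cropped patch matches exactly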
                    sum_test_res = np.sum(np.abs(test_res_matrix))
                    if (sum_test_res < _eps):
                        flag_found_boundbox = True
                        min_sum_test_res = 0.0
                        found_boundbox = test_bounding_box
                        break
                    elif (sum_test_res < min_sum_test_res):
                        min_sum_test_res = sum_test_res
                        found_boundbox = test_bounding_box
                #endfor
                if (flag_found_boundbox):
                    break
            #endfor
            if (flag_found_boundbox):
                break
        #endfor

        if (flag_found_boundbox):
            print("SUCESS: found perfect bounding-box: \'%s\', with null error: \'%s\'..." % (str(found_boundbox), sum_test_res))
            rootimagescroppedname = filenamenoextension(in_cropped_image_file)
            dict_found_boundingBoxes[rootimagescroppedname] = found_boundbox
            message = "%s,\"%s\"\n" %(rootimagescroppedname, str(found_boundbox))
            fout.write(message)
        else:
            print("ERROR: not found perfect bounding-box. Closest found is: \'%s\', with error: \'%s\'..." % (str(found_boundbox), min_sum_test_res))
            rootimagescroppedname = filenamenoextension(in_cropped_image_file)
            dict_found_boundingBoxes[rootimagescroppedname] = found_boundbox
            message = "%s,\"%s\" ...NOT PERFECT...\n" % (rootimagescroppedname, str(found_boundbox))
            fout.write(message)
    #endfor


    # Save dictionary of found bounding boxes in npy and csv files
    nameoutfile = joinpathnames(BaseDataPath, nameOutResultFileNPY)
    saveDictionary(nameoutfile, dict_found_boundingBoxes)
    nameoutfile = joinpathnames(BaseDataPath, nameOutResultFileCSV)
    saveDictionary_csv(nameoutfile, dict_found_boundingBoxes)

    fout.close()
Example 16
def main(args):

    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'ProcImagesExperData'
    nameInputMasksRelPath = 'ProcMasksExperData'
    nameVisualImagesRelPath = 'VisualInputData'

    # Get the file list:
    nameInImagesFiles = '*.npy'
    nameInMasksFiles = '*.npy'
    nameOutImagesFiles_type1 = 'visualImages-%0.2i_dim%s.nii.gz'
    nameOutMasksFiles_type1 = 'visualMasks-%0.2i_dim%s.nii.gz'
    nameOutImagesFiles_type2 = 'visualImages-%0.2i_dim%s-batch%0.2i.nii.gz'
    nameOutMasksFiles_type2 = 'visualMasks-%0.2i_dim%s-batch%0.2i.nii.gz'
    nameOutImagesFiles_type3 = 'visualImages-%0.2i_dim%s-batch%s.nii.gz'
    nameOutMasksFiles_type3 = 'visualMasks-%0.2i_dim%s-batch%s.nii.gz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameInputImagesRelPath)
    InputMasksPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                      nameInputMasksRelPath)
    VisualImagesPath = workDirsManager.getNameNewPath(args.basedir,
                                                      nameVisualImagesRelPath)

    listImagesFiles = findFilesDir(InputImagesPath, nameInImagesFiles)
    listMasksFiles = findFilesDir(InputMasksPath, nameInMasksFiles)

    nbImagesFiles = len(listImagesFiles)
    nbMasksFiles = len(listMasksFiles)

    # Run checkers
    if (nbImagesFiles == 0):
        message = "0 Images found in dir \'%s\'" % (InputImagesPath)
        CatchErrorException(message)
    if (nbImagesFiles != nbMasksFiles):
        message = "num CTs Images %i not equal to num Masks %i" % (
            nbImagesFiles, nbMasksFiles)
        CatchErrorException(message)

    for i, (images_file,
            masks_file) in enumerate(zip(listImagesFiles, listMasksFiles)):

        print('\'%s\'...' % (images_file))
        print('\'%s\'...' % (masks_file))

        images_array = FileReader.getImageArray(images_file)
        masks_array = FileReader.getImageArray(masks_file)

        if (images_array.shape != masks_array.shape):
            message = "size of Images and Masks not equal: %s != %s" % (
                images_array.shape, masks_array.shape)
            CatchErrorException(message)
        print("Original image of size: %s..." % (str(images_array.shape)))

        if (args.createImagesBatches):
            shape_batches = images_array.shape[1:]
            print(
                "Input images data stored as batches of size %s. Visualize batches..."
                % (str(shape_batches)))

            images_generator = getImagesVolumeTransformator3D(
                images_array, args.transformationImages,
                args.elasticDeformationImages)

            (visual_images_array,
             visual_masks_array) = images_generator.get_images_array(
                 images_array, masks_array=masks_array)

            for j, (batch_images_array, batch_masks_array) in enumerate(
                    zip(visual_images_array, visual_masks_array)):

                out_images_filename = joinpathnames(
                    VisualImagesPath, nameOutImagesFiles_type2 %
                    (i + 1, tuple2str(visual_images_array.shape[1:]), j + 1))
                out_masks_filename = joinpathnames(
                    VisualImagesPath, nameOutMasksFiles_type2 %
                    (i + 1, tuple2str(visual_masks_array.shape[1:]), j + 1))

                FileReader.writeImageArray(out_images_filename,
                                           batch_images_array)
                FileReader.writeImageArray(out_masks_filename,
                                           batch_masks_array)
            # endfor
        else:
            if (args.visualProcDataInBatches):
                print(
                    "Input images data stored as volume. Generate batches of size %s. Visualize batches..."
                    % (str(IMAGES_DIMS_Z_X_Y)))

                images_generator = getImagesDataGenerator3D(
                    args.slidingWindowImages, args.prop_overlap_Z_X_Y,
                    args.transformationImages, args.elasticDeformationImages)

                batch_data_generator = BatchDataGenerator_2Arrays(
                    IMAGES_DIMS_Z_X_Y,
                    images_array,
                    masks_array,
                    images_generator,
                    size_batch=1,
                    shuffle=False)
                num_batches_total = len(batch_data_generator)
                print(
                    "Generating %s total batches by sliding-window, with coordinates..."
                    % (num_batches_total))

                for j in range(num_batches_total):
                    coords_sliding_window_box = images_generator.slidingWindow_generator.get_limits_image(
                        j)

                    (visual_images_array,
                     visual_masks_array) = next(batch_data_generator)

                    visual_images_array = np.squeeze(visual_images_array,
                                                     axis=0)
                    visual_masks_array = np.squeeze(visual_masks_array, axis=0)

                    out_images_filename = joinpathnames(
                        VisualImagesPath, nameOutImagesFiles_type3 %
                        (i + 1, tuple2str(visual_images_array.shape),
                         tuple2str(coords_sliding_window_box)))
                    out_masks_filename = joinpathnames(
                        VisualImagesPath, nameOutMasksFiles_type3 %
                        (i + 1, tuple2str(visual_masks_array.shape),
                         tuple2str(coords_sliding_window_box)))

                    FileReader.writeImageArray(out_images_filename,
                                               visual_images_array)
                    FileReader.writeImageArray(out_masks_filename,
                                               visual_masks_array)
                # endfor
            else:
                print(
                    "Input images data stored as volume of size %s. Visualize volume..."
                    % (str(images_array.shape)))

                images_generator = getImagesVolumeTransformator3D(
                    images_array.shape, args.transformationImages,
                    args.elasticDeformationImages)

                (visual_images_array,
                 visual_masks_array) = images_generator.get_images_array(
                     images_array, masks_array=masks_array)

                out_images_filename = joinpathnames(
                    VisualImagesPath, nameOutImagesFiles_type1 %
                    (i + 1, tuple2str(visual_images_array.shape)))
                out_masks_filename = joinpathnames(
                    VisualImagesPath, nameOutMasksFiles_type1 %
                    (i + 1, tuple2str(visual_masks_array.shape)))

                FileReader.writeImageArray(out_images_filename,
                                           visual_images_array)
                FileReader.writeImageArray(out_masks_filename,
                                           visual_masks_array)
def main(args):
    # ---------- SETTINGS ----------
    nameInputMasksRelPath = 'ProcMasks'
    nameCentrelinesRelPath = 'ProcAllMasks_3b_i5'

    # Get the file list:
    namePredictionsFiles = 'predict_probmaps*.nii.gz'
    nameInputMasksFiles = '*outerwall.nii.gz'
    nameCentrelinesFiles = '*centrelines.nii.gz'

    # pattern to extract the case prefix from filenames
    tempSearchInputFiles = 'av[0-9]*'

    # template for the output file to save FROC values
    temp_outfilename = '%s_ROCsensTPspecFP_NEW.txt'

    # parameters
    nbr_of_thresholds = 8
    range_threshold = [-10, -3]
    #thresholds_list = (np.linspace(range_threshold[0], range_threshold[1], nbr_of_thresholds)).tolist()
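    # thresholds log-spaced between 10^-10 and 10^-3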
    thresholds_list = (np.logspace(range_threshold[0], range_threshold[1],
                                   nbr_of_thresholds)).tolist()
    # thresholds_list += [1.0 - elem for elem in reversed(thresholds_list)]
    # nbr_of_thresholds *= 2
    allowedDistance = 0
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPredictDataPath = workDirsManager.getNameExistPath(
        args.basedir, args.predictionsdir)
    InputMasksPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                      nameInputMasksRelPath)
    CentrelinesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameCentrelinesRelPath)

    listPredictionsFiles = findFilesDir(InputPredictDataPath,
                                        namePredictionsFiles)
    listGrndTruthMasksFiles = findFilesDir(InputMasksPath, nameInputMasksFiles)
    listCentrelinesFiles = findFilesDir(CentrelinesPath, nameCentrelinesFiles)

    nbPredictionFiles = len(listPredictionsFiles)

    # Run checkers
    if (nbPredictionFiles == 0):
        message = "0 Predictions found in dir \'%s\'" % (InputPredictDataPath)
        CatchErrorException(message)

    threshold_listcases = np.zeros((nbr_of_thresholds, nbPredictionFiles))
    sensitivity_listcases = np.zeros((nbr_of_thresholds, nbPredictionFiles))
    FPaverage_listcases = np.zeros((nbr_of_thresholds, nbPredictionFiles))
    completeness_listcases = np.zeros((nbr_of_thresholds, nbPredictionFiles))
    volumeleakage_listcases = np.zeros((nbr_of_thresholds, nbPredictionFiles))
    dice_coeff_listcases = np.zeros((nbr_of_thresholds, nbPredictionFiles))
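    # (rows: threshold values; columns: one entry per prediction case)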

    print("IMPORTANT: List of Threshold Values: %s" % (thresholds_list))

    for i, predict_probmaps_file in enumerate(listPredictionsFiles):

        print('\'%s\'...' % (predict_probmaps_file))

        name_prefix_case = getExtractSubstringPattern(
            basename(predict_probmaps_file), tempSearchInputFiles)

        for iter1_file, iter2_file in zip(listGrndTruthMasksFiles,
                                          listCentrelinesFiles):
            if name_prefix_case in iter1_file:
                grndtruth_masks_file = iter1_file
                centrelines_file = iter2_file
                break
        #endfor
        print("assigned to \'%s\' and \'%s\'..." %
              (basename(grndtruth_masks_file), basename(centrelines_file)))

        predict_probmaps_array = FileReader.getImageArray(
            predict_probmaps_file)
        grndtruth_masks_array = FileReader.getImageArray(grndtruth_masks_file)
        centrelines_array = FileReader.getImageArray(centrelines_file)

        print("Predictions masks array of size: \'%s\'..." %
              (str(predict_probmaps_array.shape)))

        # add a batch dimension: the FROC methods expect arrays of cases
        predict_probmaps_array = np.expand_dims(predict_probmaps_array, axis=0)
        grndtruth_masks_array = np.expand_dims(grndtruth_masks_array, axis=0)
        centrelines_array = np.expand_dims(centrelines_array, axis=0)

        # compute FROC: sensitivity-specificity
        print("computing FROC: sensitivity-specificity...")
        sensitivity_list, FPaverage_list = computeFROC(predict_probmaps_array,
                                                       grndtruth_masks_array,
                                                       allowedDistance,
                                                       thresholds_list)
        print("...done")

        # compute ROC: completeness-volume leakage
        print("computing ROC: completeness-volume leakage...")
        completeness_list, volumeleakage_list, dice_coeff_list = computeROC_Completeness_VolumeLeakage(
            predict_probmaps_array, grndtruth_masks_array, centrelines_array,
            thresholds_list)
        print("...done")

        out_filename = joinpathnames(InputPredictDataPath,
                                     temp_outfilename % (name_prefix_case))
        fout = open(out_filename, 'w')

        strheader = '/threshold/ /sensitivity/ /FPaverage/ /completeness/ /volume_leakage/ /dice_coeff/' + '\n'
        fout.write(strheader)

        for threshold, sensitivity, FPaverage, completeness, volumeleakage, dice_coeff in zip(
                thresholds_list, sensitivity_list, FPaverage_list,
                completeness_list, volumeleakage_list, dice_coeff_list):
            strdata = str(threshold) + ' ' + str(sensitivity) + ' ' + str(
                FPaverage) + ' ' + str(completeness) + ' ' + str(
                    volumeleakage) + ' ' + str(dice_coeff) + '\n'
            fout.write(strdata)
        #endfor

        fout.close()

        # store per-case values to compute averages over all cases
        threshold_listcases[:, i] = thresholds_list
        sensitivity_listcases[:, i] = sensitivity_list
        FPaverage_listcases[:, i] = FPaverage_list
        completeness_listcases[:, i] = completeness_list
        volumeleakage_listcases[:, i] = volumeleakage_list
        dice_coeff_listcases[:, i] = dice_coeff_list