def main(args):
    """Restore cropped result images to the full size of their reference images.

    For every cropped result: find its reference image by name prefix, flip it
    along the axial axis, embed it into a full-size volume at the stored
    bounding-box location, and save the result.
    """
    # ---------- SETTINGS ----------
    name_input_rel_path = 'RawReferResults'
    name_refer_rel_path = 'Images_Full'
    name_output_rel_path = 'RawReferResults_Orig_Full'

    name_input_files = '*.nii.gz'
    name_refer_files = '*.nii.gz'
    # prefixPatternInputFiles = 'vol[0-9][0-9]_*'
    name_bounding_boxes = 'found_boundingBoxes_original.npy'

    def name_output_files(in_name):
        # keep the same base name, stored as nifti
        return filenamenoextension(in_name) + '.nii.gz'
    # ---------- SETTINGS ----------

    work_dirs_manager = WorkDirsManager(args.basedir)
    base_data_path = work_dirs_manager.getNameBaseDataPath()
    input_images_path = work_dirs_manager.getNameExistPath(base_data_path, name_input_rel_path)
    refer_images_path = work_dirs_manager.getNameExistPath(base_data_path, name_refer_rel_path)
    output_images_path = work_dirs_manager.getNameNewPath(base_data_path, name_output_rel_path)

    list_input_files = findFilesDirAndCheck(input_images_path, name_input_files)
    list_refer_files = findFilesDirAndCheck(refer_images_path, name_refer_files)

    # per-case bounding boxes, keyed by reference file name without extension
    dict_bounding_boxes = readDictionary(joinpathnames(base_data_path, name_bounding_boxes))

    for in_image_file in list_input_files:
        print("\nInput: \'%s\'..." % (basename(in_image_file)))

        # pair the cropped result with its reference image by name prefix
        in_referimage_file = findFileWithSamePrefix(basename(in_image_file), list_refer_files,
                                                    prefix_pattern='vol[0-9][0-9]_')
        print("Refer image file: \'%s\'..." % (basename(in_referimage_file)))
        bounding_box = dict_bounding_boxes[filenamenoextension(in_referimage_file)]

        cropped_image_array = FileReader.getImageArray(in_image_file)
        print("Input cropped image size: \'%s\'..." % (str(cropped_image_array.shape)))

        # 1 step: invert image
        cropped_image_array = FlippingImages.compute(cropped_image_array, axis=0)
        # 2 step: extend image
        full_image_shape = FileReader.getImageSize(in_referimage_file)
        full_image_array = ExtendImages.compute3D(cropped_image_array, bounding_box, full_image_shape)
        print("Output full image size: \'%s\'..." % (str(full_image_array.shape)))

        out_image_file = joinpathnames(output_images_path, name_output_files(in_image_file))
        print("Output: \'%s\', of dims \'%s\'..." % (basename(out_image_file), str(full_image_array.shape)))

        FileReader.writeImageArray(out_image_file, full_image_array)
def main(args):
    """Template script: read every input image, process it, write the result.

    The processing step is a placeholder ('write here the code'); until it is
    filled in, `out_array` stays None and the final print/write will fail.
    """
    # ---------- SETTINGS ----------
    nameInputRelPath = '<input_dir>'
    nameOutputRelPath = '<output_dir>'

    nameInputFiles = '*.nii.gz'

    def nameOutputFiles(in_name):
        # derive the output name by swapping the placeholder suffixes
        return in_name.replace('<suffix_ini>', '<suffix_end>')

    # ---------- SETTINGS ----------

    # fix: use args.basedir — the original referenced an undefined global
    # 'BASEDIR' and ignored the 'args' parameter; every sibling script
    # constructs WorkDirsManager from args.basedir
    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                 nameInputRelPath)
    OutputPath = workDirsManager.getNameNewPath(BaseDataPath,
                                                nameOutputRelPath)

    listInputFiles = findFilesDirAndCheck(InputPath, nameInputFiles)

    for i, in_file in enumerate(listInputFiles):
        print("\nInput: \'%s\'..." % (basename(in_file)))

        in_array = FileReader.getImageArray(in_file)

        # ...
        # write here the code
        # ...

        out_array = None

        out_file = joinpathnames(OutputPath,
                                 nameOutputFiles(basename(in_file)))
        print("Output: \'%s\', of dims \'%s\'..." %
              (basename(out_file), str(out_array.shape)))

        FileReader.writeImageArray(out_file, out_array)
Exemplo n.º 3
0
def main(args):
    """Compute the filtering/proportion map of the valid-convolution network output.

    For each raw image: build a sliding-window reconstructor matching the
    image size (with or without patch overlap) and save the resulting map of
    output-patch proportions as a nifti file.
    """
    # ---------- SETTINGS ----------
    nameRawImagesRelPath = 'RawImages'
    nameComputeMasksRelPath = 'ProbNnetoutMasks'

    # Get the file list:
    nameImagesFiles = '*.dcm'
    nameOutMasksFiles = lambda in_name: filenamenoextension(
        in_name) + '_probnnetout.nii.gz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameRawImagesRelPath)
    ComputeMasksPath = workDirsManager.getNameNewPath(BaseDataPath,
                                                      nameComputeMasksRelPath)

    listImagesFiles = findFilesDir(InputImagesPath, nameImagesFiles)

    # Retrieve training model
    modelConstructor = DICTAVAILNETWORKS3D(IMAGES_DIMS_Z_X_Y, args.model)
    modelConstructor.type_padding = 'valid'

    # fix: identity comparison with None ('== None' is unidiomatic)
    if args.size_out_nnet is None:
        args.size_out_nnet = modelConstructor.get_size_output_full_Unet()

    print(
        "For input images of size: %s; Output of Neural Networks_Keras are images of size: %s..."
        % (IMAGES_DIMS_Z_X_Y, args.size_out_nnet))

    for images_file in listImagesFiles:

        print('\'%s\'...' % (images_file))

        images_array = FileReader.getImageArray(images_file)

        if (args.invertImageAxial):
            images_array = FlippingImages.compute(images_array, axis=0)

        print("Compute masks proportion output...")

        # both branches built the same reconstructor; only the patch overlap
        # differs, so select it once instead of duplicating the call
        prop_overlap = args.prop_overlap_Z_X_Y if args.slidingWindowImages \
            else (0.0, 0.0, 0.0)
        images_reconstructor = SlidingWindowReconstructorImages3D(
            IMAGES_DIMS_Z_X_Y,
            images_array.shape,
            prop_overlap,
            size_outUnet_sample=args.size_out_nnet)

        masks_probValidConvNnet_output_array = images_reconstructor.get_filtering_map_array(
        )

        out_masksFilename = joinpathnames(ComputeMasksPath,
                                          nameOutMasksFiles(images_file))

        FileReader.writeImageArray(out_masksFilename,
                                   masks_probValidConvNnet_output_array)
def main(args):
    """Preprocess paired image / ground-truth volumes into working data.

    Per (image, ground-truth) pair, optionally: binarise the ground truth,
    exclude voxels outside a RoI mask, rescale, and crop; then write both
    arrays to the output dirs (and optionally nifti copies to visualize).
    """
    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'Images_Full'
    nameInputGroundTruthRelPath = 'Airways_DistTrans_Full'
    nameInputRoiMasksRelPath = 'Airways_DilatedMasks_Full'
    nameOutputImagesRelPath = 'Images_WorkData_2'
    nameOutputGroundTruthRelPath = 'LumenDistTransClipped_WorkData'

    nameInputImagesFiles = '*.nii.gz'
    nameInputGroundTruthFiles = '*dist_clipdila_normal_power2.nii.gz'
    nameInputRoiMasksFiles = '*_dilated10.nii.gz'

    # per-case dictionaries, keyed by file name without extension
    nameRescaleFactors = 'rescaleFactors_images.npy'
    nameCropBoundingBoxes = 'cropBoundingBoxes_images.npy'

    # output names embed the 1-based case index and the array dims
    nameOutputImageFiles = 'images-%0.2i_dim%s' + getFileExtension(
        FORMATTRAINDATA)
    nameOutputGroundTruthFiles = 'grndtru-%0.2i_dim%s' + getFileExtension(
        FORMATTRAINDATA)

    if (args.saveVisualizeProcData):
        nameVisualOutputRelPath = 'VisualizeWorkData'
        # visual copies are saved as nifti rather than the train-data format
        nameOutputVisualFiles = lambda filename: basename(filename).replace(
            '.npz', '.nii.gz')
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameInputImagesRelPath)
    InputGroundTruthPath = workDirsManager.getNameExistPath(
        BaseDataPath, nameInputGroundTruthRelPath)
    OutputImagesPath = workDirsManager.getNameNewPath(BaseDataPath,
                                                      nameOutputImagesRelPath)
    OutputGroundTruthPath = workDirsManager.getNameNewPath(
        BaseDataPath, nameOutputGroundTruthRelPath)

    if (args.saveVisualizeProcData):
        VisualOutputPath = workDirsManager.getNameNewPath(
            BaseDataPath, nameVisualOutputRelPath)

    listInputImagesFiles = findFilesDirAndCheck(InputImagesPath,
                                                nameInputImagesFiles)
    listInputGroundTruthFiles = findFilesDirAndCheck(
        InputGroundTruthPath, nameInputGroundTruthFiles)

    if (len(listInputImagesFiles) != len(listInputGroundTruthFiles)):
        # NOTE(review): the message text quotes dir names but interpolates the
        # two file counts (second placeholder is even '%i') — misleading output
        message = 'num files in dir 1 \'%s\', not equal to num files in dir 2 \'%i\'...' % (
            len(listInputImagesFiles), len(listInputGroundTruthFiles))
        CatchErrorException(message)

    # the following inputs are only loaded when the matching option is set
    if (args.masksToRegionInterest):
        InputRoiMasksPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputRoiMasksRelPath)
        listInputRoiMaskFiles = findFilesDirAndCheck(InputRoiMasksPath,
                                                     nameInputRoiMasksFiles)

    if (args.rescaleImages):
        dict_rescaleFactors = readDictionary(
            joinpathnames(BaseDataPath, nameRescaleFactors))

    if (args.cropImages):
        dict_cropBoundingBoxes = readDictionary(
            joinpathnames(BaseDataPath, nameCropBoundingBoxes))

    # START ANALYSIS
    # ------------------------------
    print("-" * 30)
    print("Preprocessing...")
    print("-" * 30)

    for i, (in_image_file, in_grndtru_file) in enumerate(
            zip(listInputImagesFiles, listInputGroundTruthFiles)):
        print("\nInput: \'%s\'..." % (basename(in_image_file)))
        print("And: \'%s\'..." % (basename(in_grndtru_file)))

        (image_array, grndtru_array) = FileReader.get2ImageArraysAndCheck(
            in_image_file, in_grndtru_file)
        print("Original dims : \'%s\'..." % (str(image_array.shape)))

        if (args.isClassificationData):
            print("Convert to binary masks (0, 1)...")
            grndtru_array = OperationBinaryMasks.process_masks(grndtru_array)

        if (args.masksToRegionInterest):
            print("Mask input to RoI: lungs...")
            # NOTE(review): RoI files are paired with images by list position,
            # not by name matching — assumes both dir listings sort alike
            in_roimask_file = listInputRoiMaskFiles[i]
            print("RoI mask (lungs) file: \'%s\'..." %
                  (basename(in_roimask_file)))

            roimask_array = FileReader.getImageArray(in_roimask_file)
            grndtru_array = OperationBinaryMasks.apply_mask_exclude_voxels(
                grndtru_array, roimask_array)

        if (args.rescaleImages):
            rescale_factor = dict_rescaleFactors[filenamenoextension(
                in_image_file)]
            print("Rescale image with a factor: \'%s\'..." %
                  (str(rescale_factor)))

            image_array = RescaleImages.compute3D(image_array, rescale_factor)
            grndtru_array = RescaleImages.compute3D(grndtru_array,
                                                    rescale_factor)
            print("Final dims: %s..." % (str(image_array.shape)))

        if (args.cropImages):
            crop_bounding_box = dict_cropBoundingBoxes[filenamenoextension(
                in_image_file)]
            print("Crop image to bounding-box: \'%s\'..." %
                  (str(crop_bounding_box)))

            image_array = CropImages.compute3D(image_array, crop_bounding_box)
            grndtru_array = CropImages.compute3D(grndtru_array,
                                                 crop_bounding_box)
            print("Final dims: %s..." % (str(image_array.shape)))

        # if (args.extendSizeImages):
        #     print("Extend images to fixed size \'%s\':..." %(str(CROPSIZEBOUNDINGBOX)))
        #     size_new_image = (image_array.shape[0], CROPSIZEBOUNDINGBOX[0], CROPSIZEBOUNDINGBOX[1])
        #     backgr_val_images = -1000
        #     backgr_val_masks = -1 if args.masksToRegionInterest else 0
        #     bounding_box = dict_bounding_boxes[filenamenoextension(in_image_file)]
        #
        #     image_array = ExtendImages.compute3D(image_array, bounding_box, size_new_image, background_value=backgr_val_images)
        #     grndtru_array = ExtendImages.compute3D(grndtru_array, bounding_box, size_new_image, background_value=backgr_val_masks)
        #     print("Final dims: %s..." % (str(image_array.shape)))

        out_image_file = joinpathnames(
            OutputImagesPath,
            nameOutputImageFiles % (i + 1, tuple2str(image_array.shape)))
        out_grndtru_file = joinpathnames(
            OutputGroundTruthPath, nameOutputGroundTruthFiles %
            (i + 1, tuple2str(grndtru_array.shape)))
        # NOTE(review): 'str(...)' missing around image_array.shape below,
        # unlike the matching print just after (harmless: %s formats it anyway)
        print("Output: \'%s\', of dims \'%s\'..." % (basename(out_image_file),
                                                     (image_array.shape)))
        print("And: \'%s\', of dims \'%s\'..." %
              (basename(out_grndtru_file), str(grndtru_array.shape)))

        FileReader.writeImageArray(out_image_file, image_array)
        FileReader.writeImageArray(out_grndtru_file, grndtru_array)

        if (args.saveVisualizeProcData):
            print("Saving working data to visualize...")
            # reuse the just-written output names, swapping extension to nifti
            out_image_file = joinpathnames(
                VisualOutputPath, nameOutputVisualFiles(out_image_file))
            out_grndtru_file = joinpathnames(
                VisualOutputPath, nameOutputVisualFiles(out_grndtru_file))

            FileReader.writeImageArray(out_image_file, image_array)
            FileReader.writeImageArray(out_grndtru_file, grndtru_array)
Exemplo n.º 5
0
def main(args):
    """Visualize the preprocessed working data (images + paired masks).

    Depending on the options, the input arrays are exported as: the stored
    batches themselves, batches generated by sliding window over the volume,
    or the whole volume — after applying the configured transformations and
    elastic deformations.
    """

    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'ProcImagesExperData'
    nameInputMasksRelPath = 'ProcMasksExperData'
    nameVisualImagesRelPath = 'VisualInputData'

    # Get the file list:
    nameInImagesFiles = '*.npy'
    nameInMasksFiles = '*.npy'
    # type1: whole volume; type2: stored-batch index; type3: window coords
    nameOutImagesFiles_type1 = 'visualImages-%0.2i_dim%s.nii.gz'
    nameOutMasksFiles_type1 = 'visualMasks-%0.2i_dim%s.nii.gz'
    nameOutImagesFiles_type2 = 'visualImages-%0.2i_dim%s-batch%0.2i.nii.gz'
    nameOutMasksFiles_type2 = 'visualMasks-%0.2i_dim%s-batch%0.2i.nii.gz'
    nameOutImagesFiles_type3 = 'visualImages-%0.2i_dim%s-batch%s.nii.gz'
    nameOutMasksFiles_type3 = 'visualMasks-%0.2i_dim%s-batch%s.nii.gz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameInputImagesRelPath)
    InputMasksPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                      nameInputMasksRelPath)
    # note: visual output goes under args.basedir, not under BaseDataPath
    VisualImagesPath = workDirsManager.getNameNewPath(args.basedir,
                                                      nameVisualImagesRelPath)

    listImagesFiles = findFilesDir(InputImagesPath, nameInImagesFiles)
    listMasksFiles = findFilesDir(InputMasksPath, nameInMasksFiles)

    nbImagesFiles = len(listImagesFiles)
    nbMasksFiles = len(listMasksFiles)

    # Run checkers
    if (nbImagesFiles == 0):
        message = "0 Images found in dir \'%s\'" % (InputImagesPath)
        CatchErrorException(message)
    if (nbImagesFiles != nbMasksFiles):
        message = "num CTs Images %i not equal to num Masks %i" % (
            nbImagesFiles, nbMasksFiles)
        CatchErrorException(message)

    # images and masks are paired by list position
    for i, (images_file,
            masks_file) in enumerate(zip(listImagesFiles, listMasksFiles)):

        print('\'%s\'...' % (images_file))
        print('\'%s\'...' % (masks_file))

        images_array = FileReader.getImageArray(images_file)
        masks_array = FileReader.getImageArray(masks_file)

        if (images_array.shape != masks_array.shape):
            message = "size of Images and Masks not equal: %s != %s" % (
                images_array.shape, masks_array.shape)
            CatchErrorException(message)
        print("Original image of size: %s..." % (str(images_array.shape)))

        if (args.createImagesBatches):
            # data already stored as batches: dims beyond the first are the
            # batch shape
            shape_batches = images_array.shape[1:]
            print(
                "Input images data stored as batches of size %s. Visualize batches..."
                % (str(shape_batches)))

            # NOTE(review): here the array itself is passed, while the
            # whole-volume branch below passes images_array.shape — one of the
            # two calls likely uses the wrong first argument; confirm against
            # getImagesVolumeTransformator3D's signature
            images_generator = getImagesVolumeTransformator3D(
                images_array, args.transformationImages,
                args.elasticDeformationImages)

            (visual_images_array,
             visual_masks_array) = images_generator.get_images_array(
                 images_array, masks_array=masks_array)

            # write one nifti per stored batch
            for j, (batch_images_array, batch_masks_array) in enumerate(
                    zip(visual_images_array, visual_masks_array)):

                out_images_filename = joinpathnames(
                    VisualImagesPath, nameOutImagesFiles_type2 %
                    (i + 1, tuple2str(visual_images_array.shape[1:]), j + 1))
                out_masks_filename = joinpathnames(
                    VisualImagesPath, nameOutMasksFiles_type2 %
                    (i + 1, tuple2str(visual_masks_array.shape[1:]), j + 1))

                FileReader.writeImageArray(out_images_filename,
                                           batch_images_array)
                FileReader.writeImageArray(out_masks_filename,
                                           batch_masks_array)
            # endfor
        else:
            if (args.visualProcDataInBatches):
                # generate batches on the fly by sliding window over the volume
                print(
                    "Input images data stored as volume. Generate batches of size %s. Visualize batches..."
                    % (str(IMAGES_DIMS_Z_X_Y)))

                images_generator = getImagesDataGenerator3D(
                    args.slidingWindowImages, args.prop_overlap_Z_X_Y,
                    args.transformationImages, args.elasticDeformationImages)

                batch_data_generator = BatchDataGenerator_2Arrays(
                    IMAGES_DIMS_Z_X_Y,
                    images_array,
                    masks_array,
                    images_generator,
                    size_batch=1,
                    shuffle=False)
                num_batches_total = len(batch_data_generator)
                print(
                    "Generate total %s batches by sliding-window, with coordinates:..."
                    % (num_batches_total))

                for j in range(num_batches_total):
                    # window coordinates, used only to name the output files
                    coords_sliding_window_box = images_generator.slidingWindow_generator.get_limits_image(
                        j)

                    (visual_images_array,
                     visual_masks_array) = next(batch_data_generator)

                    # drop the leading size-1 batch dimension
                    visual_images_array = np.squeeze(visual_images_array,
                                                     axis=0)
                    visual_masks_array = np.squeeze(visual_masks_array, axis=0)

                    out_images_filename = joinpathnames(
                        VisualImagesPath, nameOutImagesFiles_type3 %
                        (i + 1, tuple2str(visual_images_array.shape),
                         tuple2str(coords_sliding_window_box)))
                    out_masks_filename = joinpathnames(
                        VisualImagesPath, nameOutMasksFiles_type3 %
                        (i + 1, tuple2str(visual_masks_array.shape),
                         tuple2str(coords_sliding_window_box)))

                    FileReader.writeImageArray(out_images_filename,
                                               visual_images_array)
                    FileReader.writeImageArray(out_masks_filename,
                                               visual_masks_array)
                # endfor
            else:
                # export the transformed whole volume in one file per case
                print(
                    "Input images data stored as volume of size %s. Visualize volume..."
                    % (str(images_array.shape)))

                # NOTE(review): this branch passes images_array.shape whereas
                # the batches branch above passes the array itself — confirm
                # which is the intended first argument
                images_generator = getImagesVolumeTransformator3D(
                    images_array.shape, args.transformationImages,
                    args.elasticDeformationImages)

                (visual_images_array,
                 visual_masks_array) = images_generator.get_images_array(
                     images_array, masks_array=masks_array)

                out_images_filename = joinpathnames(
                    VisualImagesPath, nameOutImagesFiles_type1 %
                    (i + 1, tuple2str(visual_images_array.shape)))
                out_masks_filename = joinpathnames(
                    VisualImagesPath, nameOutMasksFiles_type1 %
                    (i + 1, tuple2str(visual_masks_array.shape)))

                FileReader.writeImageArray(out_images_filename,
                                           visual_images_array)
                FileReader.writeImageArray(out_masks_filename,
                                           visual_masks_array)
Exemplo n.º 6
0
def main(args):
    """Post-process predicted probability maps into binary masks and score them.

    Per prediction: optionally threshold the probability map, optionally
    restrict it to the lungs RoI (or re-attach the trachea taken from the
    reference mask), compute the configured metrics against reference masks /
    centrelines, append the accuracies to a text file, and save the resulting
    mask named with its main accuracy.
    """
    # ---------- SETTINGS ----------
    nameInputPredictionsRelPath = args.predictionsdir
    nameInputReferMasksRelPath = 'Airways_Full'
    nameInputRoiMasksRelPath = 'Lungs_Full'
    nameInputCentrelinesRelPath = 'Centrelines_Full'
    nameOutputPredictionsRelPath = nameInputPredictionsRelPath

    nameInputPredictionsFiles = 'predict-probmaps_*.nii.gz'
    nameInputReferMasksFiles = '*_lumen.nii.gz'
    nameInputRoiMasksFiles = '*_lungs.nii.gz'
    nameInputCentrelinesFiles = '*_centrelines.nii.gz'
    # prefixPatternInputFiles = 'av[0-9][0-9]*'

    if (args.calcMasksThresholding):
        # encode the threshold value and trachea handling in the file names
        suffixPostProcessThreshold = '_thres%s' % (str(
            args.thresholdValue).replace('.', '-'))
        if (args.attachTracheaToCalcMasks):
            suffixPostProcessThreshold += '_withtrachea'
        else:
            suffixPostProcessThreshold += '_notrachea'
    else:
        suffixPostProcessThreshold = ''

    nameAccuracyPredictFiles = 'predict_accuracy_tests%s.txt' % (
        suffixPostProcessThreshold)

    def nameOutputFiles(in_name, in_acc):
        # output mask name embeds the main accuracy, rounded to percent
        out_name = filenamenoextension(in_name).replace(
            'predict-probmaps',
            'predict-binmasks') + '_acc%2.0f' % (np.round(100 * in_acc))
        return out_name + '%s.nii.gz' % (suffixPostProcessThreshold)

    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPredictionsPath = workDirsManager.getNameExistPath(
        args.basedir, nameInputPredictionsRelPath)
    InputReferenceMasksPath = workDirsManager.getNameExistPath(
        BaseDataPath, nameInputReferMasksRelPath)
    OutputPredictionsPath = workDirsManager.getNameNewPath(
        args.basedir, nameOutputPredictionsRelPath)

    listInputPredictionsFiles = findFilesDirAndCheck(
        InputPredictionsPath, nameInputPredictionsFiles)
    listInputReferenceMasksFiles = findFilesDirAndCheck(
        InputReferenceMasksPath, nameInputReferMasksFiles)

    if (args.masksToRegionInterest):
        InputRoiMasksPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputRoiMasksRelPath)
        listInputRoiMasksFiles = findFilesDirAndCheck(InputRoiMasksPath,
                                                      nameInputRoiMasksFiles)

        if (args.attachTracheaToCalcMasks):

            def compute_trachea_masks(refermask_array, roimask_array):
                # trachea = reference-mask voxels lying outside the lungs RoI
                return np.where(roimask_array == 1, 0, refermask_array)

    # resolve metric functions; record which ones score against centrelines
    listPostProcessMetrics = OrderedDict()
    list_isUseCenlineFiles = []
    for imetrics in args.listPostprocessMetrics:
        listPostProcessMetrics[imetrics] = DICTAVAILMETRICFUNS(
            imetrics).compute_np_safememory
        list_isUseCenlineFiles.append(
            DICTAVAILMETRICFUNS(imetrics)._is_cenline_grndtru)
    #endfor
    isuse_centreline_files = any(list_isUseCenlineFiles)

    if isuse_centreline_files:
        InputCentrelinesPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputCentrelinesRelPath)
        listInputCentrelinesFiles = findFilesDirAndCheck(
            InputCentrelinesPath, nameInputCentrelinesFiles)

    out_predictAccuracyFilename = joinpathnames(InputPredictionsPath,
                                                nameAccuracyPredictFiles)
    # fix: 'with' guarantees the accuracies file is closed on any exit path;
    # dict.iteritems() / dict.values()[0] (Python 2 only; crash on Python 3)
    # replaced by the Python 3 equivalents
    with open(out_predictAccuracyFilename, 'w') as fout:
        strheader = '/case/ ' + ' '.join(
            ['/%s/' % (key)
             for key in listPostProcessMetrics.keys()]) + '\n'
        fout.write(strheader)

        for i, in_prediction_file in enumerate(listInputPredictionsFiles):
            print("\nInput: \'%s\'..." % (basename(in_prediction_file)))

            # pair the prediction with its reference mask by case prefix
            in_refermask_file = findFileWithSamePrefix(
                basename(in_prediction_file).replace('predict-probmaps', ''),
                listInputReferenceMasksFiles,
                prefix_pattern='vol[0-9][0-9]_')
            print("Refer mask file: \'%s\'..." % (basename(in_refermask_file)))

            prediction_array = FileReader.getImageArray(in_prediction_file)
            refermask_array = FileReader.getImageArray(in_refermask_file)
            print("Predictions of size: %s..." % (str(prediction_array.shape)))

            if (args.calcMasksThresholding):
                print(
                    "Compute prediction masks by thresholding probability maps to value %s..."
                    % (args.thresholdValue))
                prediction_array = ThresholdImages.compute(prediction_array,
                                                           args.thresholdValue)

            if isuse_centreline_files:
                in_centreline_file = findFileWithSamePrefix(
                    basename(in_prediction_file).replace('predict-probmaps', ''),
                    listInputCentrelinesFiles,
                    prefix_pattern='vol[0-9][0-9]_')
                print("Centrelines file: \'%s\'..." %
                      (basename(in_centreline_file)))
                centrelines_array = FileReader.getImageArray(in_centreline_file)

            if (args.masksToRegionInterest):
                in_roimask_file = findFileWithSamePrefix(
                    basename(in_prediction_file).replace('predict-probmaps', ''),
                    listInputRoiMasksFiles,
                    prefix_pattern='vol[0-9][0-9]_')
                print("RoI mask (lungs) file: \'%s\'..." %
                      (basename(in_roimask_file)))
                roimask_array = FileReader.getImageArray(in_roimask_file)

                if (args.attachTracheaToCalcMasks):
                    print("Attach trachea mask to computed prediction masks...")
                    trachea_masks_array = compute_trachea_masks(
                        refermask_array, roimask_array)
                    prediction_array = OperationBinaryMasks.join_two_binmasks_one_image(
                        prediction_array, trachea_masks_array)
                else:
                    # restrict all arrays to the RoI so metrics ignore the outside
                    prediction_array = OperationBinaryMasks.apply_mask_exclude_voxels_fillzero(
                        prediction_array, roimask_array)
                    refermask_array = OperationBinaryMasks.apply_mask_exclude_voxels_fillzero(
                        refermask_array, roimask_array)
                    if isuse_centreline_files:
                        centrelines_array = OperationBinaryMasks.apply_mask_exclude_voxels_fillzero(
                            centrelines_array, roimask_array)

            # ---------- COMPUTE POST PROCESSING MEASURES ----------
            list_postprocess_measures = OrderedDict()
            # fix: inner index renamed 'ind' (it previously shadowed the
            # outer loop variable 'i')
            for ind, (key, value) in enumerate(listPostProcessMetrics.items()):
                if list_isUseCenlineFiles[ind]:
                    acc_value = value(centrelines_array, prediction_array)
                else:
                    acc_value = value(refermask_array, prediction_array)
                list_postprocess_measures[key] = acc_value
            # endfor
            # the first configured metric acts as the 'main' accuracy
            main_postprocess_accuracy = next(
                iter(list_postprocess_measures.values()))

            # print list accuracies on screen and in file
            prefix_casename = getSubstringPatternFilename(
                basename(in_prediction_file), substr_pattern='vol[0-9][0-9]_')[:-1]
            strdata = '\'%s\'' % (prefix_casename)
            for (key, value) in list_postprocess_measures.items():
                print("Metric \'%s\': %s..." % (key, value))
                strdata += ' %s' % (str(value))
            #endfor
            strdata += '\n'
            fout.write(strdata)
            # ---------- COMPUTE POST PROCESSING MEASURES ----------

            out_file = joinpathnames(
                OutputPredictionsPath,
                nameOutputFiles(basename(in_prediction_file),
                                main_postprocess_accuracy))
            print("Output: \'%s\', of dims \'%s\'..." %
                  (basename(out_file), str(prediction_array.shape)))

            FileReader.writeImageArray(out_file, prediction_array)
        #endfor
def main(args):
    """Distribute working-data files into Training / Validation / Testing dirs.

    The split is chosen by command-line options: by fixed name lists, randomly,
    or in listing order; files are assigned by creating links in the
    destination dirs.
    """
    # ---------- SETTINGS ----------
    nameOrigImagesDataRelPath = 'Images_WorkData'
    nameOrigMasksDataRelPath = 'LumenDistTrans_WorkData'

    nameOriginImagesFiles = 'images*' + getFileExtension(FORMATTRAINDATA)
    nameOriginMasksFiles = 'grndtru*' + getFileExtension(FORMATTRAINDATA)
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)

    OrigImagesDataPath = workDirsManager.getNameExistPath(
        workDirsManager.getNameBaseDataPath(), nameOrigImagesDataRelPath)
    OrigGroundTruthDataPath = workDirsManager.getNameExistPath(
        workDirsManager.getNameBaseDataPath(), nameOrigMasksDataRelPath)
    TrainingDataPath = workDirsManager.getNameNewPath(
        workDirsManager.getNameTrainingDataPath())
    ValidationDataPath = workDirsManager.getNameNewPath(
        workDirsManager.getNameValidationDataPath())
    TestingDataPath = workDirsManager.getNameNewPath(
        workDirsManager.getNameTestingDataPath())

    listImagesFiles = findFilesDir(OrigImagesDataPath, nameOriginImagesFiles)
    listGroundTruthFiles = findFilesDir(OrigGroundTruthDataPath,
                                        nameOriginMasksFiles)

    numImagesFiles = len(listImagesFiles)
    numGroundTruthFiles = len(listGroundTruthFiles)

    if (numImagesFiles != numGroundTruthFiles):
        message = "num image files \'%s\' not equal to num ground-truth files \'%s\'..." % (
            numImagesFiles, numGroundTruthFiles)
        CatchErrorException(message)

    if (args.distribute_fixed_names):
        print("Split dataset with Fixed Names...")
        # a name appearing in two of the fixed lists would be linked twice
        names_repeated = find_element_repeated_two_indexes_names(
            NAME_IMAGES_TRAINING, NAME_IMAGES_VALIDATION)
        names_repeated += find_element_repeated_two_indexes_names(
            NAME_IMAGES_TRAINING, NAME_IMAGES_TESTING)
        names_repeated += find_element_repeated_two_indexes_names(
            NAME_IMAGES_VALIDATION, NAME_IMAGES_TESTING)

        if names_repeated:
            message = "found names repeated in list Training / Validation / Testing names: %s" % (
                names_repeated)
            CatchErrorException(message)

        indexesTraining = find_indexes_names_images_files(
            NAME_IMAGES_TRAINING, listImagesFiles)
        indexesValidation = find_indexes_names_images_files(
            NAME_IMAGES_VALIDATION, listImagesFiles)
        indexesTesting = find_indexes_names_images_files(
            NAME_IMAGES_TESTING, listImagesFiles)
        print(
            "Training (%s files)/ Validation (%s files)/ Testing (%s files)..."
            % (len(indexesTraining), len(indexesValidation),
               len(indexesTesting)))
    else:
        # proportional split: testing implicitly takes whatever remains
        numTrainingFiles = int(args.prop_data_training * numImagesFiles)
        numValidationFiles = int(args.prop_data_validation * numImagesFiles)
        numTestingFiles = int(args.prop_data_testing * numImagesFiles)
        print(
            "Training (%s files)/ Validation (%s files)/ Testing (%s files)..."
            % (numTrainingFiles, numValidationFiles, numTestingFiles))
        if (args.distribute_random):
            print("Split dataset Randomly...")
            # a random permutation of all file indexes
            indexesAllFiles = np.random.choice(range(numImagesFiles),
                                               size=numImagesFiles,
                                               replace=False)
        else:
            print("Split dataset In Order...")
            indexesAllFiles = range(numImagesFiles)

        indexesTraining = indexesAllFiles[0:numTrainingFiles]
        indexesValidation = indexesAllFiles[numTrainingFiles:numTrainingFiles +
                                            numValidationFiles]
        indexesTesting = indexesAllFiles[numTrainingFiles +
                                         numValidationFiles::]

    print("Files assigned to Training Data: \'%s\'" %
          ([basename(listImagesFiles[index]) for index in indexesTraining]))
    print("Files assigned to Validation Data: \'%s\'" %
          ([basename(listImagesFiles[index]) for index in indexesValidation]))
    print("Files assigned to Testing Data: \'%s\'" %
          ([basename(listImagesFiles[index]) for index in indexesTesting]))

    def link_files_into_dir(indexes, dest_data_path):
        # link each (image, ground-truth) pair of 'indexes' into dest_data_path
        for index in indexes:
            makelink(
                listImagesFiles[index],
                joinpathnames(dest_data_path,
                              basename(listImagesFiles[index])))
            makelink(
                listGroundTruthFiles[index],
                joinpathnames(dest_data_path,
                              basename(listGroundTruthFiles[index])))

    # refactor: the three previously-duplicated link loops share one helper
    link_files_into_dir(indexesTraining, TrainingDataPath)
    link_files_into_dir(indexesValidation, ValidationDataPath)
    link_files_into_dir(indexesTesting, TestingDataPath)
Exemplo n.º 8
0
def main(args):
    """Convert raw airway segmentations ('*surface0.dcm') to nifti, optionally
    masked to the lungs RoI and/or rescaled with per-case precomputed factors.

    Reads from, under the base data dir: 'RawAirways' (inputs), 'RawLungs'
    (RoI masks, used if args.masksToRegionInterest), 'RawImages' (reference
    images whose names key the rescale-factor dictionary, used if
    args.rescaleImages). Writes converted images (and, when masking, the
    RoI masks and the lung-masked images) to the output dirs.
    """
    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'RawAirways'
    nameInputRoiMasksRelPath = 'RawLungs'
    nameReferenceImgRelPath = 'RawImages'
    nameOutputImagesRelPath = 'Airways_Rescaled_0.6x0.6x0.6_Full'
    nameOutputRoiMasksRelPath = 'Lungs_Rescaled_0.6x0.6x0.6_Full'

    nameInputImagesFiles = '*surface0.dcm'
    nameInputRoiMasksFiles = '*.dcm'
    nameReferenceImgFiles = '*.dcm'
    # prefixPatternInputFiles = 'av[0-9][0-9]*'

    nameRescaleFactors = 'rescaleFactors_images_0.6x0.6x0.6.npy'

    # Output-name builders, written as plain 'def's (not assigned lambdas,
    # PEP 8 E731) for consistency with nameOutputImagesFiles.
    def nameOutputImagesFiles(in_name):
        # Map the raw surface naming to anatomical naming before re-extensioning.
        in_name = in_name.replace('surface0', 'lumen')
        in_name = in_name.replace('surface1', 'outwall')
        #in_name = in_name.replace('-result','_noopfront')
        #in_name = in_name.replace('-centrelines','_centrelines')
        return filenamenoextension(in_name) + '.nii.gz'

    def nameOutputRoiMasksFiles(in_name):
        return filenamenoextension(in_name).replace('-lungs', '_lungs') + '.nii.gz'

    def nameOutputImagesMaskedToRoiFiles(in_name):
        return filenamenoextension(nameOutputImagesFiles(in_name)) + '_maskedToLungs.nii.gz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputImagesPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                       nameInputImagesRelPath)
    ReferenceImgPath = workDirsManager.getNameExistPath(BaseDataPath,
                                                        nameReferenceImgRelPath)
    OutputImagesPath = workDirsManager.getNameNewPath(BaseDataPath,
                                                      nameOutputImagesRelPath)

    listInputImagesFiles = findFilesDirAndCheck(InputImagesPath,
                                                nameInputImagesFiles)
    listReferenceImgFiles = findFilesDirAndCheck(ReferenceImgPath,
                                                 nameReferenceImgFiles)

    if args.masksToRegionInterest:
        InputRoiMasksPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputRoiMasksRelPath)
        OutputRoiMasksPath = workDirsManager.getNameNewPath(
            BaseDataPath, nameOutputRoiMasksRelPath)

        listInputRoiMasksFiles = findFilesDirAndCheck(InputRoiMasksPath,
                                                      nameInputRoiMasksFiles)

    if args.rescaleImages:
        dict_rescaleFactors = readDictionary(
            joinpathnames(BaseDataPath, nameRescaleFactors))

    for in_image_file in listInputImagesFiles:
        print("\nInput: \'%s\'..." % (basename(in_image_file)))

        image_array = FileReader.getImageArray(in_image_file)

        if args.isClassificationData:
            print("Convert to binary masks (0, 1)...")
            image_array = OperationBinaryMasks.process_masks(image_array)

        if args.masksToRegionInterest:
            print("Mask input to RoI: lungs...")
            in_roimask_file = findFileWithSamePrefix(basename(in_image_file),
                                                     listInputRoiMasksFiles)
            print("RoI mask (lungs) file: \'%s\'..." %
                  (basename(in_roimask_file)))

            roimask_array = FileReader.getImageArray(in_roimask_file)

            if args.isClassificationData:
                print("Convert to binary masks (0, 1)...")
                roimask_array = OperationBinaryMasks.process_masks(
                    roimask_array)

            # Image masked to RoI: exclude voxels not contained in lungs
            image_maskedToRoi_array = OperationBinaryMasks.apply_mask_exclude_voxels_fillzero(
                image_array, roimask_array)

        if args.rescaleImages:
            # Per-case rescale factor is keyed by the reference image filename.
            in_referimg_file = findFileWithSamePrefix(basename(in_image_file),
                                                      listReferenceImgFiles)
            rescale_factor = dict_rescaleFactors[filenamenoextension(
                in_referimg_file)]
            print("Rescale image with a factor: \'%s\'..." %
                  (str(rescale_factor)))

            image_array = RescaleImages.compute3D(image_array,
                                                  rescale_factor,
                                                  is_binary_mask=True)
            print("Final dims: %s..." % (str(image_array.shape)))

            if args.masksToRegionInterest:
                # Keep mask and masked image consistent with the rescaled image.
                roimask_array = RescaleImages.compute3D(roimask_array,
                                                        rescale_factor,
                                                        is_binary_mask=True)
                image_maskedToRoi_array = RescaleImages.compute3D(
                    image_maskedToRoi_array,
                    rescale_factor,
                    is_binary_mask=True)

        out_file = joinpathnames(
            OutputImagesPath, nameOutputImagesFiles(basename(in_image_file)))
        print("Output: \'%s\', of dims \'%s\'..." %
              (basename(out_file), str(image_array.shape)))

        FileReader.writeImageArray(out_file, image_array)

        if args.masksToRegionInterest:
            out_roimask_file = joinpathnames(
                OutputRoiMasksPath,
                nameOutputRoiMasksFiles(basename(in_roimask_file)))
            out_maskedToRoi_file = joinpathnames(
                OutputImagesPath,
                nameOutputImagesMaskedToRoiFiles(basename(in_image_file)))

            FileReader.writeImageArray(out_roimask_file, roimask_array)
            FileReader.writeImageArray(out_maskedToRoi_file,
                                       image_maskedToRoi_array)
def main(args):
    """Run a saved network (Keras or Pytorch, per TYPE_DNNLIBRARY_USED) over
    the testing data, reconstruct predicted probability maps to the original
    full image size (undoing cropping and RoI masking), and optionally save
    reconstructed feature maps of a chosen model layer.
    """
    # First thing, set session in the selected(s) devices: CPU or GPU
    set_session_in_selected_device(use_GPU_device=True,
                                   type_GPU_installed=args.typeGPUinstalled)

    # ---------- SETTINGS ----------
    nameInputRoiMasksRelPath = 'Lungs_Full'
    nameReferenceImgRelPath = 'Images_Full'

    # Get the file list:
    nameImagesFiles = 'images*' + getFileExtension(FORMATTRAINDATA)
    nameGroundTruthFiles = 'grndtru*' + getFileExtension(FORMATTRAINDATA)

    nameInputRoiMasksFiles = '*_lungs.nii.gz'
    nameReferenceImgFiles = '*.nii.gz'

    nameRescaleFactors = 'rescaleFactors_images.npy'
    nameCropBoundingBoxes = 'cropBoundingBoxes_images.npy'

    # '%s' is filled with the reference image name (no extension).
    nameOutputPredictionFiles = 'predict-probmaps_%s.nii.gz'

    if (args.saveFeatMapsLayers):
        # '%s' placeholders: reference image name and layer name; '%0.2i' the map index.
        nameOutputFeatureMapsDirs = 'featureMaps-%s_lay_%s'
        nameOutputFeatureMapsFiles = 'featmaps-%s_lay_%s_map%0.2i.nii.gz'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    TestingDataPath = workDirsManager.getNameExistPath(
        workDirsManager.getNameDataPath(args.typedata))
    ReferenceImgPath = workDirsManager.getNameExistPath(
        BaseDataPath, nameReferenceImgRelPath)
    ModelsPath = workDirsManager.getNameExistPath(args.basedir, args.modelsdir)
    OutputPredictionPath = workDirsManager.getNameNewPath(
        args.basedir, args.predictionsdir)

    # NOTE(review): findFilesDir (no "AndCheck") — empty test-file lists are
    # silently accepted here, unlike the checked reference-image list below.
    listTestImagesFiles = findFilesDir(TestingDataPath, nameImagesFiles)
    listTestGroundTruthFiles = findFilesDir(TestingDataPath,
                                            nameGroundTruthFiles)
    listReferenceImgsFiles = findFilesDirAndCheck(ReferenceImgPath,
                                                  nameReferenceImgFiles)

    if (args.masksToRegionInterest):
        InputRoiMasksPath = workDirsManager.getNameExistPath(
            BaseDataPath, nameInputRoiMasksRelPath)
        listInputRoiMasksFiles = findFilesDirAndCheck(InputRoiMasksPath,
                                                      nameInputRoiMasksFiles)

    if (args.rescaleImages):
        dict_rescaleFactors = readDictionary(
            joinpathnames(BaseDataPath, nameRescaleFactors))

    if (args.cropImages):
        dict_cropBoundingBoxes = readDictionary(
            joinpathnames(BaseDataPath, nameCropBoundingBoxes))

    # Generator to sample patches from each test image (sliding window and/or
    # augmentation), and the matching reconstructor to stitch patches back.
    test_images_generator = getImagesDataGenerator3D(
        args.slidingWindowImages, args.prop_overlap_Z_X_Y,
        args.transformationImages, args.elasticDeformationImages)

    images_reconstructor = getImagesReconstructor3D(
        args.slidingWindowImages,
        args.prop_overlap_Z_X_Y,
        use_TransformationImages=False,
        isfilterImages=args.filterPredictProbMaps,
        prop_valid_outUnet=args.prop_valid_outUnet)

    # LOADING MODEL
    # ----------------------------------------------
    print("-" * 30)
    print("Loading saved model...")
    print("-" * 30)

    if TYPE_DNNLIBRARY_USED == 'Keras':
        print(
            "Loading full model: weights, optimizer, loss, metrics ... and restarting..."
        )
        modelSavedPath = joinpathnames(
            ModelsPath, 'model_' + args.prediction_modelFile + '.hdf5')
        print("Restarting from file: \'%s\'..." % (modelSavedPath))

        # Rebuild the custom loss/metric callables so Keras can deserialize
        # the saved model (custom_objects maps function name -> function).
        loss_fun = DICTAVAILLOSSFUNS(
            args.lossfun, is_masks_exclude=args.masksToRegionInterest).loss
        metrics = [
            DICTAVAILMETRICFUNS(imetrics,
                                is_masks_exclude=args.masksToRegionInterest).
            get_renamed_compute() for imetrics in args.listmetrics
        ]
        custom_objects = dict(
            map(lambda fun: (fun.__name__, fun), [loss_fun] + metrics))
        # load and compile model
        model = NeuralNetwork.get_load_saved_model(
            modelSavedPath, custom_objects=custom_objects)

        # output model summary
        model.summary()

    elif TYPE_DNNLIBRARY_USED == 'Pytorch':
        print(
            "Loading full model: weights, optimizer, loss, metrics ... and restarting..."
        )
        modelSavedPath = joinpathnames(
            ModelsPath, 'model_' + args.prediction_modelFile + '.pt')
        print("Restarting from file: \'%s\'..." % (modelSavedPath))
        # load and compile model
        trainer = Trainer.load_model_full(modelSavedPath)

        # output model summary
        trainer.get_summary_model()

    if (args.saveFeatMapsLayers):
        # Feature-map extraction only implemented for Keras; Pytorch aborts here.
        if TYPE_DNNLIBRARY_USED == 'Keras':
            visual_model_params = VisualModelParams(model, IMAGES_DIMS_Z_X_Y)
            if args.firstSaveFeatMapsLayers:
                # Offset output numbering when only a subrange of maps is saved.
                get_index_featmap = lambda i: args.firstSaveFeatMapsLayers + i
            else:
                get_index_featmap = lambda i: i

        elif TYPE_DNNLIBRARY_USED == 'Pytorch':
            message = 'Visualize a model feature maps still not implemented...'
            CatchErrorException(message)
    # ----------------------------------------------

    # START ANALYSIS
    # ----------------------------------------------
    print("-" * 30)
    print("Predicting model...")
    print("-" * 30)

    for ifile, test_xData_file in enumerate(listTestImagesFiles):
        print("\nInput: \'%s\'..." % (basename(test_xData_file)))

        # COMPUTE PREDICTION
        # ------------------------------------------
        print("Loading data...")
        if (args.slidingWindowImages or args.transformationImages):
            if TYPE_DNNLIBRARY_USED == 'Keras':
                test_xData = LoadDataManagerInBatches_DataGenerator(
                    IMAGES_DIMS_Z_X_Y,
                    test_images_generator).loadData_1File(test_xData_file,
                                                          shuffle_images=False)
            elif TYPE_DNNLIBRARY_USED == 'Pytorch':
                # The image is used as its own ground-truth placeholder here;
                # only the x-data side is consumed for prediction.
                test_xData = LoadDataManager.loadData_1File(test_xData_file)
                test_batch_data_generator = TrainingBatchDataGenerator(
                    IMAGES_DIMS_Z_X_Y, [test_xData], [test_xData],
                    test_images_generator,
                    batch_size=1,
                    shuffle=False)
                (test_yData, test_xData) = DataSampleGenerator(
                    IMAGES_DIMS_Z_X_Y, [test_xData], [test_xData],
                    test_images_generator).get_full_data()
        else:
            test_xData = LoadDataManagerInBatches(
                IMAGES_DIMS_Z_X_Y).loadData_1File(test_xData_file)
            test_xData = np.expand_dims(test_xData, axis=0)

        print("Total Data batches generated: %s..." % (len(test_xData)))

        print("Evaluate model...")
        if TYPE_DNNLIBRARY_USED == 'Keras':
            predict_yData = model.predict(test_xData, batch_size=1)
        elif TYPE_DNNLIBRARY_USED == 'Pytorch':
            # NOTE(review): test_batch_data_generator is only assigned in the
            # sliding-window/transformation branch above; the Pytorch path
            # without those options would raise NameError here — confirm.
            predict_yData = trainer.predict(test_batch_data_generator)

        if (args.saveFeatMapsLayers):
            print("Compute feature maps of evaluated model...")
            featuremaps_data = visual_model_params.get_feature_maps(
                test_xData,
                args.nameSaveModelLayer,
                max_num_feat_maps=args.maxNumSaveFeatMapsLayers,
                first_feat_maps=args.firstSaveFeatMapsLayers)
        # ------------------------------------------

        # RECONSTRUCT FULL-SIZE PREDICTION
        # ------------------------------------------
        print("Reconstruct prediction to full size...")
        # Assign original images and masks files
        # NOTE(review): pairing relies on the index parsed from the processed
        # filename matching the sort order of the reference-image dir — verify.
        index_refer_img = getIndexOriginImagesFile(basename(test_xData_file),
                                                   beginString='images',
                                                   firstIndex='01')
        reference_img_file = listReferenceImgsFiles[index_refer_img]
        print("Reference image file: \'%s\'..." %
              (basename(reference_img_file)))

        # init reconstructor with size of "ifile"
        predict_fullsize_shape = FileReader.getImageSize(test_xData_file)
        images_reconstructor.complete_init_data(predict_fullsize_shape)

        prediction_array = images_reconstructor.compute(predict_yData)

        if (args.saveFeatMapsLayers):
            featuremaps_array = images_reconstructor.compute(featuremaps_data)

        # reconstruct from cropped / rescaled images
        reference_img_shape = FileReader.getImageSize(reference_img_file)

        if (args.cropImages):
            crop_bounding_box = dict_cropBoundingBoxes[filenamenoextension(
                reference_img_file)]
            print(
                "Predicted data are cropped. Extend array size to original. Bounding-box: \'%s\'..."
                % (str(crop_bounding_box)))

            # Pad the prediction back out to the uncropped reference size.
            prediction_array = ExtendImages.compute3D(prediction_array,
                                                      crop_bounding_box,
                                                      reference_img_shape)
            print("Final dims: %s..." % (str(prediction_array.shape)))

        if (args.masksToRegionInterest):
            print("Mask predictions to RoI: lungs...")
            in_roimask_file = listInputRoiMasksFiles[index_refer_img]
            print("RoI mask (lungs) file: \'%s\'..." %
                  (basename(in_roimask_file)))

            # Zero-out predicted voxels outside the lungs RoI.
            roimask_array = FileReader.getImageArray(in_roimask_file)
            prediction_array = OperationBinaryMasks.reverse_mask_exclude_voxels_fillzero(
                prediction_array, roimask_array)

        if (args.saveFeatMapsLayers):
            print("Reconstruct predicted feature maps to full size...")
            if (args.cropImages):
                # Feature maps carry an extra trailing channel axis.
                num_featmaps = featuremaps_array.shape[-1]
                featuremaps_shape = list(reference_img_shape) + [num_featmaps]
                featuremaps_array = ExtendImages.compute3D(
                    featuremaps_array, crop_bounding_box, featuremaps_shape)

            if (args.masksToRegionInterest):
                featuremaps_array = OperationBinaryMasks.reverse_mask_exclude_voxels_fillzero(
                    featuremaps_array, roimask_array)
        # ------------------------------------------

        out_prediction_file = joinpathnames(
            OutputPredictionPath, nameOutputPredictionFiles %
            (filenamenoextension(reference_img_file)))
        print("Output: \'%s\', of dims \'%s\'..." %
              (basename(out_prediction_file), prediction_array.shape))

        FileReader.writeImageArray(out_prediction_file, prediction_array)

        if (args.saveFeatMapsLayers):
            nameOutputFeatureMapsRelPath = nameOutputFeatureMapsDirs % (
                filenamenoextension(reference_img_file),
                args.nameSaveModelLayer)
            OutputFeatureMapsPath = workDirsManager.getNameNewPath(
                OutputPredictionPath, nameOutputFeatureMapsRelPath)

            # Write each feature map channel as a separate 1-indexed volume.
            num_featmaps = featuremaps_array.shape[-1]
            for ifeatmap in range(num_featmaps):
                out_featuremaps_file = joinpathnames(
                    OutputFeatureMapsPath, nameOutputFeatureMapsFiles %
                    (filenamenoextension(reference_img_file),
                     args.nameSaveModelLayer, get_index_featmap(ifeatmap) + 1))
                print("Output: \'%s\', of dims \'%s\'..." %
                      (basename(out_featuremaps_file),
                       featuremaps_array[..., ifeatmap].shape))

                FileReader.writeImageArray(out_featuremaps_file,
                                           featuremaps_array[..., ifeatmap])
# Exemplo n.º 10
# 0
def main(args):
    """Render an animated GIF per predicted case, overlaying prediction vs
    ground-truth masks on the CT slices: TYPE_ANIMATION '1' colours TP/FN/FP
    voxels (yellow/blue/red); '2' draws mask boundaries (green/red).
    """
    # ---------- SETTINGS ----------
    nameInputImagesRelPath = 'ProcImages'
    nameInputMasksRelPath = 'ProcMasks'

    namePredictMasksFiles = 'predict_binmasks*thres0-5_withtraquea.nii.gz'
    nameInputImagesFiles = '*.nii.gz'
    nameInputMasksFiles = '*outerwall*traquea.nii.gz'

    # template search files
    tempSearchInputFiles = 'av[0-9]*'

    # create file to save FROC values
    temp_outfilename = '%s_video.gif'
    # ---------- SETTINGS ----------

    workDirsManager = WorkDirsManager(args.basedir)
    BaseDataPath = workDirsManager.getNameBaseDataPath()
    InputPredictMasksPath = workDirsManager.getNameExistPath(
        args.basedir, args.predictionsdir)
    InputImagesPath = workDirsManager.getNameExistPath(args.basedir,
                                                       nameInputImagesRelPath)
    InputMasksPath = workDirsManager.getNameExistPath(args.basedir,
                                                      nameInputMasksRelPath)
    OutputPath = workDirsManager.getNameNewPath(args.basedir, 'movies_results')

    listPredictMasksFiles = findFilesDir(InputPredictMasksPath,
                                         namePredictMasksFiles)
    listImagesCTFiles = findFilesDir(InputImagesPath, nameInputImagesFiles)
    listGrndTruthMasksFiles = findFilesDir(InputMasksPath, nameInputMasksFiles)

    nbPredictionsFiles = len(listPredictMasksFiles)
    nbImagesCTFiles = len(listImagesCTFiles)
    nbGrndTruthMasksFiles = len(listGrndTruthMasksFiles)

    # Run checkers
    if (nbPredictionsFiles == 0):
        message = "0 Predictions found in dir \'%s\'" % (InputPredictMasksPath)
        CatchErrorException(message)
    if (nbImagesCTFiles == 0):
        message = "0 Images CT found in dir \'%s\'" % (InputImagesPath)
        CatchErrorException(message)
    if (nbGrndTruthMasksFiles == 0):
        message = "0 Ground-truth Masks found in dir \'%s\'" % (InputMasksPath)
        CatchErrorException(message)

    for i, predict_masks_file in enumerate(listPredictMasksFiles):

        print('\'%s\'...' % (predict_masks_file))

        # Case id (e.g. 'avNN...') extracted from the prediction filename,
        # used to pair it with the matching CT image and ground-truth mask.
        name_prefix_case = getExtractSubstringPattern(
            basename(predict_masks_file), tempSearchInputFiles)

        # NOTE(review): if no file matches, images_CT_file keeps the previous
        # iteration's value (or raises NameError on the first case) — confirm
        # every prediction has a matching CT/ground-truth pair.
        for iterfile_1, iterfile_2 in zip(listImagesCTFiles,
                                          listGrndTruthMasksFiles):
            if name_prefix_case in iterfile_1:
                images_CT_file = iterfile_1
                grndtruth_masks_file = iterfile_2
        #endfor
        print("assigned to '%s' and '%s'..." %
              (basename(images_CT_file), basename(grndtruth_masks_file)))

        predict_masks_array = FileReader.getImageArray(predict_masks_file)
        images_CT_array = FileReader.getImageArray(images_CT_file)
        grndtruth_masks_array = FileReader.getImageArray(grndtruth_masks_file)

        if (args.invertImageAxial):
            # Flip all three volumes along the axial (first) axis together.
            predict_masks_array = FlippingImages.compute(predict_masks_array,
                                                         axis=0)
            images_CT_array = FlippingImages.compute(images_CT_array, axis=0)
            grndtruth_masks_array = FlippingImages.compute(
                grndtruth_masks_array, axis=0)

        print("Rendering animations...")
        list_frames = []

        # NOTE(review): this 'i' shadows the outer enumerate index (harmless
        # here since the outer 'i' is never read, but worth renaming).
        for i in range(images_CT_array.shape[0]):

            images_CT_slice = images_CT_array[i, :, :]
            grndtruth_masks_slice = grndtruth_masks_array[i, :, :]
            predict_masks_slice = predict_masks_array[i, :, :]

            # Min-max normalise the CT slice to [0, 1] for display.
            # NOTE(review): divides by zero on a constant-valued slice.
            frame_image_CT = (
                images_CT_slice - np.min(images_CT_slice)
            ) / float(np.max(images_CT_slice) - np.min(images_CT_slice))

            # RGB frame: grayscale CT replicated into all three channels.
            frame_new = np.zeros(
                (images_CT_slice.shape[0], images_CT_slice.shape[1], 3),
                dtype=np.uint8)

            frame_new[:, :, :] = 255 * frame_image_CT[:, :, None]

            if (TYPE_ANIMATION == '1'):

                # Voxel-wise confusion categories (assumes masks are 0/1).
                index_frame_TP_mask = np.argwhere(grndtruth_masks_slice *
                                                  predict_masks_slice)
                index_frame_FN_mask = np.argwhere(grndtruth_masks_slice *
                                                  (1.0 - predict_masks_slice))
                index_frame_FP_mask = np.argwhere(
                    (1.0 - grndtruth_masks_slice) * predict_masks_slice)

                # paint True Positives, False Negatives and False Positives in yellow, blue and red colour, respectively
                for index in index_frame_TP_mask:
                    frame_new[tuple(index)] = YELLOW_COLOR
                for index in index_frame_FN_mask:
                    frame_new[tuple(index)] = BLUE_COLOR
                for index in index_frame_FP_mask:
                    frame_new[tuple(index)] = RED_COLOR

                is_valid_frame = len(index_frame_TP_mask) > 0 or len(
                    index_frame_FN_mask) > 0 or len(index_frame_FP_mask) > 0

            elif (TYPE_ANIMATION == '2'):

                # NOTE(review): find_boundaries returns a boolean mask, not
                # coordinates; iterating it yields rows, not (y, x) indices —
                # this branch likely needs np.argwhere around it. Confirm.
                index_frame_predict_bound_mask = skimage.segmentation.find_boundaries(
                    predict_masks_slice)
                index_frame_grndtruth_bound_mask = skimage.segmentation.find_boundaries(
                    grndtruth_masks_slice)

                # draw boundaries of prediction / ground-truth masks with green / red colour, respectively
                for index in index_frame_predict_bound_mask:
                    frame_new[tuple(index)] = GREEN_COLOR
                for index in index_frame_grndtruth_bound_mask:
                    frame_new[tuple(index)] = RED_COLOR

                is_valid_frame = len(
                    index_frame_predict_bound_mask) > 0 or len(
                        index_frame_grndtruth_bound_mask) > 0

            # skip frames that do not contain any predictions and/or ground-truth masks
            # NOTE(review): is_valid_frame is unbound if TYPE_ANIMATION is
            # neither '1' nor '2' — would raise NameError here.
            if is_valid_frame:
                list_frames.append(frame_new)

        if len(list_frames) > 0:
            print("Good movie...")
            outfilename = joinpathnames(OutputPath,
                                        temp_outfilename % (name_prefix_case))
            imageio.mimsave(outfilename, list_frames, fps=20)