Example #1
    (args.save_dir / 'Visualizations').mkdir(exist_ok=True)
    samples = os.listdir(str(args.mask_path))
    #samples = [os.path.basename(x) for x in glob(str(args.mask_path / '*'))]
    samples.sort()
    for sample in tqdm(samples, 'Analysing thickness'):
        # Sample path
        thickness_path = base_path / sample / subdir

        # Load image stacks
        if thickness_path.exists():
            data, files = load(str(thickness_path), rgb=False, n_jobs=args.n_threads)
        else:
            continue

        # Visualize thickness map
        print_orthogonal(data, savepath=str(args.save_dir / 'Visualizations' / ('CCTh_' + sample + '.png')))

        # Histogram
        data = data.flatten() * 3.2  # Scale to physical units (resolution 3.2, as in the visualizations)
        #data = data[data != 0]  # Remove zeros
        max_value = np.max(data)
        hist, bin_edges = np.histogram(data, bins=254, range=[1, max_value])  # Exclude background

        plt.hist(data, bins=bin_edges)
        plt.title(sample)
        plt.savefig(str(args.save_dir / 'Visualizations' / ('histogram_' + sample + '.png')))
        plt.close()  # Release the figure so plots do not accumulate across samples

        # Save the most frequent value
        results['Most frequent thickness value'].append(bin_edges[np.argmax(hist)])
        results['Sample'].append(sample)
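
A quick way to sanity-check the mode extraction above: np.histogram returns len(hist) counts and len(hist) + 1 bin edges, so bin_edges[np.argmax(hist)] is the left edge of the most populated bin. A minimal, self-contained sketch with synthetic data (not the actual thickness maps):

import numpy as np

rng = np.random.default_rng(42)
thickness = rng.normal(loc=100.0, scale=10.0, size=10_000)  # synthetic thickness values

# 254 bins between 1 and the maximum, mirroring the snippet above
hist, bin_edges = np.histogram(thickness, bins=254, range=[1, thickness.max()])
mode_estimate = bin_edges[np.argmax(hist)]  # left edge of the modal bin
print(f'Most frequent thickness: {mode_estimate:.1f}')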
Example #2
                 mask_final,
                 dtype=args.dtype)
            (args.save_dir.parent / 'probability').mkdir(exist_ok=True)
            save(str(args.save_dir.parent / 'probability' / sample),
                 files,
                 mask_avg * 255,
                 dtype=args.dtype)
        """
        render_volume(data_yz[:, :, :, 0] * mask_final,
                      savepath=str(args.save_dir / 'visualizations' / (sample + '_render' + args.dtype)),
                      white=True, use_outline=False)
        """
        print_orthogonal(data_yz[:, :, :, 0],
                         invert=True,
                         res=3.2,
                         title=None,
                         cbar=True,
                         savepath=str(args.save_dir / 'visualizations' /
                                      (sample + '_input.png')),
                         scale_factor=1000)

        print_orthogonal(data_yz[:, :, :, 0],
                         mask=mask_final,
                         invert=True,
                         res=3.2,
                         title=None,
                         cbar=True,
                         savepath=str(args.save_dir / 'visualizations' /
                                      (sample + '_prediction.png')),
                         scale_factor=1000)
    #except Exception as e:
    #    print(f'Sample {sample} failed due to error:\n\n {e}\n\n.')
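
print_orthogonal comes from the project's own plotting utilities. As a rough stand-in, assuming it renders the three central orthogonal slices of a volume (the show_orthogonal name and layout below are illustrative, not the library's API):

import matplotlib.pyplot as plt
import numpy as np

def show_orthogonal(volume, savepath=None):
    # Plot the three central orthogonal planes of a 3D volume
    cx, cy, cz = (s // 2 for s in volume.shape)
    fig, axes = plt.subplots(1, 3, figsize=(12, 4))
    for ax, (plane, title) in zip(axes, [(volume[cx, :, :], 'YZ'),
                                         (volume[:, cy, :], 'XZ'),
                                         (volume[:, :, cz], 'XY')]):
        ax.imshow(plane, cmap='gray')
        ax.set_title(title + ' plane')
    if savepath is not None:
        fig.savefig(savepath)
    plt.close(fig)

show_orthogonal(np.random.rand(64, 64, 64))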
Example #3
                        default='.bmp')
    args = parser.parse_args()

    # Loop for samples
    args.save_dir.mkdir(exist_ok=True)
    samples = os.listdir(str(args.data_path))
    #samples = [os.path.basename(x) for x in glob(str(args.mask_path / '*.png'))]
    samples.sort()
    for sample in tqdm(samples, 'Smoothing'):
        #try:
        # Load image
        _, data = load(str(args.data_path / sample), uCT=True)
        data_scaled = map_uint16_to_uint8(data,
                                          lower_bound=0,
                                          upper_bound=40000)
        print_orthogonal(data_scaled)
        img = cv2.imread(str(args.mask_path / sample), cv2.IMREAD_GRAYSCALE)
        if args.plot:
            plt.imshow(img)
            plt.title('Loaded image')
            plt.show()
        # Opening
        kernel = cv2.getStructuringElement(shape=cv2.MORPH_ELLIPSE,
                                           ksize=args.k_closing)
        img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel=kernel)
        #plt.imshow(img); plt.title('Opening'); plt.show()

        # Gaussian blur
        img = cv2.GaussianBlur(img, ksize=args.k_gauss, sigmaX=0, sigmaY=0)
        #plt.imshow(img); plt.title('Gaussian blur'); plt.show()
        # Median filter (round kernel 7)
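
The smoothing pipeline above (morphological opening, Gaussian blur, then a median filter) can be exercised end to end on a synthetic slice. A minimal sketch, with assumed kernel sizes (7, 7) for the opening and (13, 13) for the blur, and cv2.medianBlur standing in for the round-kernel median mentioned in the comment:

import cv2
import numpy as np

# Synthetic noisy binary mask (0/255), standing in for one loaded slice
mask = np.zeros((128, 128), np.uint8)
cv2.circle(mask, (64, 64), 40, 255, -1)
speckle = (np.random.rand(128, 128) > 0.98).astype(np.uint8) * 255
mask = cv2.bitwise_or(mask, speckle)

# Morphological opening removes the small speckles
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
smoothed = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

# Gaussian blur softens the boundary; the median filter restores a crisp edge
smoothed = cv2.GaussianBlur(smoothed, (13, 13), 0)
smoothed = cv2.medianBlur(smoothed, 7)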
Example #4
def evaluation_runner(args, config, save_dir):
    """
    Calculates evaluation metrics on predicted masks against target.
    :param args:
    :param config:
    :param save_dir:
    :return:
    """
    start_eval = time()

    # Evaluation arguments
    args.image_path = args.data_location / 'images'
    args.mask_path = args.data_location / 'masks'
    args.pred_path = args.data_location / 'predictions'
    args.save_dir = args.data_location / 'evaluation'
    args.save_dir.mkdir(exist_ok=True)
    args.n_labels = 2

    # Snapshots to be evaluated
    if not isinstance(save_dir, list):
        save_dir = [save_dir]

    # Iterate through snapshots
    for snap in save_dir:

        # Initialize results
        results = {'Sample': [], 'Dice': [], 'IoU': [], 'Similarity': []}

        # Loop for samples
        (args.save_dir / ('visualizations_' + snap.name)).mkdir(exist_ok=True)
        samples = os.listdir(str(args.mask_path))
        samples.sort()
        try:
            for idx, sample in enumerate(samples):

                print(
                    f'==> Processing sample {idx + 1} of {len(samples)}: {sample}'
                )

                # Load image stacks
                if config['training']['experiment'] == '3D':
                    mask, files_mask = load(str(args.mask_path / sample),
                                            axis=(0, 2, 1),
                                            rgb=False,
                                            n_jobs=args.n_threads)

                    pred, files_pred = load(str(args.pred_path / snap.name /
                                                sample),
                                            axis=(0, 2, 1),
                                            rgb=False,
                                            n_jobs=args.n_threads)
                    data, files_data = load(str(args.image_path / sample),
                                            axis=(0, 2, 1),
                                            rgb=False,
                                            n_jobs=args.n_threads)

                    # Crop in case of inconsistency (elementwise minimum of the
                    # shapes; plain min() would compare the tuples lexicographically)
                    crop = np.minimum(pred.shape, mask.shape)
                    mask = mask[:crop[0], :crop[1], :crop[2]]
                    pred = pred[:crop[0], :crop[1], :crop[2]]

                else:
                    data = cv2.imread(str(args.image_path / sample))
                    mask = cv2.imread(str(args.mask_path / sample),
                                      cv2.IMREAD_GRAYSCALE)
                    pred = cv2.imread(str(args.pred_path / snap.name / sample),
                                      cv2.IMREAD_GRAYSCALE)
                    if pred is None:
                        sample = sample[:-4] + '.bmp'
                        pred = cv2.imread(
                            str(args.pred_path / snap.name / sample),
                            cv2.IMREAD_GRAYSCALE)
                    elif mask is None:
                        mask = cv2.imread(str(args.mask_path / sample),
                                          cv2.IMREAD_GRAYSCALE)

                    # Crop in case of inconsistency (elementwise minimum of the
                    # shapes; plain min() would compare the tuples lexicographically)
                    crop = np.minimum(pred.shape, mask.shape)
                    mask = mask[:crop[0], :crop[1]]
                    pred = pred[:crop[0], :crop[1]]

                # Evaluate metrics
                conf_matrix = calculate_conf(pred.astype(bool),
                                             mask.astype(bool),
                                             args.n_labels)
                dice = calculate_dice(conf_matrix)[1]
                iou = calculate_iou(conf_matrix)[1]
                sim = calculate_volumetric_similarity(conf_matrix)[1]

                print(
                    f'Sample {sample}: dice = {dice}, IoU = {iou}, similarity = {sim}'
                )

                # Save predicted full mask
                if config['training']['experiment'] == '3D':
                    print_orthogonal(
                        data,
                        invert=False,
                        res=3.2,
                        cbar=True,
                        savepath=str(args.save_dir /
                                     ('visualizations_' + snap.name) /
                                     (sample + '_input.png')),
                        scale_factor=1500)
                    print_orthogonal(
                        data,
                        mask=mask,
                        invert=False,
                        res=3.2,
                        cbar=True,
                        savepath=str(args.save_dir /
                                     ('visualizations_' + snap.name) /
                                     (sample + '_reference.png')),
                        scale_factor=1500)
                    print_orthogonal(
                        data,
                        mask=pred,
                        invert=False,
                        res=3.2,
                        cbar=True,
                        savepath=str(args.save_dir /
                                     ('visualizations_' + snap.name) /
                                     (sample + '_prediction.png')),
                        scale_factor=1500)

                # Update results
                results['Sample'].append(sample)
                results['Dice'].append(dice)
                results['IoU'].append(iou)
                results['Similarity'].append(sim)

        except AttributeError:
            print(f'Sample {sample} failing. Skipping to next one.')
            continue

        # Append average values to the results
        results['Sample'].append('Average values')
        results['Dice'].append(np.average(results['Dice']))
        results['IoU'].append(np.average(results['IoU']))
        results['Similarity'].append(np.average(results['Similarity']))

        # Write to Excel (the context manager saves the file on exit;
        # ExcelWriter.save() was removed in recent pandas)
        with pd.ExcelWriter(
                str(args.save_dir / ('metrics_' + str(snap.name))) + '.xlsx') as writer:
            df1 = pd.DataFrame(results)
            df1.to_excel(writer, sheet_name='Metrics')

        print(
            f'Metrics evaluated in {(time() - start_eval) // 60:.0f} minutes, {(time() - start_eval) % 60:.0f} seconds.'
        )
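
calculate_conf and the metric helpers above are project utilities. For the binary case, the standard confusion-matrix formulas are Dice = 2TP / (2TP + FP + FN), IoU = TP / (TP + FP + FN), and volumetric similarity = 1 - |FN - FP| / (2TP + FP + FN). A minimal self-contained sketch of these formulas, not the project's actual implementation:

import numpy as np

def binary_metrics(pred, mask):
    # Confusion-matrix counts for the foreground class
    tp = np.sum(pred & mask)
    fp = np.sum(pred & ~mask)
    fn = np.sum(~pred & mask)
    dice = 2 * tp / (2 * tp + fp + fn)
    iou = tp / (tp + fp + fn)
    similarity = 1 - abs(fn - fp) / (2 * tp + fp + fn)
    return dice, iou, similarity

pred = np.random.rand(64, 64) > 0.5
mask = np.random.rand(64, 64) > 0.5
print(binary_metrics(pred, mask))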
Example #5
        time_sample = time()
        print(f'Processing sample {sample}')

        # Load prediction
        pred, files = load(str(args.masks / sample), axis=(1, 2, 0))

        # Downscale
        #pred = (ndi.zoom(pred, 0.25) > 126).astype(bool)

        if args.plot:
            print_orthogonal(pred,
                             savepath=str(args.th_maps / 'visualization' /
                                          (sample + '_pred.png')))

        # Median filter
        pred = ndi.median_filter(pred, size=args.median)
        if args.plot:
            print_orthogonal(pred,
                             savepath=str(args.th_maps / 'visualization' /
                                          (sample + '_median.png')))

        # Thickness analysis
        # Create array of correct size
        th_map = _local_thickness(pred,
                                  mode=args.mode,
                                  spacing_mm=args.resolution,
                                  stack_axis=1,
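
_local_thickness is a project helper (its call above is cut off in this excerpt). Local thickness is typically defined by sphere fitting (Hildebrand and Rüegsegger); as a rough, self-contained approximation, twice the Euclidean distance to the nearest background voxel gives a lower-bound thickness map:

import numpy as np
from scipy import ndimage as ndi

def crude_thickness_map(mask, spacing_mm=1.0):
    # Lower-bound estimate: diameter of the largest ball centred at each
    # voxel that fits in the foreground; background voxels stay at zero
    dist = ndi.distance_transform_edt(mask, sampling=spacing_mm)
    return 2.0 * dist

mask = np.zeros((32, 32, 32), bool)
mask[8:24, 8:24, 8:24] = True  # a 16-voxel cube
th_map = crude_thickness_map(mask, spacing_mm=0.0032)
print(th_map.max())  # thickness estimate at the cube centre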
Example #6
            # Crop in case of inconsistency (elementwise minimum of the shapes;
            # plain min() would compare the tuples lexicographically)
            crop = np.minimum(pred.shape, mask.shape)
            mask = mask[:crop[0], :crop[1], :crop[2]]
            pred = pred[:crop[0], :crop[1], :crop[2]]

            # Evaluate metrics
            conf_matrix = calculate_conf(pred.astype(bool), mask.astype(bool), args.n_labels)
            dice = calculate_dice(conf_matrix)[1]
            iou = calculate_iou(conf_matrix)[1]
            sim = calculate_volumetric_similarity(conf_matrix)[1]

            print(f'Sample {sample}: dice = {dice}, IoU = {iou}, similarity = {sim}')

            # Save predicted full mask
            print_orthogonal(data, invert=False, res=3.2, cbar=True,
                             savepath=str(args.save_dir / ('visualizations_' + snap) / (sample + '_input.png')),
                             scale_factor=1500)
            print_orthogonal(data, mask=mask, invert=False, res=3.2, cbar=True,
                             savepath=str(args.save_dir / ('visualizations_' + snap) / (sample + '_reference.png')),
                             scale_factor=1500)
            print_orthogonal(data, mask=pred, invert=False, res=3.2, cbar=True,
                             savepath=str(args.save_dir / ('visualizations_' + snap) / (sample + '_prediction.png')),
                             scale_factor=1500)

            # Update results
            results['Sample'].append(sample)
            results['Dice'].append(dice)
            results['IoU'].append(iou)
            results['Similarity'].append(sim)

        # Append average values to the results
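
A note on the crop idiom used in these examples: Python's min() on two shape tuples compares them lexicographically and returns one of the tuples whole, which is why np.minimum is used to get an elementwise crop target. A quick demonstration:

import numpy as np

pred_shape, mask_shape = (5, 10), (6, 2)
print(min(pred_shape, mask_shape))         # (5, 10): lexicographic, wrong for cropping
print(np.minimum(pred_shape, mask_shape))  # [5 2]: elementwise, safe crop target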