Code example #1
# Assumed imports for this snippet; UNet, Dataset, data_loader, makedirs,
# postprocess_per_volume, dsc_distribution, plot_dsc, gray2rgb, outline and
# imsave come from the project's own modules.
import os

import torch
from tqdm import tqdm


def main(args):
    makedirs(args)
    device = torch.device(
        "cpu" if not torch.cuda.is_available() else args.device)

    loader = data_loader(args)

    with torch.set_grad_enabled(False):
        unet = UNet(in_channels=Dataset.in_channels,
                    out_channels=Dataset.out_channels)
        # unet = NestedUNet(in_ch=Dataset.in_channels, out_ch=Dataset.out_channels)
        state_dict = torch.load(args.weights, map_location=device)
        unet.load_state_dict(state_dict)
        unet.eval()
        unet.to(device)

        input_list = []
        pred_list = []
        true_list = []

        for x, y_true in tqdm(loader):
            x, y_true = x.to(device), y_true.to(device)

            y_pred = unet(x)
            y_pred_np = y_pred.detach().cpu().numpy()
            pred_list.extend([y_pred_np[s] for s in range(y_pred_np.shape[0])])

            y_true_np = y_true.detach().cpu().numpy()
            true_list.extend([y_true_np[s] for s in range(y_true_np.shape[0])])

            x_np = x.detach().cpu().numpy()
            input_list.extend([x_np[s] for s in range(x_np.shape[0])])

    volumes = postprocess_per_volume(
        input_list,
        pred_list,
        true_list,
        loader.dataset.patient_slice_index,
        loader.dataset.patients,
    )

    dsc_dist = dsc_distribution(volumes)

    dsc_dist_plot = plot_dsc(dsc_dist)
    imsave(args.figure, dsc_dist_plot)

    for p in volumes:
        x = volumes[p][0]
        y_pred = volumes[p][1]
        y_true = volumes[p][2]
        for s in range(x.shape[0]):
            image = gray2rgb(x[s, 1])  # channel 1 is for FLAIR
            image = outline(image, y_pred[s, 0], color=[255, 0, 0])
            image = outline(image, y_true[s, 0], color=[0, 255, 0])
            filename = "{}-{}.png".format(p, str(s).zfill(2))
            filepath = os.path.join(args.predictions, filename)
            imsave(filepath, image)
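
A minimal sketch of how main() might be invoked. The argument names below are
assumptions inferred only from the fields the snippet reads (args.device,
args.weights, args.figure, args.predictions); the real project may define more
options (e.g. for data_loader and makedirs).

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="U-Net inference (sketch)")
    parser.add_argument("--device", default="cuda:0")
    parser.add_argument("--weights", default="./weights/unet.pt")  # hypothetical path
    parser.add_argument("--figure", default="./dsc.png")
    parser.add_argument("--predictions", default="./predictions")
    main(parser.parse_args())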
Code example #2
# Assumed imports: cv2 (OpenCV), a PaddleHub human-segmentation module bound
# to human_seg, and project-local utils for the base64 conversions.
def run(b64str):
    im = utils.base64_to_cv2(b64str)
    cv2.imwrite('human_seg.png', im)  # keep the decoded input for inspection
    res = human_seg.segment(images=[im], visualization=False)
    img = utils.gray2rgb(res[0]['data'])  # mask expanded to three channels

    # Reuse the segmentation mask as the alpha channel so that the
    # background becomes transparent in the output image.
    b_channel, g_channel, r_channel = cv2.split(img)
    alpha_channel = res[0]['data']
    img_BGRA = cv2.merge((b_channel, g_channel, r_channel, alpha_channel))

    return utils.cv2_to_base64(img_BGRA)
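
A hedged usage sketch for run(): encode an image file to base64, segment it,
and save the returned cutout. It assumes base64_to_cv2/cv2_to_base64 wrap
cv2.imdecode/imencode as in common PaddleHub serving examples, and that
cv2_to_base64 uses a PNG-encoded buffer (JPEG would drop the alpha channel);
the file names are hypothetical.

import base64

import cv2
import numpy as np

with open("person.jpg", "rb") as f:
    b64str = base64.b64encode(f.read()).decode("utf-8")

result = run(b64str)
buf = np.frombuffer(base64.b64decode(result), dtype=np.uint8)
rgba = cv2.imdecode(buf, cv2.IMREAD_UNCHANGED)  # 4-channel image, mask as alpha
cv2.imwrite("person_cutout.png", rgba)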
Code example #3
File: vision.py  Project: templeblock/PlaqueDetection
# Assumed imports: matplotlib.pyplot as plt and a gray2rgb helper
# (e.g. skimage.color.gray2rgb).
def sample_stack_color(stack,
                       metrics,
                       rows=10,
                       cols=10,
                       start_with=0,
                       show_every=2,
                       scale=4,
                       fig_name=None):
    """ show stacked image samples
    Args:
        stack: numpy ndarray, input stack to plot, shape (n_slices, H, W)
        metrics: per-slice scores (e.g. F1) shown in each subplot title
        rows, cols: grid layout
        start_with: index of the first slice to plot
        show_every: step between plotted slices
        scale: subplot size in inches
        fig_name: if given, the figure is saved as '<fig_name>.png'
    """
    _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])
    for i in range(rows * cols):
        ind = start_with + i * show_every
        if ind < len(stack):
            r, c = i // cols, i % cols
            ax[r, c].set_title('F1= {:.4f}'.format(metrics[ind]))
            ax[r, c].imshow(gray2rgb(stack[ind]))
            ax[r, c].axis('off')

    if fig_name:
        plt.savefig(fig_name + '.png')
    plt.close()
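
A quick usage sketch with synthetic data (shapes and values are illustrative
only): 100 grayscale slices with one F1 score each, plotted as a 5 x 5 grid.

import numpy as np

stack = np.random.rand(100, 64, 64)   # (n_slices, H, W) grayscale stack
metrics = np.random.rand(100)         # one F1 score per slice
sample_stack_color(stack, metrics, rows=5, cols=5, fig_name="samples")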
Code example #4
File: test.py  Project: prasad3000/brain_segmentation
# (fragment: canvas, y_positions, values and labels are defined earlier in
# the source function)
plt.barh(y_positions, values, align="center", color="skyblue")
plt.yticks(y_positions, labels)
plt.xticks(np.arange(0.0, 1.0, 0.1))
plt.xlim([0.0, 1.0])
plt.gca().axvline(np.mean(values), color="tomato", linewidth=2)
plt.gca().axvline(np.median(values), color="forestgreen", linewidth=2)
plt.xlabel("Dice coefficient", fontsize="x-large")
plt.gca().xaxis.grid(color="silver", alpha=0.5, linestyle="--", linewidth=1)
plt.tight_layout()
canvas.draw()
plt.close()
s, (width, height) = canvas.print_to_buffer()
# np.frombuffer replaces the deprecated np.fromstring
dsc_dist_plot = np.frombuffer(s, dtype=np.uint8).reshape((height, width, 4))

#imsave('dsc.png', dsc_dist_plot)
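
For context, the figure-to-array pattern used above can be reproduced in a
self-contained form with matplotlib's Agg backend:

import numpy as np
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure

fig = Figure(figsize=(6, 4))
canvas = FigureCanvasAgg(fig)
ax = fig.add_subplot(111)
ax.barh([0, 1, 2], [0.70, 0.85, 0.90], color="skyblue")
canvas.draw()
s, (width, height) = canvas.print_to_buffer()  # RGBA bytes plus size
rgba = np.frombuffer(s, dtype=np.uint8).reshape((height, width, 4))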

for p in volumes:
    x = volumes[p][0]
    y_pred = volumes[p][1]
    y_true = volumes[p][2]
    for s in range(x.shape[0]):
        image = gray2rgb(x[s, 1])  # channel 1 is for FLAIR
        image = outline(image, y_pred[s, 0], color=[255, 0, 0])
        image = outline(image, y_true[s, 0], color=[0, 255, 0])
        filename = "{}-{}.png".format(p, str(s).zfill(2))
        filepath = os.path.join(predictions, filename)
        imsave(filepath, image)
Code example #5
# Assumed imports, as in example #1, plus numpy for the pixel counting below.
import os

import numpy as np
import torch
from tqdm import tqdm


def main(args):
    makedirs(args)
    device = torch.device(
        "cpu" if not torch.cuda.is_available() else args.device)

    loader = data_loader(args)

    FOV_x = 20  # AP FOV_x mm
    FOV_y = 20  # AP FOV_y mm
    FOV_z = 1  # AP Thickness mm
    resolution_x = FOV_x / 256
    resolution_y = FOV_y / 256

    conversion_factor = 1  # Conversion factor for cropping. Typically 3.5
    print('Conversion factor for automatic seg = %.2f' % conversion_factor)

    with torch.set_grad_enabled(False):
        unet = UNet(in_channels=Dataset.in_channels,
                    out_channels=Dataset.out_channels)
        state_dict = torch.load(args.weights, map_location=device)
        unet.load_state_dict(state_dict)
        unet.eval()
        unet.to(device)

        input_list = []
        pred_list = []
        true_list = []

        for x, y_true in tqdm(loader):
            x, y_true = x.to(device), y_true.to(device)

            y_pred = unet(x)
            y_pred_np = y_pred.detach().cpu().numpy()
            pred_list.extend([y_pred_np[s] for s in range(y_pred_np.shape[0])])

            y_true_np = y_true.detach().cpu().numpy()
            true_list.extend([y_true_np[s] for s in range(y_true_np.shape[0])])

            x_np = x.detach().cpu().numpy()
            input_list.extend([x_np[s] for s in range(x_np.shape[0])])

    volumes = postprocess_per_volume(
        input_list,
        pred_list,
        true_list,
        loader.dataset.patient_slice_index,
        loader.dataset.patients,
    )

    dsc_dist = dsc_distribution(volumes, conversion_factor, resolution_x,
                                resolution_y, FOV_z)

    dsc_dist_plot = plot_dsc(dsc_dist)
    imsave(args.figure, dsc_dist_plot)

    filepath_pixels_predicted_total = "{}.txt".format(
        "predictions/Predicted-volume-singleslice_total")  # AP
    with open(filepath_pixels_predicted_total, 'w') as fff:  # AP
        fff.write('FOV = %s x %s x %s mm3 \n' % (FOV_x, FOV_y, FOV_z))
        fff.write('Matrix size = 256x256 \n\n')
        fff.write('Predicted volume per single slice for all cases (mm3): \n')

        filepath_pixels_true_total = "{}.txt".format(
            "predictions/True-volume-singleslice_total")  # AP
        with open(filepath_pixels_true_total, 'w') as ffff:  # AP
            ffff.write('FOV = %s x %s x %s mm3 \n' % (FOV_x, FOV_y, FOV_z))
            ffff.write('Matrix size = 256x256 \n\n')
            ffff.write(
                'True (manual seg) volume per single slice for all cases (mm3): \n'
            )

            for p in volumes:
                x = volumes[p][0]
                y_pred = volumes[p][1]
                #y_pred_round = np.round(y_pred).astype(int)
                #print(p, 'y_pred is = ', y_pred_round)
                y_true = volumes[p][2]

                y_pred_pixels_total = 0  #AP
                y_true_pixels_total = 0
                y_pred_pixels_singleslice = 0  #AP
                volume_predicted_value_singleslice = 0  #AP

                y_true_pixels_singleslice = 0  #AP
                volume_true_value_singleslice = 0  #AP

                for s in range(x.shape[0]):

                    # Volume true per slice
                    y_true_array = y_true[s, 0] * 1  # AP
                    y_true_pixels_total = np.count_nonzero(
                        y_true_array == 1) + y_true_pixels_total  # AP

                # Read the total volume from the ClinicalVolumes manual
                # segmentation ("images" is the argument corresponding to
                # ./kaggle_3m)  # AP
                filepath_vol = os.path.join(args.images, p, 'vol.txt')
                with open(filepath_vol, 'r') as read_volume:  # AP
                    lines = read_volume.readlines()  # AP
                volume_true_value_fromline = lines[12]  # AP
                volume_true_value_ClinicalVolumes = float(
                    volume_true_value_fromline)  # AP
                # volume_true_value_ClinicalVolumes = volume_true_value_ClinicalVolumes  # AP (removed 5: the term adjusting for the contour differences in T2)

                volume_true_value_before_correction = resolution_x * resolution_y * FOV_z * y_true_pixels_total  # AP volume calc
                # Calculating the true conversion factor: the mask volume is
                # calculated incorrectly by the software, so it needs an
                # adjustment.  # AP
                true_conversion_factor = volume_true_value_before_correction / volume_true_value_ClinicalVolumes
                print('Conversion factor for manual seg = %.2f' %
                      true_conversion_factor)
                # Calculating the true volume
                volume_true_value = volume_true_value_before_correction / true_conversion_factor
                y_true_pixels_total = y_true_pixels_total / true_conversion_factor

                # Creating predicted and True volumes
                filename_pixels_predicted_true = "{}-{}.txt".format(
                    p, "Predicted_and-True-volume-singleslice")  # AP
                filepath_pixels_predicted_true = os.path.join(
                    args.predictions, filename_pixels_predicted_true)  # AP
                with open(filepath_pixels_predicted_true, 'w') as f:  # AP

                    # Creating True single slice volumes
                    f.write(
                        'True (manual seg) volume per single slice (mm3): \n')
                    for s in range(x.shape[0]):
                        y_true_array = y_true[s, 0] * 1  # AP
                        y_true_pixels_singleslice = np.count_nonzero(
                            y_true_array ==
                            1) + y_true_pixels_singleslice  # AP
                        volume_true_value_singleslice = resolution_x * resolution_y * FOV_z * y_true_pixels_singleslice / true_conversion_factor  # /conversion_factor #* 1.5

                        f.write('%.2f' % volume_true_value_singleslice)
                        f.write('\n')  # AP
                        y_true_pixels_singleslice = 0

                        ffff.write('%.2f \n' %
                                   volume_true_value_singleslice)  # AP
                    # END Creating True single slice volumes

                    # Creating predicted volumes
                    f.write('\nPredicted volume per single slice (mm3): \n')
                    for s in range(x.shape[0]):
                        # Images creation
                        image = gray2rgb(x[s, 1])  # channel 1 is for FLAIR

                        # AP: y_pred in RED is the predicted volume,
                        # y_true in GREEN is the true (manual) volume.
                        image = outline(image, y_pred[s, 0], color=[255, 0, 0])
                        image = outline(image, y_true[s, 0], color=[0, 255, 0])

                        filename = "{}-{}.png".format(p, str(s).zfill(2))
                        filepath = os.path.join(args.predictions, filename)
                        imsave(filepath, image)
                        # End images creation

                        # Volume predicted per slice
                        y_pred_array = y_pred[s, 0] * 1  #AP
                        y_pred_pixels_total = np.count_nonzero(
                            y_pred_array == 1) + y_pred_pixels_total  #AP
                        y_pred_pixels_singleslice = np.count_nonzero(
                            y_pred_array ==
                            1) + y_pred_pixels_singleslice  # AP
                        volume_predicted_value_singleslice = resolution_x * resolution_y * FOV_z * y_pred_pixels_singleslice / conversion_factor  # AP  predicted volume per slice
                        f.write('%.2f' % volume_predicted_value_singleslice)
                        f.write('\n')  # AP
                        y_pred_pixels_singleslice = 0
                        # End volume predicted per slice

                        fff.write('%.2f \n' %
                                  volume_predicted_value_singleslice)
                    # END Creating predicted volumes

                # Predicted mask pixels total
                y_pred_pixels_total = y_pred_pixels_total / conversion_factor  # AP 3.5 is the conversion factor for skull stripping image zooming
                # Calculating the predicted volume
                volume_predicted_value = resolution_x * resolution_y * FOV_z * y_pred_pixels_total  # AP  volume calc

                # Calculating the difference in volumes
                volume_difference = volume_true_value - volume_predicted_value  #AP
                if (volume_true_value == 0):  #AP
                    volume_difference_percentage = 0  #AP
                else:
                    volume_difference_percentage = abs(
                        100 - (volume_predicted_value * 100 /
                               volume_true_value))  #AP

                filename_pixels = "{}-{}.txt".format(p, "Volume")  #AP
                filepath_pixels = os.path.join(args.predictions,
                                               filename_pixels)  #AP

                with open(filepath_pixels, 'w') as f:  #AP
                    f.write('FOV = %s x %s x %s mm3 \n' %
                            (FOV_x, FOV_y, FOV_z))
                    f.write('Matrix size = 256x256 \n\n')
                    f.write('Number of pixels true      = %d \n' %
                            y_true_pixels_total)

                    f.write('Number of pixels predicted = %d \n\n' %
                            y_pred_pixels_total)

                    f.write(
                        'Volume from manual segmentation (mm3)    = %.2f \n' %
                        volume_true_value)

                    f.write(
                        'Volume from predicted segmentation (mm3) = %.2f \n\n'
                        % volume_predicted_value)

                    f.write('Volume difference (mm3) = %.2f \n' %
                            volume_difference)

                    f.write('Volume difference percentage (%%) = %.2f \n' %
                            volume_difference_percentage)

                with open(filepath_pixels) as f1:
                    with open(filepath_pixels_predicted_true) as f2:
                        filename_pixels_volume_summary = "{}-{}.txt".format(
                            p, "Volume_summary")  # AP
                        filepath_pixels_volume_summary = os.path.join(
                            args.predictions,
                            filename_pixels_volume_summary)  # AP
                        with open(filepath_pixels_volume_summary, "w") as f3:
                            for line in f1:
                                f3.write(line)
                            f3.write('\n\n')
                            for line in f2:
                                f3.write(line)

                os.remove(filepath_pixels)
                os.remove(filepath_pixels_predicted_true)
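
The volume arithmetic repeated throughout this example reduces to pixel count
times voxel size, optionally divided by a conversion factor. A sketch of that
formula as a standalone helper (the names mirror the variables above):

import numpy as np

def mask_volume_mm3(mask, resolution_x, resolution_y, FOV_z,
                    conversion_factor=1.0):
    """Volume of a binary mask slice in mm3."""
    n_pixels = np.count_nonzero(mask == 1)
    return resolution_x * resolution_y * FOV_z * n_pixels / conversion_factor

# e.g. a fully segmented 256x256 slice of a 20 x 20 x 1 mm FOV is 400 mm3:
print(mask_volume_mm3(np.ones((256, 256)), 20 / 256, 20 / 256, 1))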
Code example #6
File: vision.py  Project: templeblock/PlaqueDetection
# Assumed imports: matplotlib.pyplot as plt and a gray2rgb helper, as in
# example #3.
def sample_list3(data_list,
                 rows=15,
                 cols=6,
                 start_with=0,
                 show_every=2,
                 scale=4,
                 fig_name=None,
                 start_inx=0):
    """ show sample of a list of data
        here we plot the two input windows, label, hu0050 mask, overlap and
        label+overlap, so the grid needs six columns (cols defaults to 6
        because the body indexes up to ax[i, 5])
        this function is mainly for plotting outputs, predictions as well as average F1 scores
    Args:
        data_list: list, list of data in which each element is a dictionary
        start_inx: int, starting slice index for current figure
    """

    n_batch = len(data_list)
    _, ax = plt.subplots(rows, cols, figsize=[scale * cols, scale * rows])

    for ind in range(n_batch):
        # read data and calculate average precision
        input1 = data_list[ind]['slice1']
        input2 = data_list[ind]['slice2']
        label = data_list[ind]['label']
        hu0050 = data_list[ind]['hu0050']
        overlap = data_list[ind]['overlap']
        f_score = data_list[ind]['f1']
        mix_overlap = data_list[ind]['mix_overlap']
        noncal_eval = data_list[ind]['noncal_eval']
        file_path = data_list[ind]['file_path']
        if (ind - start_with) % show_every == 0:
            i = (ind - start_with) // show_every
            if i < rows:
                ax[i, 0].imshow(input1, cmap='gray')
                ax[i, 0].set_title("Slice {} ({}) \n {}".format(
                    ind + start_inx, file_path, 'Input with HU(-100~155)'),
                                   loc='left')
                ax[i, 0].axis('off')

                ax[i, 1].imshow(input2, cmap='gray')
                ax[i, 1].set_title("{}".format('Input with HU(200~1200)'))
                ax[i, 1].axis('off')

                ax[i, 2].imshow(gray2rgb(label))
                ax[i, 2].set_title('{}'.format('Label'))
                ax[i, 2].axis('off')

                ax[i, 3].imshow(gray2rgb(hu0050))
                ax[i, 3].set_title('{}'.format('Mask HU(0~50)'))
                ax[i, 3].axis('off')

                ax[i, 4].imshow(gray2rgb(overlap))
                ax[i,
                   4].set_title('{} (F1= {:.4f})'.format('Overlap', f_score))
                ax[i, 4].axis('off')

                # not all red pixels are within HU range 0~50

                if (np.sum(overlap == 76)) != 0:
                    (n_above50, n_below0,
                     topk, bottomk) = (noncal_eval[0], noncal_eval[1],
                                       noncal_eval[2:7], noncal_eval[7:12])
                    ax[i, 4].text(5, 30, "top5 HU: {}".format(topk),
                                  color='red')
                    ax[i, 4].text(5, 60, "bottom5 HU: {}".format(bottomk),
                                  color='red')
                    ax[i, 4].text(5, 90,
                                  "Num of pixels HU>50: {}".format(n_above50),
                                  color='red')
                    ax[i, 4].text(5, 120,
                                  "Num of pixels HU<0: {}".format(n_below0),
                                  color='red')

                ax[i, 5].imshow(gray2rgb(mix_overlap))
                ax[i, 5].set_title('{} (F1= {:.4f})'.format(
                    'Label+Overlap', f_score))
                ax[i, 5].axis('off')

                # ax[i, 3].scatter(range(0, n_class), f_score)
                # ax[i, 3].set_title('Slice %d : Ave F-score = %0.2f' % (ind + start_inx, ave_f_score))
                # ax[i, 3].set_ylabel('F score')
                # ax[i, 3].set_ylim([-0.1, 1.1])

    # plt.show()
    if fig_name:
        plt.savefig(fig_name + '.pdf')
    plt.close()
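
A usage sketch with a single synthetic entry. The dictionary keys are exactly
those the function reads; the array shapes are illustrative, and noncal_eval
carries [n_above50, n_below0, top-5 HU, bottom-5 HU].

import numpy as np

entry = {
    'slice1': np.random.rand(128, 128),
    'slice2': np.random.rand(128, 128),
    'label': np.zeros((128, 128), dtype=np.uint8),
    'hu0050': np.zeros((128, 128), dtype=np.uint8),
    'overlap': np.zeros((128, 128), dtype=np.uint8),
    'mix_overlap': np.zeros((128, 128), dtype=np.uint8),
    'f1': 0.91,
    'noncal_eval': np.zeros(12),
    'file_path': 'case01/slice_000',
}
sample_list3([entry], rows=2, cols=6, show_every=1, fig_name='preview')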
Code example #7
File: testing.py  Project: CosmosHua/AICC_CV
        print("Loading model")
    else:
        print("No model for loading")
        exit()

    for i, file in enumerate(files):

        if file.endswith('_.jpg'):
            ori = img.imread(os.path.join(test_path, file))

            # A few known grayscale test images are expanded back to three
            # channels so the network input shape stays consistent.
            if file in [
                    '0000486003_.jpg', '0000486005_.jpg', '0000486013_.jpg',
                    '0000492105_.jpg'
            ]:
                ori = cv2.cvtColor(ori, cv2.COLOR_RGB2GRAY)
                ori = gray2rgb(ori)

            ori = ori / 255.0
            input_tensor = np.expand_dims(ori[:, :, :], axis=0)
            # Detail layer = input minus its edge-preserving smoothed version.
            detail_layer = input_tensor - guided_filter(
                input_tensor,
                num_patches=1,
                width=input_tensor.shape[1],
                height=input_tensor.shape[2],
                channel=num_channels)

            final_output = sess.run(output,
                                    feed_dict={
                                        image: input_tensor,
                                        detail: detail_layer
                                    })
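
The detail layer above is simply the input minus a smoothed version of
itself. A standalone sketch of that base/detail decomposition, using
cv2.GaussianBlur as a stand-in for the project's guided_filter (which
preserves edges better); the file name is hypothetical:

import cv2
import numpy as np

ori = cv2.imread('sample.jpg').astype(np.float32) / 255.0
base = cv2.GaussianBlur(ori, (15, 15), 0)  # smoothed "base" layer
detail = ori - base                        # high-frequency "detail" layer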