コード例 #1
0
def resample_all(new_spacing, isnormal, shift_need):
    """Resolve dataset image paths, then pad and cast every volume.

    NOTE(review): ``new_spacing``, ``isnormal`` and ``shift_need`` are not
    used in this body; they are kept so the caller interface is unchanged.
    """
    resampled_path = '/exports/lkeb-hpc/syousefi/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Data-01/21data1100data2-v3/'

    # Reader for dataset flavour 2 with the standard split layout.
    reader = _read_data(2,
                        train_tag='train/',
                        validation_tag='validation/',
                        test_tag='test/',
                        img_name='CT.mha',
                        label_name='GTV_CT.mha',
                        resampled_path=resampled_path)

    # Image paths for the train, validation and test splits.
    (train_CTs, train_GTVs, train_Torso,
     validation_CTs, validation_GTVs, validation_Torso,
     test_CTs, test_GTVs, test_Torso,
     depth, width, height) = reader.read_image_path()

    extract_torsos(train_CTs)

    # Pad and cast each split with the same margin (57).
    for ct_list, gtv_list, torso_list in (
            (train_CTs, train_GTVs, train_Torso),
            (validation_CTs, validation_GTVs, validation_Torso),
            (test_CTs, test_GTVs, test_Torso)):
        padding_images(ct_list, gtv_list, torso_list, 57)
コード例 #2
0
def resample_all(new_spacing, isnormal, shift_need):
    """Resolve image paths for the 'Extra' split and pad/cast its volumes.

    NOTE(review): ``new_spacing``, ``isnormal`` and ``shift_need`` are not
    used in this body; they are kept so the caller interface is unchanged.
    Train/validation padding is intentionally disabled here — only the
    test ('Extra') split is processed.
    """
    # Reader for dataset flavour 2 with 'Extra/' as the test split.
    reader = _read_data(2, train_tag='train/', validation_tag='validation/', test_tag='Extra/',
                        img_name='CT.mha', label_name='GTV_prim.mha')

    # Image paths for the train, validation and test splits.
    (train_CTs, train_GTVs, train_Torso,
     validation_CTs, validation_GTVs, validation_Torso,
     test_CTs, test_GTVs, test_Torso,
     depth, width, height) = reader.read_image_path()

    # Pad and cast only the test split with a margin of 57.
    padding_images(test_CTs, test_GTVs, test_Torso, 57)
コード例 #3
0
    def __init__(self):
        """Configure dataset tags/filenames and create the shared reader."""
        # Dataset layout and padded-volume file names.
        self.train_tag = 'train/'
        self.validation_tag = 'validation/'
        self.test_tag = 'test/'
        self.img_name = 'CT_padded.mha'
        self.label_name = 'GTV_CT_padded.mha'
        self.torso_tag = 'CT_padded_Torso.mha'
        self.data = 2

        # Per-axis bounding-box accumulators (min/max along z, x, y) for
        # the GTV label and the body mask, each starting as an empty list.
        for prefix in ('gtv', 'mask'):
            for axis in ('z', 'x', 'y'):
                setattr(self, '%s_min_%s' % (prefix, axis), [])
                setattr(self, '%s_max_%s' % (prefix, axis), [])

        # Shared reader configured from the attributes above.
        self._rd = _read_data(data=self.data,
                              train_tag=self.train_tag,
                              validation_tag=self.validation_tag,
                              test_tag=self.test_tag,
                              img_name=self.img_name,
                              label_name=self.label_name,
                              torso_tag=self.torso_tag)
from os.path import isfile, join
from random import shuffle
import matplotlib.pyplot as plt
import datetime
import scipy.misc
from densenet_unet import _densenet_unet
from read_data import _read_data
from measurements import _measure


# DenseNet-U-Net layout: number of units in each dense block.
densnet_unet_config=[6,8,8,8,6]
compression_coefficient=.7
growth_rate=2
# Config string used to suffix the output directories, e.g. '68886'.
ext=''.join(map(str, densnet_unet_config))

_rd = _read_data()
test_path='/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Data-01/prostate_test/'

# NOTE(review): 'read_imape_path' looks like a typo of 'read_image_path',
# but it must match the method name as defined in read_data.
test_CTs, test_GTVs=_rd.read_imape_path(test_path)

img_height=512
img_padded_size = 519
seg_size = 505
_meas=_measure()
# Accumulators consumed by the per-volume loop that follows
# (the loop body continues beyond this chunk).
jj=[]
dd=[]
in_path='/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Code/dense_net_14/densenet_unet_output_volume_'+ext+'/'
out_path='/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Code/dense_net_14/densenet_unet_output_volume_'+ext+'_Dilate/'
# data_dir = [join(out_path, f) for f in listdir(out_path) if isfile(join(out_path, f))]
labels=[]
for img_indx in range(len(test_CTs)):
コード例 #5
0
def dicejacc():
    """Score predicted segmentations against ground truth, per test volume.

    For every test case: reads the CT/GTV volumes and the predicted result
    volume, computes per-slice Dice and Jaccard scores, saves per-case bar
    plots, and finally saves box plots across all cases.  Inputs are read
    from and figures written to hard-coded paths (side effects only; no
    return value).
    """
    _rd = _read_data()
    test_path = '/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Data-01/prostate_test/'
    result_path='/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Code/dense_net_14/densenet_unet_output_volume_88888_0.5_2/'
    # NOTE(review): 'read_imape_path' matches the (misspelled) reader method.
    test_CTs, test_GTVs = _rd.read_imape_path(test_path)


    xtickNames = []
    dice_boxplot=[]
    jacc_boxplot=[]
    for i in range(len(test_GTVs)):
        fig1, ax1 = plt.subplots()
        fig2, ax2 = plt.subplots()
        d = []
        j = []
        [CT_image, GTV_image, volume_depth, voxel_size, origin, direction] = _rd.read_image_seg_volume(test_CTs,
                                                                                                       test_GTVs,
                                                                                                       i)
        # Case name from path components, e.g. '<patient>_<study>'.
        ss = str(test_CTs[i]).split("/")
        name = ss[8] + '_' + ss[9]
        xtickNames.append(name)
        res = sitk.ReadImage(result_path + name + '.mha')
        res = sitk.GetArrayFromImage(res)
        for jj in range(GTV_image.shape[0]):
            # np.bool was removed in NumPy >= 1.24; builtin bool is the
            # documented replacement and is behaviorally identical here.
            im1 = np.asarray(res[jj][:][:]).astype(bool)
            im2 = np.asarray(GTV_image[jj][:][:]).astype(bool)
            # Center-crop the ground-truth slice to the prediction's size.
            im2 = im2[0:511,0:511]
            im2 = im2[int(511/2)-int(im1.shape[0]/2)-1:int(511/2)+int(im1.shape[0]/2),
                      int(511 / 2) - int(im1.shape[1]/2) - 1:int(511 / 2) + int(im1.shape[1]/2)]
            # Bug fix: the original computed dice(im1, im2) an extra time
            # and discarded the result.
            d.append(dice(im1, im2))
            j.append(jaccard(im1, im2))

        index = np.arange(len(d))
        bar_width = 0.35
        opacity = 1
        rects1 =ax1.bar(index, d, bar_width,
                            alpha=1,
                            color='b',
                            label='Dice')#'bo--',range(2), j, 'rs--')
        rects2 =ax2.bar(index+bar_width, j,  bar_width,
                            alpha=1,
                            color='r',
                            label='Jaccard')#,range(2), j, 'rs--')
        ax1.set_xlabel('Slices')
        ax2.set_xlabel('Slices')
        ax1.set_ylabel('Accuracy')
        ax2.set_ylabel('Accuracy')
        non_zero_j =  [x for x in j if x != 0]
        non_zero_d =  [x for x in d if x != 0]
        # Titles show (min, max) over non-zero slices when any exist,
        # otherwise over all slices.
        if len(non_zero_j)!=0:
            title2=name+': '+'jaccard: (%.2f,%.2f)' %\
                        (min(non_zero_j), max(non_zero_j))
            title1 = name + ': ' + 'dice: (%.2f,%.2f)' % \
                                   ( min(non_zero_d), max(non_zero_d))
        else:
            title2 = name + ': ' + 'jaccard: (%.2f,%.2f)' % \
                                  (min(j), max(j))
            title1 = name + ': ' + 'dice: (%.2f,%.2f)' % \
                                  ( min(d), max(d))
        fig1.suptitle(title1)
        ax1.legend( loc=4)
        # Bug fix: plt.ylim() acts on the *current* axes (ax2, the last
        # one created), so fig1 never received its y-limit.  Set each
        # axes' limit explicitly instead.
        ax1.set_ylim(0, 1.1)
        fig1.savefig(result_path + '/dice_'+name+'.png')

        fig2.suptitle(title2)
        ax2.legend(loc=4)
        ax2.set_ylim(0, 1.0)
        fig2.savefig(result_path + '/jaccard_'+name+'.png')


        dice_boxplot.append((non_zero_d))
        jacc_boxplot.append((non_zero_j))

        # Close (not just clear) the per-case figures so matplotlib does
        # not accumulate open figures across the loop.
        plt.close(fig1)
        plt.close(fig2)
    fig, ax = plt.subplots()
    plt.boxplot(dice_boxplot, 0, '')
    plt.xticks(list(range(1, len(dice_boxplot) + 1)), xtickNames, rotation='vertical')
    plt.ylabel('Dice')
    plt.margins(0.2)
    plt.subplots_adjust(bottom=.4)
    fig.savefig(result_path + '/dice_boxplot.png')

    fig, ax = plt.subplots()
    plt.boxplot(jacc_boxplot, 0, '')
    plt.xticks(list(range(1, len(jacc_boxplot) + 1)), xtickNames, rotation='vertical')
    plt.ylabel('Jaccard')
    plt.margins(0.2)
    plt.subplots_adjust(bottom=.4)
    fig.savefig(result_path + '/jacc_boxplot.png')
コード例 #6
0
    # img_name='CT_padded.mha'
    # label_name='GTV_CT_padded.mha'
    # torso_tag='CT_padded_Torso.mha'

    # Dataset layout: tags are sub-directories under Data-01/.
    train_tag='train/'
    validation_tag='validation/'
    test_tag='Esophagus/'
    # img_name='CTpadded.mha'
    # label_name='GTV_CTpadded.mha'
    # torso_tag='Torsopadded.mha'

    # Empty file names — NOTE(review): presumably the reader resolves file
    # names itself in this mode; confirm against _read_data.
    img_name = ''
    label_name = ''
    torso_tag = ''

    # `data`, `Log`, `log_tag` and `fold` come from the enclosing scope
    # (their definitions are not visible in this fragment).
    _rd = _read_data(data=data,train_tag=train_tag, validation_tag=validation_tag, test_tag=test_tag,
                             img_name=img_name, label_name=label_name)
    test_path = '/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Data-01/'+test_tag
    chckpnt_dir = '/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Code/'+Log+log_tag+'/densenet_unet_checkpoints/'


    # test_CTs, test_GTVs ,test_Torsos= _rd.read_imape_path(test_path)

    # Paths (images, labels, torsos, penalize masks) for all three splits
    # of the requested fold.
    train_CTs, train_GTVs, train_Torso, train_penalize, \
    validation_CTs, validation_GTVs, validation_Torso, validation_penalize, \
    test_CTs, test_GTVs, test_Torso, test_penalize = _rd.read_data_path(fold=fold)

    # test_CTs=train_CTs
    # test_GTVs=train_GTVs
    # test_Torso=train_Torso
    # test_penalize=train_penalize
コード例 #7
0
from joblib import Parallel, delayed
import multiprocessing

import pandas as pd


def penalize_mask_generator(gtv_name, indx):
    """Export the nonzero voxel coordinates of one GTV mask to a .xlsx file.

    Parameters:
        gtv_name: path of the GTV mask volume ('.mha'); the spreadsheet is
            written next to it with the same stem and a '.xlsx' extension.
        indx: index of this mask, printed for progress tracking.

    Uses the module-level `rd` reader defined below.
    """
    nm = gtv_name.split('.mha')[0] + '.xlsx'
    print(indx, nm)
    [scan, voxel_size, origin, direction] = rd.read_volume(gtv_name)
    # Coordinates of every nonzero voxel, one column per axis.
    where = np.where(scan)

    df = pd.DataFrame({'x': where[0], 'y': where[1], 'z': where[2]})
    # Context manager saves and closes the workbook: ExcelWriter.save()
    # was removed in pandas 2.0, and the original leaked the file handle
    # if to_excel raised.
    with pd.ExcelWriter(nm) as writer:
        df.to_excel(writer, sheet_name='Sheet1')


# Reader for the resampled volumes — NOTE(review): the '_re113' suffix in
# the file names presumably encodes the resampling spacing; confirm.
rd = _read_data(2,
                train_tag='train/',
                validation_tag='validation/',
                test_tag='test/',
                img_name='CT_re113.mha',
                label_name='GTV_re113.mha')
CTs, GTVs, Torsos = rd.read_imape_path2(
    '/srv/2-lkeb-17-dl01/syousefi/TestCode/EsophagusProject/Data-01/21data1100data2-v3/'
)
# One parallel worker per CPU core; each job writes one mask's nonzero
# voxel coordinates to an .xlsx file next to the mask.
num_cores = multiprocessing.cpu_count()
res = Parallel(n_jobs=num_cores)(delayed(penalize_mask_generator)(
    gtv_name=GTVs[i], indx=i) for i in range(len(GTVs)))  #len(GTVs)
コード例 #8
0
def run_net(densnet_unet_config, compression_coefficient, growth_rate,
            img_padded_size, seg_size, Exp, log_ext, LOGDIR, train_acc_file,
            validation_acc_file, chckpnt_dir):
    '''Build and train the DenseNet-U-Net on 2D CT patches (TF1 graph mode).

    Per epoch: samples 2D training patches and validation batches, runs Adam
    on a softmax cross-entropy cost, logs accuracy/loss to text files and
    TensorBoard summaries, periodically evaluates on validation batches,
    saves a sample test prediction image, and checkpoints the model.

    Parameters (as used below):
      densnet_unet_config, compression_coefficient, growth_rate:
          architecture settings forwarded to _densenet_unet.
      img_padded_size, seg_size:
          padded-image / segmentation sizes used for whole-image batches.
      Exp: suffix appended to the TensorBoard writer directories.
      log_ext: not used in this body.
      LOGDIR: base directory for TensorBoard logs.
      train_acc_file, validation_acc_file:
          text files receiving per-step 'accuracy, loss' lines via save_file.
      chckpnt_dir: directory receiving per-epoch checkpoints.
    '''
    two_dim = True
    _rd = _read_data()
    flag = False  # becomes True once a sample test image/label pair is cached
    # if os.path.exists(LOGDIR):
    #     shutil.rmtree(LOGDIR)
    '''read path of the images for train, test, and validation'''
    train_CTs, train_GTVs, validation_CTs, validation_GTVs, \
    test_CTs, test_GTVs=_rd.read_image_path()

    # Patch geometry: network input window vs. output label patch size.
    GTV_patchs_size = 81
    patch_window = 95
    sample_no = 100000  # TODO: adjust later (was 1000)
    batch_no = 50
    batch_no_validation = 1000
    validation_samples = 100000  # TODO: adjust later (was 1000)
    display_step = 100
    run_validation_steps = display_step

    learning_rate = 1E-4
    learning_decay = .95

    if two_dim:
        # Dynamic-shape placeholders: [batch, H, W, channels].
        image = tf.placeholder(tf.float32, shape=[None, None, None, 1])
        label = tf.placeholder(tf.float32, shape=[None, None, None, 2])
        # image=tf.placeholder(tf.float32,shape=[None,patch_window,patch_window,1])
        # image=tf.placeholder(tf.float32,shape=[None,512,512,1])
        # label=tf.placeholder(tf.float32,shape=[None,GTV_patchs_size,GTV_patchs_size,2])
        # label=tf.placeholder(tf.float32,shape=[None,512,512,2])
        # Scalars fed with running validation averages (-1 when unused) so
        # they can be written into the merged summary.
        ave_vali_acc = tf.placeholder(tf.float32)
        ave_loss_vali = tf.placeholder(tf.float32)

    # NOTE(review): dropout is fed 0.5 during training and 1 during
    # evaluation below — presumably a keep-probability; confirm against
    # _densenet_unet.
    dropout = tf.placeholder(tf.float32, name='dropout')
    is_training = tf.placeholder(tf.bool, name='is_training')
    dense_net_dim = tf.placeholder(tf.int32, name='dense_net_dim')

    # _u_net=_unet()
    # _u_net.unet(image)

    _dn = _densenet_unet(densnet_unet_config, compression_coefficient,
                         growth_rate)  #create object
    y = _dn.dens_net(image, is_training, dropout, dense_net_dim)

    sess = tf.Session()
    # Separate writers so train and validation curves appear as two runs.
    train_writer = tf.summary.FileWriter(LOGDIR + '/train' + Exp,
                                         graph=tf.get_default_graph())
    validation_writer = tf.summary.FileWriter(LOGDIR + '/validation' + Exp,
                                              graph=sess.graph)

    # y=_dn.vgg(image)

    saver = tf.train.Saver()
    '''AdamOptimizer:'''
    with tf.name_scope('cost'):
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            logits=y, labels=label),
                              name="cost")
    tf.summary.scalar("cost", cost)

    # extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.name_scope('train'):
        optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    with tf.name_scope('validation'):
        average_validation_accuracy = ave_vali_acc
        average_validation_loss = ave_loss_vali
    tf.summary.scalar("average_validation_accuracy",
                      average_validation_accuracy)
    tf.summary.scalar("average_validation_loss", average_validation_loss)

    with tf.name_scope('accuracy'):
        with tf.name_scope('correct_prediction'):
            # Per-pixel argmax over the 2-class channel axis.
            correct_prediction = tf.equal(tf.argmax(y, 3), tf.argmax(label, 3))
        with tf.name_scope('accuracy'):
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("accuracy", accuracy)

    sess.run(tf.global_variables_initializer())
    train_writer.add_graph(sess.graph)

    summ = tf.summary.merge_all()

    total_epochs = 10
    img_width = 512
    img_height = 512
    # patch_radius = 49
    '''loop for epochs'''
    itr1 = 0
    for epoch in range(total_epochs):
        print('0')
        save_file(train_acc_file, 'epoch: %d\n' % (epoch))
        save_file(validation_acc_file, 'epoch: %d\n' % (epoch))
        print("epoch #: %d" % (epoch))
        startTime = time.time()
        # Fresh patch sample for this epoch (epoch value seeds the reader).
        GTV_patchs, CT_image_patchs = _rd.read_data_all_train_batches(
            train_CTs, train_GTVs, sample_no, GTV_patchs_size, patch_window,
            img_width, img_height, epoch)

        validation_CT_image, validation_GTV_image = _rd.read_all_validation_batches(
            validation_CTs,
            validation_GTVs,
            validation_samples,
            GTV_patchs_size,
            patch_window,
            img_width,
            img_height,
            epoch + 1,
            img_padded_size,
            seg_size,
            whole_image=0)

        step = 0
        '''loop for training batches'''
        while (step * batch_no < sample_no):
            # print('Training %d samples of %d' %(step*batch_no,sample_no))

            # NOTE(review): the '- 1' slice end excludes the last element,
            # so each batch actually has batch_no - 1 samples — looks like
            # an off-by-one; confirm intent.
            train_CT_image_patchs = CT_image_patchs[step *
                                                    batch_no:(step + 1) *
                                                    batch_no - 1]
            train_GTV_label = GTV_patchs[step *
                                         batch_no:(step + 1) * batch_no - 1]
            [acc_train1, loss_train1, optimizing] = sess.run(
                [accuracy, cost, optimizer],
                feed_dict={
                    image: train_CT_image_patchs,
                    label: train_GTV_label,
                    dropout: 0.5,
                    is_training: True,
                    ave_vali_acc: -1,
                    ave_loss_vali: -1,
                    dense_net_dim: patch_window
                })

            save_file(train_acc_file, '%f, %f\n' % (acc_train1, loss_train1))

            # Every display_step iterations: write a train summary, then
            # run a full validation pass.
            if itr1 % display_step == 0:
                [sum_train] = sess.run(
                    [summ],
                    feed_dict={
                        image: train_CT_image_patchs,
                        label: train_GTV_label,
                        dropout: 0.5,
                        is_training: True,
                        ave_vali_acc: acc_train1,
                        ave_loss_vali: loss_train1,
                        dense_net_dim: patch_window
                    })
                train_writer.add_summary(sum_train, itr1)

                #=============validation================
                '''Validation: '''
                validation_step = 0
                loss_validation = 0
                acc_validation = 0

                while (validation_step * batch_no_validation <
                       validation_samples):
                    # Same '- 1' slicing pattern as the training batches.
                    validation_CT_image_patchs = validation_CT_image[
                        validation_step *
                        batch_no_validation:(validation_step + 1) *
                        batch_no_validation - 1]
                    validation_GTV_label = validation_GTV_image[
                        validation_step *
                        batch_no_validation:(validation_step + 1) *
                        batch_no_validation - 1]
                    [acc_vali, loss_vali] = sess.run(
                        [accuracy, cost],
                        feed_dict={
                            image: validation_CT_image_patchs,
                            label: validation_GTV_label,
                            dropout: 1,
                            is_training: False,
                            ave_vali_acc: -1,
                            ave_loss_vali: -1,
                            dense_net_dim: patch_window
                        })

                    acc_validation += acc_vali
                    loss_validation += loss_vali
                    validation_step += 1
                    # print(validation_step * batch_no_validation )
                    save_file(validation_acc_file,
                              '%f, %f\n' % (acc_vali, loss_vali))
                    # print('Validation, step: %d accuracy: %.4f loss: %f' % (
                    # itr1, acc_vali, loss_vali))
                    # end while

                # Averages over all validation batches, written into the
                # summary via the two average placeholders.
                acc_validation = acc_validation / validation_step
                loss_validation = loss_validation / validation_step
                print(
                    '******Validation, step: %d accuracy: %.4f loss: %f*******'
                    % (step, acc_validation, loss_validation))
                [sum_validation] = sess.run(
                    [summ],
                    feed_dict={
                        image: validation_CT_image_patchs,
                        label: validation_GTV_label,
                        dropout: 1,
                        is_training: False,
                        ave_vali_acc: acc_validation,
                        ave_loss_vali: loss_validation,
                        dense_net_dim: patch_window
                    })
                validation_writer.add_summary(sum_validation, itr1)
                print('end of validation')
            #end if

            step = step + 1
            itr1 = itr1 + 1
        endTime = time.time()

        print("End of epoch----> %d, elapsed time: %d" %
              (epoch, endTime - startTime))

        # End-of-epoch test snapshot ('itr1 % 1' is always 0, so this
        # always runs).  The whole test image/label pair is read once and
        # cached across epochs via `flag`.
        if itr1 % 1 == 0:
            if flag == False:
                img, seg = _rd.read_all_validation_batches(test_CTs,
                                                           test_GTVs,
                                                           validation_samples,
                                                           GTV_patchs_size,
                                                           patch_window,
                                                           img_width,
                                                           img_height,
                                                           epoch + 1,
                                                           img_padded_size,
                                                           seg_size,
                                                           whole_image=1)

                # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2;
                # this requires an old SciPy (or porting to imageio).
                scipy.misc.imsave(
                    './dense_unet_out/test.png',
                    img[0][:][:].reshape(img_padded_size, img_padded_size))
                scipy.misc.imsave('./dense_unet_out/label.png',
                                  (seg[0][:][:])[:, :, 1])
                flag = True

            [acc_img, loss_img, out] = sess.run(
                [accuracy, cost, y],
                feed_dict={
                    image: img,
                    label: seg,
                    dropout: 1,
                    is_training: False,
                    ave_vali_acc: -1,
                    ave_loss_vali: -1,
                    dense_net_dim: img_padded_size
                })
            # imgplot = plt.imshow((out[0][:][:])[:, :, 1], cmap='gray')

            # plt.figure()
            # img1=np.zeros((1, 1023, 1023))
            # img1[0][:][:] = np.lib.pad((out[0][:][:])[:, :, 1], (263, 263), "constant", constant_values=(0, 0))
            # imgplot = plt.imshow((out[0][:][:])[:, :, 1], cmap='gray')
            # plt.figure()
            # img1 = np.zeros((1, 1023, 1023))
            # img1[0][:][:] = np.lib.pad((seg[0][:][:])[:, :, 1], (263, 263), "constant", constant_values=(0, 0))
            # imgplot = plt.imshow((img1[0][:][:]), cmap='gray')
            # plt.figure()
            # imgplot = plt.imshow(img[0][:][:].reshape(1023, 1023), cmap='gray')

            # Save the foreground-channel prediction for this epoch.
            scipy.misc.imsave('./dense_unet_out/result_%d.png' % (epoch),
                              (out[0][:][:])[:, :, 1])
        '''saveing model after each epoch'''
        chckpnt_path = os.path.join(chckpnt_dir, 'densenet_unet.ckpt')
        saver.save(sess, chckpnt_path, global_step=epoch)

        learning_rate = learning_rate * learning_decay