Example #1
def launch(data_root, output_path, training_iters, epochs, restore, layers,
           features_root):
    print("Using data from: %s" % data_root)
    data_provider = ultrasound_util.DataProvider(data_root + "/*.tif",
                                                 a_min=0,
                                                 a_max=210)
    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost="dice_coefficient",
    )

    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, norm_grads=True, optimizer="adam")
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test,
                                                       prediction.shape))))
Example #2
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    generator = Generator(572, data_root)
    
    data, label = generator(1)
    weights = None  # (1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)
    
    net = unet.Unet(channels=generator.channels, 
                    n_class=generator.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    add_regularizers=True,
                    class_weights=weights,
#                     filter_size=5
                    )
    
    path = output_path if restore else create_training_path(output_path)
#     trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(generator, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    prediction = net.predict(path, data)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))))
    
#     import numpy as np
#     np.save("prediction", prediction[0, ..., 1])
    
    img = util.combine_img_prediction(data, label, prediction)
    util.save_image(img, "prediction.jpg")
Example #3
def launch(data_root, output_path, training_iters, epochs, restore, layers,
           features_root):
    print("Using data from: %s" % data_root)
    data_provider = DataProvider(600, glob.glob(data_root + "/*"))

    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001),
    )

    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net,
                           optimizer="momentum",
                           opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test,
                                                       prediction.shape))))
Example #4
def train(dataset_csv, working_dir, appendum='', force=False):

    # create folder name
    foldername = os.path.join(
        working_dir, s.stages[4], '__'.join([
            os.path.splitext(os.path.basename(dataset_csv))[0],
            'ly' + str(s.network['layers']) + 'ftr' + str(s.network['features_root']) + appendum, ''
            ]))

    # only retrain if necessary
    if not os.path.exists(foldername) or force:
        generator = image_util.ImageDataProvider(
            dataset_path=dataset_csv,
            roles=['train'],
            shuffle_data=True,
            a_min=None,
            a_max=None,
            n_class=s.network['classes'],
            n_channels=s.network['channels'])  # add all options and put shuffle data = True

        net = unet.Unet(
            channels=s.network['channels'],
            n_class=s.network['classes'],
            layers=s.network['layers'],
            features_root=s.network['features_root'],
            cost_kwargs=dict(class_weights=s.network['class_weights'])
        )
        trainer = unet.Trainer(net, optimizer=s.train['optimizer'], batch_size=s.train['batch_size'])

        trainer.train(generator, foldername, training_iters=s.train['training_iters'], epochs=s.train['epochs'],
                      display_step=s.train['display_step'], dropout=s.train['dropout'],
                      restore=False, write_graph=True)
    else:
        print(os.path.basename(foldername), 'already exists. Skipping.')
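
The snippet above reads its configuration from a settings module `s`; a minimal sketch of the fields it expects (names inferred from the code above, values purely illustrative):

# hypothetical settings module; field names match those read above
class s:
    stages = ['raw', 'preproc', 'split', 'augment', 'train']  # s.stages[4]: training output stage
    network = dict(layers=5, features_root=64, classes=2, channels=1,
                   class_weights=[1.0, 1.0])
    train = dict(optimizer='adam', batch_size=4, training_iters=64,
                 epochs=100, display_step=2, dropout=0.5)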
Example #5
def train(args):
    # preparing data loading
    data_provider = image_util.ImageDataProvider(args.data_dir,
                                                 n_class=args.classes,
                                                 class_colors=[0, 255, 127])

    # setup & training
    net = unet.Unet(layers=args.layers,
                    features_root=args.features_root,
                    channels=args.channels,
                    n_class=args.classes)
    trainer = unet.Trainer(net)
    total_parameters = 0
    for variable in tf.trainable_variables():
        # shape is an array of tf.Dimension
        shape = variable.get_shape()
        variable_parameters = 1
        for dim in shape:
            variable_parameters *= dim.value
        total_parameters += variable_parameters
    print("Total number of parameters:{0}".format(total_parameters))
    trainer.train(data_provider,
                  args.output_path,
                  training_iters=args.training_iters,
                  epochs=args.num_epochs,
                  write_graph=args.write_graph,
                  restore=args.restore)
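
A minimal argument-parser sketch matching the `args` attributes the function above reads (attribute names taken from the code; defaults are placeholders):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', default='data/*.tif')
parser.add_argument('--classes', type=int, default=2)
parser.add_argument('--channels', type=int, default=1)
parser.add_argument('--layers', type=int, default=3)
parser.add_argument('--features_root', type=int, default=16)
parser.add_argument('--output_path', default='./unet_trained')
parser.add_argument('--training_iters', type=int, default=32)
parser.add_argument('--num_epochs', type=int, default=100)
parser.add_argument('--write_graph', action='store_true')
parser.add_argument('--restore', action='store_true')

train(parser.parse_args())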
Example #6
def launch(data_root, output_path, training_iters, epochs, restore, layers,
           features_root):
    data_provider = DataProvider(572, data_root)

    data, label = data_provider(1)
    weights = None  #(1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)

    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001, class_weights=weights),
    )

    path = output_path if restore else util.create_training_path(output_path)

    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    prediction = net.predict(path, data)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(label,
                                                       prediction.shape))))
Example #7
def main():

    # input training and test datasets
    train_data = image_util.ImageDataProvider(
        search_path='RoadDetection_Train_Images', n_class=2)

    # instantiate U-net (best results: layers=5, feature_roots=64, batch_size=2, epochs=50, training_iters=64)
    net = unet.Unet(layers=4,
                    n_class=train_data.n_class,
                    channels=train_data.channels,
                    features_root=48,
                    cost='dice_coefficient',
                    cost_kwargs={'regularizer': 0.01})

    trainer = unet.Trainer(net,
                           batch_size=2,
                           verification_batch_size=4,
                           optimizer="momentum",
                           opt_kwargs=dict(momentum=0.5))

    # path = trainer.train(data_provider=train_data, output_path="./unet_trained", training_iters=32,  epochs=1, display_step=2)
    trainer.train(data_provider=train_data,
                  output_path="./unet_trained",
                  training_iters=64,
                  epochs=50,
                  dropout=0.75,
                  display_step=2)

    print('Process completed.')
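
The comment above reports better results with a deeper configuration than the one the example actually uses; a hedged variant plugging in those reported settings (same API and data provider, values taken from the comment):

# sketch only: uses the "best results" values noted in the comment above
net = unet.Unet(layers=5,
                n_class=train_data.n_class,
                channels=train_data.channels,
                features_root=64,
                cost='dice_coefficient',
                cost_kwargs={'regularizer': 0.01})
trainer = unet.Trainer(net, batch_size=2, optimizer="momentum",
                       opt_kwargs=dict(momentum=0.5))
trainer.train(data_provider=train_data,
              output_path="./unet_trained",
              training_iters=64,
              epochs=50,
              dropout=0.75,
              display_step=2)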
Example #8
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s"%data_root)
    data_provider = DataProvider(600, glob.glob(data_root+"/*"))
    
    net = unet.Unet(channels=data_provider.channels, 
                    n_class=data_provider.n_class, 
                    layers=layers, 
                    features_root=features_root,
                    add_regularizers=True,
#                     filter_size=5
                    )
    
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider, path, 
                         training_iters=training_iters, 
                         epochs=epochs, 
                         dropout=0.5, 
                         display_step=2, 
                         restore=restore)
     
    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)
     
    print("Testing error rate: {:.2f}%".format(unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
    
#     import numpy as np
#     np.save("prediction", prediction[0, ..., 1])
    
    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, "prediction.jpg")
Example #9
    def __init__(self, data_dir):
        """
			data_directory : path like /home/rajat/nnproj/dataset/
			includes the dataset folder with '/'
			Initialize all your variables here
			"""
        self.path = data_dir
        self.net = unet.Unet(layers=3, features_root=64, channels=1, n_class=2)
        self.trainer = unet.Trainer(self.net)
        self.count = 0
Example #10
def Training(net):

    # `dir`, epoch_Num and gpuNum are globals defined elsewhere in the original script
    TrainData = image_util.ImageDataProvider(dir + "*.tif", shuffle_data=True)
    L = int(len(TrainData.data_files) / 10)
    trainer = unet.Trainer(net, optimizer="adam")
    path = trainer.train(TrainData,
                         dir + 'model',
                         training_iters=L,
                         epochs=epoch_Num,
                         display_step=100,
                         GPU_Num=gpuNum)
    return path
Example #11
def train(gen):
    net = unet.Unet(channels=gen.channels,
                    n_class=gen.n_class,
                    layers=5,
                    features_root=16)
    trainer = unet.Trainer(net,
                           optimizer="momentum",
                           opt_kwargs=dict(momentum=0.2))
    trainer.train(gen,
                  "./unet_trained/%s" % (gen.labelclass),
                  training_iters=32,
                  epochs=100,
                  display_step=2)
Example #12
def launch(data_root,
           roidictfile,
           output_path,
           training_iters,
           epochs,
           restore,
           layers,
           features_root,
           val=None):

    with open(roidictfile) as fh:
        roidict = yaml.load(fh)

    val_data_provider = None  # referenced below even when no validation set is given
    if val:
        val_data_provider = ImageDataProvider(val, roidict)

    data_provider = ImageDataProvider(data_root, roidict)

    data, label = data_provider(1)
    # make sure the labels are not flat
    assert np.any(
        np.asarray([label[-1, ..., nn].var()
                    for nn in range(label.shape[-1])]) > 0)

    weights = None  #(1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)

    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001, class_weights=weights),
    )

    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore,
                         val_data_provider=val_data_provider)

    prediction = net.predict(path, data)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(label,
                                                       prediction.shape))))
Example #13
def ThalamusExtraction(net, Test_Path, Train_Path, subFolders, CropDim, padSize):

    Trained_Model_Path = Train_Path + 'model/model.cpkt'


    trainer = unet.Trainer(net)

    TestData = image_util.ImageDataProvider(Test_Path + '*.tif', shuffle_data=False)

    L = len(TestData.data_files)
    DiceCoefficient = np.zeros(L)
    LogLoss = np.zeros(L)
    BB_Cord = np.zeros(L)

    # recover each slice's index from its file name ("...sliceNN.tif")
    for BB_ind in range(L):
        Stng = TestData.data_files[BB_ind]
        d = Stng.find('slice')
        BB_Cord[BB_ind] = int(Stng[d + 5:].split('.')[0])

    BB_CordArg = np.argsort(BB_Cord)
    Data, Label = TestData(len(BB_Cord))


    szD = Data.shape
    szL = Label.shape

    data = np.zeros((1, szD[1], szD[2], szD[3]))
    label = np.zeros((1, szL[1], szL[2], szL[3]))

    shiftFlag = 0
    PredictionFull = np.zeros((szD[0], 148, 148, 2))
    for BB_ind in BB_CordArg:

        data[0, :, :, :] = Data[BB_ind, :, :, :].copy()
        label[0, :, :, :] = Label[BB_ind, :, :, :].copy()

        if shiftFlag == 1:
            shiftX = 0
            shiftY = 0
            data = np.roll(data, [0, shiftX, shiftY, 0])
            label = np.roll(label, [0, shiftX, shiftY, 0])

        prediction = net.predict(Trained_Model_Path, data)
        PredictionFull[BB_ind, :, :, :] = prediction

    return PredictionFull
Example #14
def train():
    net = unet.Unet(channels=1, n_class=2, layers=4, features_root=64)
    trainer = unet.Trainer(net,
                           batch_size=8,
                           verification_batch_size=4,
                           optimizer='adam')
    # data_provider = image_util.SimpleDataProvider(X_test, y_test)
    # note: upstream tf_unet's Trainer.train expects (data_provider, output_path, ...);
    # this positional call matches a modified fork
    path = trainer.train(X_test,
                         y_test,
                         X_test,
                         y_test,
                         './pre_trained',
                         training_iters=32,
                         epochs=50,
                         dropout=0.5,
                         display_step=8,
                         restore=True)
Example #15
def train(raw_path, label_path, model_path):
    data = read_dicom(raw_path)
    label = read_dicom(label_path, True)
    # create the training set
    data_provider = SimpleDataProvider(data, label, n_class=2, channels=1)

    # build the network
    net = unet.Unet(layers=3,
                    features_root=32,
                    channels=1,
                    n_class=2,
                    summaries=False)
    trainer = unet.Trainer(net,
                           batch_size=2,
                           opt_kwargs={'learning_rate': 0.02})
    path = trainer.train(data_provider,
                         model_path,
                         training_iters=64,
                         epochs=100)
    print(path)
Example #16
def launch(data_root, output_path, training_iters, epochs, restore, layers,
           features_root):
    print("Using data from: %s" % data_root)

    if not os.path.exists(data_root):
        raise IOError("Kaggle Ultrasound Dataset not found")

    data_provider = DataProvider(search_path=data_root + "/*.tif",
                                 mean=100,
                                 std=56)

    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        #cost="dice_coefficient",
    )

    path = output_path if restore else util.create_training_path(output_path)

    trainer = unet.Trainer(net,
                           batch_size=1,
                           norm_grads=False,
                           optimizer="adam")
    path = trainer.train(data_provider,
                         path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test,
                                                       prediction.shape))))
Example #17
def main():
    restore = False
    # adding measurements (loggers) to plotter
    logs = []
    logs.append(logger(label="train loss", color="Red"))
    logs.append(logger(label="validation loss", color="Blue"))
    logs.append(logger(label="validation error", color="Black"))
    logs.append(logger(label="dice score", color="Green"))
    plot = plotter(logs, EPOCHS, os.getcwd() + "/" + "plots")

    # data provider
    dp = DataProviderTiled(splits=12,
                           batchSize=BATCH_SIZE,
                           validationSize=VALIDATION_SIZE)
    dp.readData()
    print("DONE READING DATA")
    # calculate num of iterations
    iters = dp.getTrainSize() // BATCH_SIZE

    # unet
    opt = {"class_weights": [0.99, 0.01]}
    net = unet.Unet(channels = 1, n_class = 2, layers = 3,\
     features_root = 16, cost="cross_entropy", cost_kwargs={})

    # trainer
    options = {"momentum": 0.2, "learning_rate": 0.2, "decay_rate": 0.95}

    trainer = unet.Trainer(net,
                           optimizer="momentum",
                           plotter=plot,
                           opt_kwargs=options)
    # train model
    path = trainer.train(dp, OUTPUT_PATH,
                         training_iters=iters,
                         epochs=EPOCHS,
                         dropout=DROPOUT_KEEP_PROB,
                         display_step=DISPLAY_STEP,
                         restore=restore)

    # plot results
    plot.saveLoggers()
    plot.plotLoggers()
    print("DONE")
Example #18
def main():
    np.random.seed(12345)
    LAYERS = 3
    pkl_fname = "data/preprocess/stage1_train_set_rgb.pkl"
    images, masks = get_dataset(pkl_fname)
    logging.info("read train set: %s, %s", images.shape, masks.shape)
    logging.info("image:[%s, %s], mask:[%s, %s]", np.max(images), np.min(images), np.max(masks), np.min(masks))

    pred_size, offset = unet_size(256, LAYERS)
    logging.info("pred_size: %d, offset: %d", pred_size, offset)
    images = padding_array(images, offset, default_val=0.0)
    masks = padding_array(masks, offset, default_val=False)
    logging.info("shape after padded: %s, %s", images.shape, masks.shape)

    # images = normalize(images)
    # test_data(images, masks, 1679)
    data_provider = image_util.SimpleDataProvider(images, masks, channels=3)
    logging.info("data_provider.channels: %s, data_provider.n_class: %s", data_provider.channels, data_provider.n_class)

    # test_data_provider(data_provider)
    net = unet.Unet(channels=data_provider.channels,
                    n_class=data_provider.n_class,
                    cost='cross_entropy',
                    layers=LAYERS,
                    features_root=64,
                    cost_kwargs=dict(regularizer=0.001),
                    )
    batch_size = 8
    net.verification_batch_size = batch_size * 2
    training_iters = (images.shape[0] - 1) // batch_size + 1  # ceiling division
    logging.info("batch_size: %s, iters: %s", batch_size, training_iters)

    trainer = unet.Trainer(net, batch_size=batch_size, optimizer="momentum",
                           opt_kwargs=dict(momentum=0.9, learning_rate=0.01))
    path = trainer.train(data_provider, "log/20180416-1",
                         training_iters=training_iters, epochs=20, display_step=2)
Example #19
from __future__ import division, print_function
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '2'
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import matplotlib
import numpy as np

from tf_unet import image_util
from tf_unet import unet
from tf_unet import util

# In[2]:

net = unet.Unet(channels=3, n_class=2, layers=2, features_root=300)
trainer = unet.Trainer(net,
                       optimizer="momentum",
                       opt_kwargs=dict(momentum=0.2))

# In[5]:

data_provider = image_util.ImageDataProvider(
    "/mnt/ccipd_data/CCF/ccfMaskTmp/training/*.png",
    data_suffix='_img.png',
    mask_suffix='_mask.png')

# In[4]:

path = trainer.train(data_provider,
                     "./unet_trained",
                     training_iters=32,
                     epochs=1)
Example #20
from functools import reduce  # required on Python 3

# xx, yy and y are arrays prepared earlier in the original script
print(reduce(lambda x, y: x * y, xx.shape))

generator = AlexDataProvider(xx, yy)  # generator.channels, generator.n_class
#net = unet.Unet(channels=4, n_class=5, layers=3, features_root=16, cost='dice_coefficient') #, cost_kwargs={"class_weights":[2,120,10,60,100]})
net = unet.Unet(channels=4,
                n_class=5,
                layers=3,
                features_root=16,
                cost_kwargs={"class_weights": [2, 120, 10, 60, 100]})
#net = unet.Unet(channels=4, n_class=5, layers=3, features_root=16, cost_kwargs={"class_weights":[.01,.2,.2,.2,.2]})
#net.load("./unet_trained_nii/model.cpkt")

#trainer = unet.Trainer(net, optimizer="momentum",opt_kwargs=dict(learning_rate=.00005)) # .000005  # momentum=0.9
trainer = unet.Trainer(
    net,
    optimizer="adam",
    batch_size=20,
    opt_kwargs=dict(learning_rate=.00001))  # .000005  # momentum=0.9
path = trainer.train(generator,
                     "./unet_trained_nii",
                     training_iters=20,
                     epochs=50,
                     display_step=10,
                     restore=False)

slice_idx, max_cnt = 0, 0
for i in range(y.shape[0]):
    wtf = np.where(y[i, :, :] > 0)
    cnt = len(wtf[0])
    print('wtx', cnt, i)
    if cnt > max_cnt:
        max_cnt, slice_idx = cnt, i
Example #21
                        features_root=32)

    else:

        data_provider = SimpleDataProvider(data=images_train,
                                           label=mask_labels_train)

        net = unet.Unet(channels=data_provider.channels,
                        n_class=data_provider.n_class,
                        layers=3,
                        features_root=32)

        batch_size = 20

        trainer = unet.Trainer(net,
                               batch_size=20,
                               verification_batch_size=10,
                               optimizer="adam")

        if continue_training:
            print("Restore model of U-Net run", timestamp)

            path = trainer.train(
                data_provider,
                output_path="Output_Training/{}-unet_trained".format(
                    timestamp),
                training_iters=images_train.shape[0] // batch_size,
                epochs=10,
                dropout=0.9,
                display_step=20,
                restore=True,
                prediction_path="Output_Training/{}-prediction".format(
Example #22
        return image, label


#%%
data_provider = DataProvider(data_root)
data, label = data_provider(1)
weights = None

net = unet.Unet(channels=data_provider.channels,
                n_class=data_provider.n_class,
                layers=layers,
                features_root=features_root,
                cost_kwargs=dict(regularizer=0.001, class_weights=weights))
path = output_path

trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))

path_2 = trainer.train(data_provider,
                       path,
                       training_iters=training_iters,
                       epochs=epochs,
                       dropout=0.5,
                       display_step=2,
                       restore=False)


#%%
def error_rate(predictions, labels):
    """
    Return the error rate based on dense predictions and 1-hot labels.
    """
Example #23
    restore = qq != 0
    
    tf.reset_default_graph()
    net = unet.Unet(channels=1, 
                    n_class=2, 
                    layers=layers, 
                    features_root=features_root,
                    cost_kwargs=dict(regularizer=0.001))
            
    if not restore:
        print('')
        n_variables = np.sum([np.prod(v.get_shape().as_list())
                              for v in tf.trainable_variables()])
        print('Number of trainable variables: {:d}'.format(n_variables))
        
 
    trainer = unet.Trainer(net, batch_size=10, optimizer="momentum",
                           opt_kwargs=dict(momentum=0.2))
    path = trainer.train(dpt, model_dir,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=1000000,
                         restore=restore,
                         prediction_path='prediction/' + name)
                         

    prop = np.array(qq)
    np.save(res_file + '_prop', prop)

n_files = 80
prefix = '../../data/RFI_data_Sep_2019/prepared/test_1/'
Example #24
from tf_unet import image_util
from tf_unet import unet
from tf_unet import util
import glob

search_path = "D:\\pythonworkspace\\tf_unet\\tf_unet\\demo\\IRholder\\ImageResize\\*.png"

data_provider = image_util.ImageDataProvider(search_path,
                                             data_suffix=".png",
                                             mask_suffix='_label.png')

net = unet.Unet(channels=data_provider.channels,
                n_class=data_provider.n_class,
                layers=4,
                features_root=32)

#trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
trainer = unet.Trainer(net,
                       batch_size=8,
                       optimizer="adam",
                       opt_kwargs=dict(learning_rate=0.00001))
path = trainer.train(data_provider,
                     "./unet_trained",
                     training_iters=32,
                     epochs=100,
                     dropout=0.5,
                     display_step=2,
                     write_graph=False,
                     restore=False)
#path = trainer.train(data_provider, "./unet_trained", training_iters=20, epochs=50, display_step=2)
Example #25
def main(trainPath='traindata',
         testPath='/media/data4TbExt4/neuron/neurofinder.00.00.test/',
         layerNum=4,
         features=64,
         bsize=4,
         opm='adam',
         iter=120,
         ep=220,
         display=60):
    '''
    Driver function. Provides the required inputs for all the modules of the tf_unet package.
    Input:
    trainPath: Path to the training data directory. All .tif training images must be stored in this directory.
    testPath: Path to the test data. All _mask.tif files must be stored in this directory.
    layerNum: Number of layers in the Unet architecture.
    features: Length of the feature map in the Unet architecture.
    bsize: Batch size for the input.
    opm: Type of optimizer used.
    iter: Number of iterations during training.
    ep: Number of epochs used for training.
    display: Number of steps after which the accuracy is displayed.
    '''
    if sys.version_info[0] >= 3:
        raise RuntimeError("Must be using Python 2.7!")
    if tf.test.gpu_device_name():
        print('GPU detected')
    else:
        print('No GPU!')

    # Train using Unet
    data_provider = image_util.ImageDataProvider('{}/*.tif'.format(trainPath))
    net = unet.Unet(channels=1,
                    n_class=2,
                    layers=layerNum,
                    features_root=features)
    trainer = unet.Trainer(net, batch_size=bsize, optimizer=opm)
    path = trainer.train(data_provider,
                         "./unet_trained",
                         training_iters=iter,
                         epochs=ep,
                         display_step=display)

    # Test using the trained result
    # assumes "from glob import glob" and numpy/matplotlib star imports from the elided header
    path = '{}/images'.format(testPath)
    files = sorted(glob(path + '/*.tiff'))
    testimg = array([imread(f) for f in files])
    concatArray = testimg.sum(axis=0)
    print('The dimension of testing image is {}'.format(concatArray.shape))
    plt.imshow(concatArray)
    concatArray = concatArray.reshape((1, ) + concatArray.shape + (1, ))
    prediction = net.predict("./unet_trained/model.cpkt", concatArray)
    prediction = prediction[0, :, :, 1]
    print('The output dimension is {}'.format(prediction.shape))
    savetxt('predictedArray.txt', prediction)

    # Plot the results
    fig, ax = plt.subplots(1, 2, figsize=(12, 5))
    ax[0].imshow(concatArray[0, ..., 0], cmap='gray')
    ax[1].imshow(prediction, aspect="auto", cmap='gray')
    ax[0].set_title("Input")
    ax[1].set_title("Prediction")
    plt.show()
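
A minimal invocation of the driver above; the paths are placeholders and must point at your own data (the defaults in the signature come from the author's machine):

# hypothetical call; adjust paths to your dataset layout
main(trainPath='traindata',
     testPath='/path/to/neurofinder.test/',
     layerNum=4, features=64, bsize=4,
     opm='adam', iter=120, ep=220, display=60)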
Example #26
from tf_unet import unet, util, image_util
data_provider = image_util.ImageDataProvider("./train/*",
                                             data_suffix=".jpg",
                                             mask_suffix="_Segmentation.png")

output_path = "./model_val/"
#setup & training
net = unet.Unet(layers=3, features_root=16, channels=3, n_class=2)
trainer = unet.Trainer(net)
path = trainer.train(data_provider,
                     output_path,
                     training_iters=32,
                     epochs=100,
                     restore=True)
Example #27
        image = sitk.ReadImage(names)
    else:
        image = sitk.ReadImage(path)
    image = sitk.GetArrayFromImage(image)
    # image = image.transpose([1, 0, 2])
    if label:
        image = image > 0
    else:
        image = image.reshape(list(image.shape) + [1]) / 255.0
    return image


if __name__ == '__main__':
    Train_Data = read_dcm('out.vtk')
    Label_Data = read_dcm(path2, True)
    print(Train_Data.shape, Label_Data.shape)
    # load the data
    data_provider = image_util.SimpleDataProvider(
        Train_Data, Label_Data, n_class=2, channels=1)
    # create and train the network
    net = unet.Unet(layers=3, features_root=64, channels=1, n_class=2)
    # trainer = unet.Trainer(net, batch_size=2)
    # path = trainer.train(data_provider, Model_PATH, training_iters=32, epochs=50, write_graph=True)
    
    for i in range(3, 5):
        trainer = unet.Trainer(net, batch_size=i)
        path = trainer.train(data_provider, Model_PATH, training_iters=32,
                             epochs=100, write_graph=True, restore=True)
        print(path)
    # print(data_provider.n_class)
    pass
Example #28
    # generator = image_gen.RgbDataProvider(nx, ny, cnt=20, rectangles=False)
    train_generator = UNetGeneratorClass(args.train_list, args.num_classes,
                                         args.batch_size, args.data_path,
                                         args.img_path, args.labels_path,
                                         args.patch_size, args.patch_overlap)
    test_generator = UNetGeneratorClass(args.test_list, args.num_classes, 1,
                                        args.data_path, args.img_path,
                                        args.labels_path, args.patch_size,
                                        args.patch_overlap)
    net = unet.Unet(channels=3,
                    n_class=args.num_classes,
                    layers=3,
                    features_root=16,
                    cost="cross_entropy")

    trainer = unet.Trainer(net, batch_size=args.batch_size,
                           optimizer="adam")  # ,
    # opt_kwargs=dict(momentum=0.2))

    path = trainer.train(train_generator,
                         "./unet_trained",
                         training_iters=train_generator.training_iters,
                         epochs=epochs,
                         dropout=dropout,
                         display_step=args.display_step,
                         restore=restore)

    if args.do_test:
        x_test, y_test = test_generator(1)
        prediction = net.predict(path, x_test)

        print("Testing error rate: {:.2f}%".format(
Example #29
def TestData(net, Test_Path, Train_Path, padSize):

    TestImageNum = 7

    Trained_Model_Path = Train_Path + 'model/model.cpkt'
    TestResults_Path = Test_Path + 'results/'

    if not os.path.isdir(TestResults_Path):
        os.makedirs(TestResults_Path)

    AllImage_logical = np.zeros((1924, 1924))
    AllImage = np.zeros((1924, 1924))

    trainer = unet.Trainer(net)

    TestData = image_util.ImageDataProvider(Test_Path + '*.tif', shuffle_data=False)

    L = len(TestData.data_files)
    DiceCoefficient = np.zeros(L)
    LogLoss = np.zeros(L)
    BB_Cord = np.zeros((L, 2))


    # recover each tile's x/y grid position from its file name
    aa = TestData.data_files
    for BB_ind in range(L):
        bb = aa[BB_ind]
        d = bb.find('/img')
        cc = bb[d:len(bb) - 4]
        dd = cc.split('_')
        xdim = int(dd[1])
        ydim = int(dd[2])
        BB_Cord[BB_ind, :] = [xdim, ydim]

    Data, Label = TestData(L)


    szD = Data.shape
    szL = Label.shape

    data = np.zeros((1, szD[1], szD[2], szD[3]))
    label = np.zeros((1, szL[1], szL[2], szL[3]))

    shiftFlag = 0
    for BB_ind in range(L):

        data[0, :, :, :] = Data[BB_ind, :, :, :].copy()
        label[0, :, :, :] = Label[BB_ind, :, :, :].copy()

        if shiftFlag == 1:
            shiftX = 0
            shiftY = 0
            data = np.roll(data, [0, shiftX, shiftY, 0])
            label = np.roll(label, [0, shiftX, shiftY, 0])

        prediction = net.predict(Trained_Model_Path, data)
        PredictedSeg = prediction[0, ..., 1] > 0.2

        # place the 148x148 tile prediction at its grid position
        ix, iy = BB_Cord[BB_ind, :]
        ix = int(148 * ix)
        iy = int(148 * iy)
        AllImage[ix:148 + ix, iy:148 + iy] = prediction[0, ..., 1]
        AllImage_logical[ix:148 + ix, iy:148 + iy] = PredictedSeg

        sz = label.shape

        A = int(padSize / 2)  # half the zero padding added to the input
        imgCombined = util.combine_img_prediction(data, label, prediction)
        DiceCoefficient[BB_ind] = DiceCoefficientCalculator(
            PredictedSeg, label[0, A:sz[1] - A, A:sz[2] - A, 1])
        util.save_image(imgCombined, TestResults_Path + "prediction_slice" +
                        str(BB_Cord[BB_ind]) + ".jpg")

        Loss = unet.error_rate(prediction, label[:, A:sz[1] - A, A:sz[2] - A, :])
        LogLoss[BB_ind] = np.log10(Loss + eps)  # eps: small constant defined elsewhere

    np.savetxt(TestResults_Path + 'DiceCoefficient.txt', DiceCoefficient)
    np.savetxt(TestResults_Path + 'LogLoss.txt', LogLoss)

    im = Image.fromarray(np.uint8(AllImage))
    msk = Image.fromarray(np.uint8(AllImage_logical))

    im.save(TestResults_Path + 'PredictionSeg_' + str(TestImageNum) + '.tif')
    msk.save(TestResults_Path + 'PredictionSeg_' + str(TestImageNum) + '_Logical.tif')

    return AllImage, AllImage_logical
Example #30
                                "./data/cj_right_all_gt.png")
# data_provider = ImageDataSingle("./data/cj_cut.png", "./data/cj_cut_gt.png")
# data_provider = ImageDataSingle("./data/cj_test1.png", "./data/cj_test1_gt.png")

class_weights = np.ones(data_provider.n_class) * 5
class_weights[0] = 0.5
net = unet.Unet(data_provider.channels,
                data_provider.n_class,
                layers=3,
                features_root=32,
                cost_kwargs={"class_weights": class_weights})

# trainer = unet.Trainer(net, optimizer="momentum", batch_size=10, verification_batch_size=3, opt_kwargs=dict(learning_rate=0.02))
trainer = unet.Trainer(net,
                       optimizer="adam",
                       batch_size=3,
                       verification_batch_size=3,
                       opt_kwargs=dict(learning_rate=0.001))
if not predict_only:
    path = trainer.train(data_provider,
                         output_path,
                         training_iters=10,
                         epochs=500,
                         restore=True)

# path = trainer.train(data_provider, output_path, training_iters=30, epochs=100)


# predict
def stack_imgs(imgs, num_row, num_col):
    '''