train_path=train_path,
                                image_folder='liver',
                                mask_folder='mask',
                                target_size=img_size)

    # check if pretrained weights are defined
    if is_file(file_name=model_weights_name):
        pretrained_weights = model_weights_name
    else:
        pretrained_weights = None

    # build model
    unett = UNet(input_size=(img_width, img_height, 1),
                 n_filters=64,
                 pretrained_weights=pretrained_weights)
    unett.build()

    # create a checkpoint callback so that the best weight configuration is saved
    model_checkpoint = unett.checkpoint(model_name)

    # model training
    # steps_per_epoch should equal the number of samples in the dataset divided by the batch size
    # in this case, 560 / 2 = 280
    unett.fit_generator(train_gen,
                        steps_per_epoch=280,
                        epochs=10,
                        callbacks=[model_checkpoint])
    #history = unett.fit(
    #    train_gen,
    #    steps_per_epoch=280,
    #    epochs=5,
    #    callbacks=[model_checkpoint]
    #    )
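    # Hedged sketch (assumption, not part of this example): the checkpoint()
    # helper used above is not shown here, but it most likely wraps Keras'
    # ModelCheckpoint so that only the best-scoring weights are written to disk.
    # make_checkpoint is a hypothetical stand-in name used only for illustration.
    from tensorflow.keras.callbacks import ModelCheckpoint

    def make_checkpoint(weights_path):
        # save weights only when the monitored training loss improves
        return ModelCheckpoint(weights_path,
                               monitor='loss',
                               verbose=1,
                               save_best_only=True)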
Example #2
test_path = '/home/new-ece/szc0173/liver/test/'
save_path = '/home/new-ece/szc0173/liver/result2/'
model_weights_name = 'unet_weight_model.hdf5'
# TODO: move to config .json files
img_height = 512
img_width = 512
img_size = (img_height, img_width)
model = UNet(input_size=(img_width, img_height, 1),
             n_filters=64,
             pretrained_weights=model_weights_name)
# NOTE: train_path is not defined in this snippet; the value below is an assumed placeholder
train_path = '/home/new-ece/szc0173/liver/train/'
train_gen = train_generator(batch_size=2,
                            train_path=train_path,
                            image_folder='liver',
                            mask_folder='mask',
                            target_size=img_size)
model.build()
model_name2 = '/home/new-ece/szc0173/liver/unet_model2.hdf5'
model_checkpoint2 = model.checkpoint(model_name2)
model.fit_generator(train_gen,
                    steps_per_epoch=280,
                    epochs=10,
                    callbacks=[model_checkpoint2])
#history1 = model.fit(
#    train_gen,
#    steps_per_epoch=280,
#    epochs=10,
#    callbacks=[model_checkpoint2]
#    )

# saving model weights
model.save_model('/home/new-ece/szc0173/liver/unet_weight_model2.hdf5')
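# Hedged sketch: the weights saved above can later be restored by rebuilding
# the network with pretrained_weights, mirroring the construction used earlier
# in this example. eval_model is a hypothetical name used only for illustration.
eval_model = UNet(input_size=(img_width, img_height, 1),
                  n_filters=64,
                  pretrained_weights='/home/new-ece/szc0173/liver/unet_weight_model2.hdf5')
eval_model.build()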
Example #3

import numpy as np
import tensorflow as tf  # this example uses the TensorFlow 1.x graph API
def next_train_batch(batch_s, batch_count, is_first_iter):
    # reshuffle the training set at the start of every epoch
    if batch_count == 0:
        shuffle()
    # slice out the next batch of images and the matching labels
    count = batch_s * batch_count
    return images[count:(count + batch_s)], labels[count:(count + batch_s)]
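# Hedged sketch of the shuffle() helper called above (its definition is not
# part of the original snippet): it presumably permutes the global images and
# labels arrays in unison so that image/mask pairs stay aligned.
def shuffle():
    global images, labels
    # draw one random permutation and apply it to both arrays
    perm = np.random.permutation(len(images))
    images, labels = images[perm], labels[perm]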


tf.reset_default_graph()
X = tf.placeholder(tf.float32, [None, IMG_SIZE, IMG_SIZE, IMG_CHANNELS])
Y_ = tf.placeholder(tf.float32, [None, IMG_SIZE, IMG_SIZE, 1])
lr = tf.placeholder(tf.float32)
# build the network graph and define the loss and optimizer
model = UNet()
model.build(X)
cross_entropy = tf.losses.sigmoid_cross_entropy(Y_, model.output)
train_step = tf.train.AdamOptimizer(lr).minimize(cross_entropy)

# init
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

batch_count = 0
display_count = 1
epoch_loss = 0
best_loss = np.inf
saver = tf.train.Saver()
writer = tf.summary.FileWriter('../../logs')
writer.add_graph(tf.get_default_graph())
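# Hedged sketch of the training loop this setup implies (next_train_batch, the
# lr placeholder, best_loss and the Saver are all prepared above); the loop
# itself is not part of the original snippet, so the batch size, epoch count,
# learning rate and checkpoint path below are assumptions.
batch_size = 2        # assumed
num_epochs = 10       # assumed
batches_per_epoch = len(images) // batch_size
for epoch in range(num_epochs):
    epoch_loss = 0
    for batch_count in range(batches_per_epoch):
        batch_x, batch_y = next_train_batch(batch_size, batch_count, epoch == 0)
        _, loss = sess.run([train_step, cross_entropy],
                           feed_dict={X: batch_x, Y_: batch_y, lr: 1e-4})
        epoch_loss += loss
    # keep only the checkpoint with the lowest mean loss per epoch
    mean_loss = epoch_loss / batches_per_epoch
    if mean_loss < best_loss:
        best_loss = mean_loss
        saver.save(sess, '../../checkpoints/unet_best.ckpt')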