def compile_model(self):
        """Compile the selected model architecture."""

        if self.architecture == 'xnet':
            print('Training on XNet')
            self.mod = xnet(input_shape=(self.height, self.width, 1),
                            classes=2,
                            kernel_size=3,
                            filter_depth=self.filter_depth)
        elif self.architecture == 'unet':
            print('Training on UNet')
            self.mod = unet(input_shape=(self.height, self.width, 1),
                            classes=2,
                            kernel_size=3,
                            filter_depth=self.filter_depth)
        else:
            raise ValueError('Unknown architecture: %s' % self.architecture)

        # self.mod.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])
        self.mod.compile(optimizer=Adam(lr=1e-4),
                         loss=self.pixel_wise_loss,
                         metrics=['accuracy'])
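
# self.pixel_wise_loss is referenced above but not shown in this
# snippet. A minimal sketch of such a method, assuming a plain
# per-pixel binary cross-entropy (the original may weight pixels):
def pixel_wise_loss(self, y_true, y_pred):
    from keras import backend as K
    # Average the binary cross-entropy over every pixel in the batch.
    return K.mean(K.binary_crossentropy(y_true, y_pred))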
Example #2
##### Computational Graph - Start #####

X = tf.placeholder(tf.float32,
                   shape=(batch_size, 256, 256, num_channels),
                   name="myInput")
sparse_label = tf.placeholder(tf.int32,
                              shape=(batch_size, 256, 256),
                              name="myOutput")
class_weights = tf.placeholder(tf.float32,
                               shape=(num_classes,),
                               name="class_weights")

global_step = tf.Variable(0, trainable=False, name="global_step")

net_logits = unet(X, num_classes)

summ1 = tf.summary.scalar(
    'unweighted_cost',
    sparse_unweighted_cost(net_logits, sparse_label, num_classes))
summ2 = tf.summary.scalar(
    'weighted_cost',
    sparse_weighted_cost1(net_logits, sparse_label, class_weights,
                          num_classes))

learning_rate_node = tf.train.exponential_decay(learning_rate=learning_rate,
                                                global_step=global_step,
                                                decay_steps=100,
                                                decay_rate=0.95,
                                                staircase=True)
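
# sparse_unweighted_cost and sparse_weighted_cost1 are project helpers
# not shown here. A minimal sketch of the weighted variant, assuming
# per-pixel cross-entropy scaled by per-class weights (an assumption):
def sparse_weighted_cost1_sketch(logits, labels, class_weights, num_classes):
    # num_classes is unused in this sketch; kept to mirror the call above.
    # Per-pixel softmax cross-entropy against the integer label map.
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
                                                          logits=logits)
    # Weight each pixel by the weight of its ground-truth class.
    pixel_weights = tf.gather(class_weights, labels)
    return tf.reduce_mean(xent * pixel_weights)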
Example #3
config = tf.ConfigProto(log_device_placement=False)
config.gpu_options.allow_growth = True

batch_size = 20
learning_rate = 1e-3
iter_num = 0  # training iteration counter; avoids shadowing builtin iter

logs_path = '/tensorboard/tf-summary-logs/'
img_type = 'depth'

with tf.Session(config=config) as sess:

    UNET = unet(batch_size,
                img_height,
                img_width,
                learning_rate,
                sess,
                num_classes=max_labels,
                is_training=True,
                img_type=img_type)

    sess.run(tf.global_variables_initializer())

    summary_writer = tf.summary.FileWriter(logs_path,
                                           graph=tf.get_default_graph())

    while True:

        # Draw a random batch of images and dense label maps.
        img, label = SUNRGBD_dataset.get_random_shuffle(batch_size)
        batch_labels = label  # keep the unflattened label maps

        # Flatten the labels into a 1-D vector of per-pixel class ids.
        label = np.reshape(label, [-1])
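
        # The snippet cuts off here. A plausible continuation, assuming
        # the wrapper exposes a train_batch method (hypothetical name):
        loss, summary = UNET.train_batch(img, label)
        summary_writer.add_summary(summary, iter_num)
        iter_num += 1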
Example #4
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from keras.layers import Input
from keras.optimizers import Adam
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint

from unet import unet, dice_coef, conv2d_block, dice_coef_loss

plt.style.use("ggplot")

X_mass = np.load('Task1_Aug5_Mass_Full.npy')
y_mass = np.load('Task1_Aug5_Mass_Mask.npy')

# Split the data set into training and validation sets
X_train, X_valid, y_train, y_valid = train_test_split(X_mass, y_mass, test_size=0.2, random_state=42)
# Ratio of validation to training samples
test_size = (X_valid.shape[0] / X_train.shape[0])

im_height = 512
im_width = 512
input_img = Input((im_height, im_width, 1), name='img')
model = unet(input_img, n_filters=32, dropout=0.5, batchnorm=True)
model.compile(optimizer=Adam(), loss=dice_coef_loss, metrics=[dice_coef])
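
# dice_coef and dice_coef_loss come from the local unet module. For
# reference, a common formulation they likely follow (an assumption):
# dice = (2*|X∩Y| + s) / (|X| + |Y| + s), with s smoothing the ratio.
def dice_coef_sketch(y_true, y_pred, smooth=1.0):
    from keras import backend as K
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (
        K.sum(y_true_f) + K.sum(y_pred_f) + smooth)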

callbacks = [
    ReduceLROnPlateau(factor=0.1, patience=5, min_lr=0.0001, verbose=1),
    ModelCheckpoint('UNet256.h5', verbose=1, save_best_only=True, save_weights_only=True)
]

results = model.fit(X_train, y_train, batch_size=32, epochs=100, callbacks=callbacks, validation_data=(X_valid, y_valid))
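
# plt.style.use("ggplot") above suggests the training curves were
# plotted; a typical sketch using the Keras History object:
plt.figure(figsize=(8, 5))
plt.plot(results.history['loss'], label='training loss')
plt.plot(results.history['val_loss'], label='validation loss')
plt.xlabel('epoch')
plt.ylabel('dice loss')
plt.legend()
plt.show()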

# Model evaluations
# load the best model
model.load_weights('UNet256.h5')
# Evaluate on train set
model.evaluate(X_train, y_train, verbose=1)
# Evaluate on validation set
model.evaluate(X_valid, y_valid, verbose=1)
Example #5
learning_rate = 1e-3
iter_num = 0

logs_path = '/tensorboard/tf-summary-logs/'

graph = tf.Graph()

with graph.as_default():

    # Create the global step inside this graph; creating it before
    # entering the graph context would attach it to the default graph.
    global_step = tf.train.get_or_create_global_step()

    UNET = unet(batch_size,
                img_height,
                img_width,
                learning_rate,
                sess=None,
                num_classes=max_labels,
                is_training=True,
                img_type=img_type,
                use_horovod=True,
                global_step=global_step)

    # hooks = [
    #         # Horovod: BroadcastGlobalVariablesHook broadcasts initial variable states
    #         # from rank 0 to all other processes. This is necessary to ensure consistent
    #         # initialization of all workers when training is started with random weights
    #         # or restored from a checkpoint.
    #         hvd.BroadcastGlobalVariablesHook(0),
    #         tf.train.StopAtStepHook(last_step=600000), # // hvd.size())
    #         tf.train.LoggingTensorHook(tensors={'step': global_step},
    #                                    every_n_iter=1000),
    #     ]
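
    # use_horovod=True implies the wrapper wires up distributed training.
    # A minimal sketch of the standard horovod.tensorflow pattern it
    # likely follows (the stand-in cost tensor is illustrative only):
    import horovod.tensorflow as hvd

    hvd.init()
    cost = tf.reduce_mean(tf.get_variable('sketch_w', shape=[8]))  # stand-in
    # Scale the learning rate by worker count and average gradients
    # across ranks with the Horovod optimizer wrapper.
    opt = tf.train.AdamOptimizer(learning_rate * hvd.size())
    opt = hvd.DistributedOptimizer(opt)
    train_op = opt.minimize(cost, global_step=global_step)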