Example #1
    # Excerpt from a model class; assumes `import tensorflow as tf`,
    # `import lovasz_losses_tf`, and a project-local `dice_loss` at module level.
    def build_graph(self):
        is_training = tf.placeholder(tf.bool, name="is_training")
        images = tf.placeholder(tf.float32, shape=(None, None, None, FLAGS.channels), name="images")
        labels = tf.placeholder(tf.int32, shape=(None, None, None, 1))

        self.is_training = is_training
        self.images = images
        self.labels = labels

        logits = self.inference(images, FLAGS.classes, is_training)
        self.logits = logits

        labels1 = tf.reshape(labels, (-1,))

        if FLAGS.classes == 1:
            logits1 = tf.reshape(logits, (-1,))
            probs = tf.sigmoid(logits, name='probs')
            prob = tf.squeeze(probs, 3, name='prob')
            self.probs = probs
            if FLAGS.dice:
                # dice_loss is defined elsewhere in the source project
                loss = tf.identity(dice_loss(tf.cast(labels1, tf.float32), prob), name='di')
            elif FLAGS.lovasz:
                # lovasz_hinge expects logits and labels shaped [B, H, W];
                # squeeze the channel axis instead of passing flat labels
                loss = lovasz_losses_tf.lovasz_hinge(logits=tf.squeeze(logits, 3),
                                                     labels=tf.squeeze(labels, 3))
                loss = tf.identity(loss, name='blov')
            else:
                loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits1, labels=tf.cast(labels1, tf.float32))
                loss = tf.reduce_mean(loss, name='bxe')
                pass
        else:   # multiple channels
            logits1 = tf.reshape(logits, (-1, FLAGS.classes))
            probs = tf.nn.softmax(logits, name='probs')
            self.probs = probs
            # channel 1 is treated as the foreground probability
            prob = tf.identity(probs[:, :, :, 1], name='prob')

            if FLAGS.dice:
                assert False, 'Not supported'
            elif FLAGS.lovasz:
                # lovasz_softmax expects probabilities [B, H, W, C] and integer
                # labels [B, H, W]; squeeze the channel axis instead of flattening
                loss = lovasz_losses_tf.lovasz_softmax(probs, tf.squeeze(labels, 3), per_image=True)
                loss = tf.identity(loss, name='lov')
            else:
                # accuracy
                acc = tf.cast(tf.nn.in_top_k(logits1, labels1, 1), tf.float32)
                acc = tf.reduce_mean(acc, name='acc')
                self.metrics.append(acc)
                # cross-entropy
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits1, labels=labels1)
                loss = tf.reduce_mean(loss, name='xe')
                pass
        tf.losses.add_loss(loss)
        self.metrics.append(loss)
        pass
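
The two branches above map onto the two entry points of lovasz_losses_tf: lovasz_hinge for binary segmentation (raw logits) and lovasz_softmax for multi-class (softmax probabilities). A minimal shape sketch, assuming the lovasz_losses_tf module from the LovaszSoftmax repository; the placeholders are illustrative:

import tensorflow as tf
import lovasz_losses_tf as L

logits = tf.placeholder(tf.float32, (None, None, None))     # [B, H, W] raw scores
labels = tf.placeholder(tf.int32, (None, None, None))       # [B, H, W] in {0, 1}
binary_loss = L.lovasz_hinge(logits, labels, per_image=True)

probas = tf.placeholder(tf.float32, (None, None, None, 3))  # [B, H, W, C] softmax output
mc_labels = tf.placeholder(tf.int32, (None, None, None))    # [B, H, W] class ids
mc_loss = L.lovasz_softmax(probas, mc_labels, per_image=True)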
Example #2
def lovasz_loss(y_true, y_pred):
    # Keras passes (y_true, y_pred); drop the trailing channel axis and
    # treat the raw network output as logits
    y_true = K.cast(K.squeeze(y_true, -1), 'int32')
    y_pred = K.cast(K.squeeze(y_pred, -1), 'float32')
    logits = y_pred  # Jiaxin
    loss = lovasz_hinge(logits, y_true, per_image=True, ignore=None)
    return loss

def lavazs_loss(labels, scores):
    # Keras and TF have reversed argument order; `scores` are sigmoid
    # probabilities, so 2*scores - 1 rescales them to a logit-like range
    return L_loss.lovasz_hinge(2 * scores - 1, labels, ignore=255, per_image=True)
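
Both wrappers follow Keras's (y_true, y_pred) argument order, while lovasz_hinge itself takes (logits, labels). A hedged usage sketch, assuming a model whose final layer outputs raw logits (the model and data names are illustrative):

model.compile(optimizer='adam', loss=lovasz_loss)
model.fit(train_images, train_masks, batch_size=16, epochs=10)

If the network instead ends in a sigmoid, the second wrapper's 2*scores - 1 rescaling maps probabilities back into a logit-like range before the hinge.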
Example #4
# Excerpt: assumes `import os`, `import tensorflow as tf`,
# `import lovasz_losses_tf as L`, project-local `model` and `batch_gen`
# modules, and `path_img`, `path_gt`, `city` defined at module level.
def main():
    sub = 'austin_256/unet_1'
    log_path = '/home/raylee/aerialimage_label/log/' + sub
    model_path = '/media/raylee/BIGRAYLEE/dataset_aeriel/model/' + sub  #38000 for austin dataset

    patch_size = 512
    # img_width/img_height were undefined in this excerpt; assuming square patches
    img_width = img_height = patch_size
    batch_size = 8
    class_num = 2

    max_iteration = 900000  # 8 epochs
    momentum = 0.99
    learning_rate = 1e-4
    os.environ["CUDA_VISIBLE_DEVICES"] = '0'

    #gpu training set
    config = tf.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    config.allow_soft_placement = True
    config.log_device_placement = True
    config.gpu_options.allocator_type = 'BFC'

    #image, label = load_data(path_img,path_gt)

    #print(image.shape)
    #print(label.shape)

    #x = tf.placeholder(tf.float32,image.shape)#[batch_size, img_width, img_height, 3])
    #y = tf.placeholder(tf.float32,label.shape) #[batch_size, img_width, img_height, class_num])

    x_batch = tf.placeholder(tf.float32,
                             [batch_size, img_width, img_height, 3])
    y_batch = tf.placeholder(tf.float32,
                             [batch_size, img_width, img_height, 2])
    '''dataset = tf.data.Dataset.from_tensor_slices((x, y))
    dataset = dataset.shuffle(1).batch(batch_size).repeat()

    iterator = dataset.make_initializable_iterator()
    data_element = iterator.get_next()'''

    y_ = model.U_Net(x_batch, class_num)
    # Lovasz hinge loss (the repo's binary loss, applied here to the
    # two-channel logits and one-hot masks channel-wise)
    loss = L.lovasz_hinge(y_, y_batch, ignore=None, per_image=True)
    #cross entropy loss
    #loss = tf.reduce_mean((tf.nn.softmax_cross_entropy_with_logits_v2(labels = y_batch,logits = y_)))
    #test the train res
    #y_1 = tf.nn.softmax(y_)
    #y_1 = tf.argmax(y_1,axis=-1)
    correct_prediction = tf.equal(tf.argmax(y_, axis=-1),
                                  tf.argmax(y_batch, axis=-1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    #tf.summary.scalar('entropy_loss',loss)
    tf.summary.scalar('lovasz_hinge', loss)
    tf.summary.scalar('accuracy', accuracy)
    train_step = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(loss)

    sess = tf.Session()
    saver = tf.train.Saver()
    #sess.run(iterator.initializer, feed_dict={x: image,y:label})
    sess.run(tf.global_variables_initializer())
    print('variables initialized')
    #threads = tf.train.start_queue_runners(sess=sess)
    merged = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(log_path, sess.graph)
    print('Start Training...')

    # Resume from the latest checkpoint if one exists
    start_iter = 1
    ckpt = tf.train.get_checkpoint_state(model_path)  # was MODEL_SAVE_PATH, undefined in the excerpt
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        # recover the iteration counter from the 'UNet_itr%d.ckpt' filename
        pos = ckpt.model_checkpoint_path
        pos = pos.split('.')[0].split('r')[-1]
        start_iter = int(pos)

    #for epoch in range(0,4):
    #epoch 4
    batch = batch_gen.generator(path_img + city, path_gt + city)
    for itr in range(start_iter + 1, max_iteration + 1):
        #print('training')
        #small data loading
        #step_batch, step_label = sess.run(data_element)
        step_batch, step_label = next(batch)
        sess.run(train_step,
                 feed_dict={
                     x_batch: step_batch,
                     y_batch: step_label
                 })
        #res = y_1.eval(session=sess,feed_dict={x_batch:step_batch,y_batch:step_label})
        #save_pre_img(result_path,itr,res[0])
        if itr % 10 == 0:
            summary, train_loss, train_accuracy = sess.run(
                [merged, loss, accuracy],
                feed_dict={
                    x_batch: step_batch,
                    y_batch: step_label
                })
            print('iteration %d, loss:%f, acc:%f' %
                  (itr, train_loss, train_accuracy))
            summary_writer.add_summary(summary, itr)
        if itr % 9000 == 0:
            save_path = os.path.join(model_path, 'UNet_itr%d.ckpt' % (itr))
            saver.save(sess, save_path)
            print('model parameters have been saved to %s.' % model_path)
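
The split('.')[0].split('r')[-1] parse above is fragile: it breaks if the checkpoint path gains an extra dot or an 'r' after the counter. A sturdier sketch tied to the 'UNet_itr%d.ckpt' naming used here (not part of the original):

import re

match = re.search(r'UNet_itr(\d+)\.ckpt', ckpt.model_checkpoint_path)
if match:
    start_iter = int(match.group(1))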
Example #5
# Excerpt: X, Y, batch_size, training_flag, decay, keep_prob, nn_model,
# Initialize_Weights, Early_Stopping, and lovasz_hinge are defined or
# imported elsewhere in the source project.
weights, biases = Initialize_Weights()

prediction = nn_model(X,
                      weights,
                      biases,
                      batch_size,
                      training_flag=training_flag,
                      decay=decay,
                      keep_prob=keep_prob)
seg_pred = tf.nn.sigmoid(prediction)
labels = Y

learning_rate = tf.Variable(0.005, name='learning_rate', trainable=False)

# lovasz_hinge takes raw logits; seg_pred (the sigmoid above) is only for inference
cost = lovasz_hinge(prediction, labels, per_image=True)

optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

epoch_loss_list = []
test_loss_list = []

with tf.Session() as sess:

    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver(max_to_keep=1)

    tf.add_to_collection('cost', cost)

    early_stopper = Early_Stopping(sess, saver, 30)
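
An illustrative training step for the graph above, to be run inside the session block; train_batches is a hypothetical iterator, while X, Y, training_flag, and keep_prob come from the source project:

for batch_images, batch_masks in train_batches:
    _, batch_cost = sess.run([optimizer, cost],
                             feed_dict={X: batch_images,
                                        Y: batch_masks,
                                        training_flag: True,
                                        keep_prob: 0.8})
    epoch_loss_list.append(batch_cost)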
Example #6
def lov(y_true, y_pred):
    # lovasz_hinge takes raw logits; the original applied a sigmoid to
    # y_pred after computing the loss, which had no effect and was dropped
    loss = L.lovasz_hinge(y_pred, y_true)
    return loss
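
If the model's last layer already applies a sigmoid, one option (the trick used by lavazs_loss in Example #2) is to rescale probabilities to a logit-like range before the hinge; a sketch, with lov_from_probs as an illustrative name:

def lov_from_probs(y_true, y_pred):
    # y_pred in (0, 1); 2*y_pred - 1 lands in (-1, 1) as a logit proxy
    return L.lovasz_hinge(2. * y_pred - 1., y_true, per_image=True)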