with tf.name_scope("loss"): # Get loss loss = residual_decoder.get_loss(predict_val=logits, real_val=y) tf.summary.histogram("loss", loss) # Prepare optimizer update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): optimizer = tf.train.AdamOptimizer().minimize(loss, global_step=global_step, name='optimizer') #optimizer = tf.train.AdamOptimizer(0.0001).minimize(loss) merged = tf.summary.merge_all() image_batch, anno_batch, filename = input_data.read_batch(BATCH_SIZE, type = 'train') image_batch_val, anno_batch_val, filename_val = input_data.read_batch(BATCH_SIZE, type = 'val') with tf.Session() as sess: coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) sess.run(tf.local_variables_initializer()) sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() #if os.path.exists(saved_ckpt_path): ckpt = tf.train.get_checkpoint_state(saved_ckpt_path) if ckpt and ckpt.model_checkpoint_path:
                 [64, 0, 128], [64, 64, 0], [0, 128, 192], [0, 0, 0]])

def color_gray(image):
    # Map a 2-D trainId label map to an RGB image using the cmap lookup table.
    height, width = image.shape
    return_img = np.zeros([height, width, 3], np.uint8)
    for i in range(height):
        for j in range(width):
            return_img[i, j, :] = cmap[image[i, j]]
    return return_img

image_batch, anno_batch, filename = input_data.read_batch(BATCH_SIZE, type=prediction_on, shuffle=False)

with tf.name_scope("input"):
    x = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, 3], name='x_input')
    y = tf.placeholder(tf.int32, [BATCH_SIZE, HEIGHT, WIDTH], name='ground_truth')
    keep_prob = tf.placeholder(dtype=tf.float32, name='keep_prob')

logits = denseASPP.denseASPP(x, keep_prob, train=False)

with tf.name_scope('prediction_and_miou'):
    prediction = tf.argmax(logits, axis=-1, name='predictions')
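# The per-pixel loops in color_gray are easy to read but slow on full-resolution
# frames. A vectorized sketch of the same lookup (an assumption that cmap is a
# NumPy array of shape [num_classes, 3] and that every label value indexes into
# it); this helper is not part of the original code:
def color_gray_vectorized(image):
    # Fancy indexing maps every trainId in the label map to its RGB color at once.
    return cmap[image].astype(np.uint8)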
softmax = tf.nn.softmax(logits, axis=-1)
predictions = tf.argmax(softmax, axis=-1, name='predictions')

train_mIoU = tf.Variable(0, dtype=tf.float32, trainable=False)
tf.summary.scalar('train_mIoU', train_mIoU)
test_mIoU = tf.Variable(0, dtype=tf.float32, trainable=False)
tf.summary.scalar('test_mIoU', test_mIoU)

merged = tf.summary.merge_all()

image_batch_0, image_batch, anno_batch, filename = input_data.read_batch(
    FLAGS.batch_size, FLAGS.height, FLAGS.width, FLAGS.crop_height, FLAGS.crop_width,
    FLAGS.train_random_scales, FLAGS.scales, FLAGS.train_random_mirror, FLAGS.rgb_mean,
    type='train')
_, image_batch_test, anno_batch_test, filename_test = input_data.read_batch(
    FLAGS.batch_size, FLAGS.height, FLAGS.width, FLAGS.crop_height, FLAGS.crop_width,
    FLAGS.val_random_scales, FLAGS.scales, FLAGS.val_random_mirror,
tf.summary.scalar('learning_rate', lr)

optimizer = tf.train.AdamOptimizer(lr).minimize(loss_all)

with tf.name_scope("mIoU"):
    softmax = tf.nn.softmax(logits, axis=-1)
    predictions = tf.argmax(logits, axis=-1, name='predictions')
    train_mIoU = tf.Variable(0, dtype=tf.float32)
    tf.summary.scalar('train_mIoU', train_mIoU)
    test_mIoU = tf.Variable(0, dtype=tf.float32)
    tf.summary.scalar('test_mIoU', test_mIoU)

merged = tf.summary.merge_all()

_, image_batch, anno_batch, filename = input_data.read_batch(BATCH_SIZE, type='train')
_, image_batch_test, anno_batch_test, filename_test = input_data.read_batch(
    BATCH_SIZE, type='val')

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()

    # if os.path.exists(saved_ckpt_path):
    ckpt = tf.train.get_checkpoint_state(saved_ckpt_path)
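    # The train_mIoU / test_mIoU variables above are plain summary holders; this
    # excerpt does not show how they are filled. A hedged sketch of one common
    # pattern (an assumption, not this repo's actual evaluation code): compute the
    # mIoU value outside the graph, then push it in through a placeholder before
    # writing the merged summary.
    miou_placeholder = tf.placeholder(tf.float32, shape=[], name='miou_value')
    update_test_miou = tf.assign(test_mIoU, miou_placeholder)
    # Later, once a validation mIoU has been computed in NumPy:
    # sess.run(update_test_miou, feed_dict={miou_placeholder: val_miou})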
cmap = Labels.trainId2Color
cmap[19] = (0, 0, 0)  # add ignore class color
cmap[255] = (0, 0, 0)

def color_gray(image):
    height, width = image.shape
    return_img = np.zeros([height, width, 3], np.uint8)
    for i in range(height):
        for j in range(width):
            return_img[i, j, :] = cmap[image[i, j]]
    return return_img

image_batch_0, image_batch, anno_batch, filename = input_data.read_batch(BATCH_SIZE, type=prediction_on)

with tf.name_scope("input"):
    x = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, 3], name='x_input')
    y = tf.placeholder(tf.int32, [BATCH_SIZE, HEIGHT, WIDTH], name='ground_truth')

_, logits = PSPNet.PSPNet(x, is_training=False, output_stride=8, pre_trained_model=PRETRAINED_MODEL_PATH)

with tf.name_scope('prediction_and_miou'):
    prediction = tf.argmax(logits, axis=-1, name='predictions')
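# A hedged sketch of the inference step that typically follows (an assumption, not
# part of the original excerpt): run the prediction op on one batch and save each
# result colorized with color_gray. Assumes `from PIL import Image`, `import os`,
# an output_dir argument, and that the model weights have already been restored
# into the session.
def save_predictions(sess, output_dir):
    imgs, names = sess.run([image_batch, filename])
    preds = sess.run(prediction, feed_dict={x: imgs})
    for k in range(BATCH_SIZE):
        colored = color_gray(preds[k])  # trainId map -> RGB visualization
        out_name = names[k].decode() + '_pred.png'
        Image.fromarray(colored).save(os.path.join(output_dir, out_name))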