# Build the input pipeline from the image and label filename lists.
t_filenames = tf.constant(t_train_name)
dataset = tf.data.Dataset.from_tensor_slices((x_filenames, t_filenames))
dataset = dataset.map(lambda x, y: _parse_function(x, y, (img_height, img_width), down_scale))
dataset = dataset.batch(batch_size).repeat(1)
iterator = dataset.make_initializable_iterator()
next_batch = iterator.get_next()
x_batch, t_batch = next_batch  # get the tf variables of the input and target images

unet = UNet(x=x_batch,
            t=t_batch,
            LR=1e-8,
            input_shape=[None, img_height, img_width, 3],
            output_shape=[None, img_height, img_width, class_num],
            )
unet.optimize(entropy_loss)

sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(iterator.initializer)

# Restore the weights saved after `epoch` training epochs.
saver = tf.train.Saver(max_to_keep=epoch)
saver.restore(sess, './Models/U-Net/unet-' + str(epoch) + '/unet.ckpt')  # epoch is an int, so cast it for the path

for ep in range(1):
    total_loss = 0
    counter = 0
    start = time.time()
    for _ in range(int(math.ceil(data_size / batch_size))):
        _, loss = sess.run([unet.training, unet.loss])
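Both listings rely on a `_parse_function` helper whose definition is not shown here. The first listing calls it without `class_num`, while the next one passes it; below is a minimal sketch of the five-argument form, assuming PNG files, integer class IDs in the target images, and that `size = (img_height, img_width)` has already been computed from the raw resolution and `down_scale`. It is an illustration of what such a loader might look like, not the author's actual implementation.

```python
import tensorflow as tf

def _parse_function(x_filename, t_filename, size, down_scale, class_num):
    """Hypothetical loader: decode, resize, and one-hot-encode one image/label pair."""
    x_img = tf.image.decode_png(tf.read_file(x_filename), channels=3)
    t_img = tf.image.decode_png(tf.read_file(t_filename), channels=1)
    # Assumption: size = (img_height, img_width) already reflects down_scale,
    # so the outputs match the networks' input_shape/output_shape.
    x_img = tf.image.resize_images(x_img, size)
    x_img = tf.cast(x_img, tf.float32) / 255.0  # scale inputs to [0, 1]
    # Nearest-neighbour resizing keeps the integer class IDs intact.
    t_img = tf.image.resize_images(t_img, size,
                                   method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    t_img = tf.one_hot(tf.squeeze(tf.cast(t_img, tf.int32), axis=-1), depth=class_num)
    return x_img, t_img
```

Returning `(x_img, t_img)` pairs keeps the mapped dataset in the same shape the iterator unpacks into `x_batch, t_batch` above.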
# Map the parser over the filename pairs, then shuffle, batch, and repeat.
dataset = dataset.map(lambda x, y: _parse_function(x, y, (img_height, img_width), down_scale, class_num))
dataset = dataset.shuffle(buffer_size=32).batch(batch_size).repeat(epoch + 1)
iterator = dataset.make_initializable_iterator()
next_batch = iterator.get_next()
x_batch, t_batch = next_batch  # get the tf variables of the input and target images

# Select the architecture by name; all models share the same constructor interface.
if model_name.lower() == 'unet' or model_name.lower() == 'u-net':
    segnet = UNet(x=x_batch,
                  t=t_batch,
                  LR=LR,
                  input_shape=[None, img_height, img_width, 3],
                  output_shape=[None, img_height, img_width, class_num],
                  )
    segnet.optimize(loss_function)
elif model_name.lower() == 'fcn':
    segnet = FCN(x=x_batch,
                 t=t_batch,
                 LR=LR,
                 input_shape=[None, img_height, img_width, 3],
                 output_shape=[None, img_height, img_width, class_num],
                 )
    segnet.optimize(loss_function)
elif model_name.lower() == 'resnet50' or model_name.lower() == 'resnet':
    segnet = FCN_ResNet50(x=x_batch,
                          t=t_batch,
                          LR=LR,
                          input_shape=[None, img_height, img_width, 3],