def parse_args(check=True):
    """Parse command-line flags, returning (FLAGS, unparsed_args).

    NOTE(review): this fragment was whitespace-mangled and its `def` header /
    parser construction were truncated; both are reconstructed here to match
    the complete parse_args visible elsewhere in this file. The original
    almost certainly also declared --batch_size / --dataset_train /
    --dataset_val (all read below) -- confirm against the full source.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--max_steps', type=int, default=1500)
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    FLAGS, unparsed = parser.parse_known_args()
    return FLAGS, unparsed


FLAGS, unparsed = parse_args()

slim = tf.contrib.slim  # TF 1.x slim alias (not used in this fragment)

tf.reset_default_graph()

# Feed True to read from the training pipeline, False for validation.
is_training_placeholder = tf.placeholder(tf.bool)

batch_size = FLAGS.batch_size

# Two input pipelines; `inputs` is a project helper not visible here.
image_tensor_train, orig_img_tensor_train, annotation_tensor_train = inputs(
    FLAGS.dataset_train, train=True, batch_size=batch_size, num_epochs=1e4)
image_tensor_val, orig_img_tensor_val, annotation_tensor_val = inputs(
    FLAGS.dataset_val, train=False, num_epochs=1e4)

# Select the train or val tensors at run time based on the placeholder.
image_tensor, orig_img_tensor, annotation_tensor = tf.cond(
    is_training_placeholder,
    true_fn=lambda: (image_tensor_train, orig_img_tensor_train,
                     annotation_tensor_train),
    false_fn=lambda: (image_tensor_val, orig_img_tensor_val,
                      annotation_tensor_val))

feed_dict_to_use = {is_training_placeholder: True}

upsample_factor = 8     # presumably FCN with 8x upsampling -- confirm
number_of_classes = 21  # presumably PASCAL VOC (20 classes + background) -- confirm
def parse_args(check=True):
    """Parse command-line flags, returning (FLAGS, unparsed_args).

    NOTE(review): the `def` header and parser construction were truncated in
    the mangled source; they are reconstructed to match the complete
    parse_args visible elsewhere in this file.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--dataset_train', type=str)
    parser.add_argument('--batch_size', type=int, default=16)
    parser.add_argument('--max_steps', type=int, default=1500)
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    FLAGS, unparsed = parser.parse_known_args()
    return FLAGS, unparsed


FLAGS, unparsed = parse_args()

number_of_classes = 21  # presumably PASCAL VOC (20 classes + background) -- confirm

# Define input
image_tensor, orig_img_tensor, annotation_tensor = inputs(
    FLAGS.dataset_train, train=True, batch_size=FLAGS.batch_size,
    num_epochs=1e4)

# Define loss
cross_entropy_loss = vgg16_fcn_loss(image_tensor, annotation_tensor,
                                    number_of_classes)

# Build the train op; `optimizer` is a project helper not visible here.
global_step, train_step = optimizer(
    cross_entropy_loss, FLAGS.learning_rate,
    global_step=tf.train.get_or_create_global_step())

# Create the log directory; exist_ok avoids the check-then-create race.
log_folder = FLAGS.train_dir
os.makedirs(log_folder, exist_ok=True)
def parse_args(check=True):
    """Parse evaluation command-line flags, returning (FLAGS, unparsed_args)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--train_dir', type=str)
    parser.add_argument('--eval_dir', type=str)
    parser.add_argument('--dataset_val', type=str)
    parser.add_argument('--num_pics', type=int, default=10)
    FLAGS, unparsed = parser.parse_known_args()
    return FLAGS, unparsed


FLAGS, unparsed = parse_args()

number_of_classes = 21  # presumably PASCAL VOC (20 classes + background) -- confirm

# Define network
image_tensor, orig_img_tensor, annotation_tensor = inputs(
    FLAGS.dataset_val, train=False, num_epochs=1e4)
pred, probabilities = vgg16_fcn_pred(image_tensor, number_of_classes)

# Create the eval output directory; exist_ok avoids the check-then-create race.
eval_dir = FLAGS.eval_dir
os.makedirs(eval_dir, exist_ok=True)

# Grow GPU memory on demand rather than reserving it all at startup.
sess_config = tf.ConfigProto()
sess_config.gpu_options.allow_growth = True

init_op = tf.global_variables_initializer()
init_local_op = tf.local_variables_initializer()

with tf.Session(config=sess_config) as sess:
    # Run the initializers.
    # NOTE(review): the source fragment is truncated at this point; the
    # session body (initializer runs, eval loop, ...) continues beyond this
    # chunk and must be restored from the full source.
    pass  # TODO: restore the truncated session body
def parse_args(check=True):
    """Parse command-line flags, returning (FLAGS, unparsed_args).

    NOTE(review): the `def` header and parser construction were truncated in
    the mangled source; they are reconstructed to match the complete
    parse_args visible elsewhere in this file. The original almost certainly
    also declared --batch_size / --dataset_train / --dataset_val /
    --output_dir / --checkpoint_path (all read below) -- confirm against the
    full source.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    FLAGS, unparsed = parser.parse_known_args()
    return FLAGS, unparsed


FLAGS, unparsed = parse_args()

slim = tf.contrib.slim  # TF 1.x slim alias (not used in this fragment)

tf.reset_default_graph()

# Feed True to read from the training pipeline, False for validation.
is_training_placeholder = tf.placeholder(tf.bool)

batch_size = FLAGS.batch_size

# Two input pipelines; `inputs` is a project helper not visible here.
image_tensor_train, orig_img_tensor_train, annotation_tensor_train = inputs(
    FLAGS.dataset_train, train=True, batch_size=batch_size, num_epochs=1e4)
image_tensor_val, orig_img_tensor_val, annotation_tensor_val = inputs(
    FLAGS.dataset_val, train=False, num_epochs=1e4)

# Select the train or val tensors at run time based on the placeholder.
image_tensor, orig_img_tensor, annotation_tensor = tf.cond(
    is_training_placeholder,
    true_fn=lambda: (image_tensor_train, orig_img_tensor_train,
                     annotation_tensor_train),
    false_fn=lambda: (image_tensor_val, orig_img_tensor_val,
                      annotation_tensor_val))

feed_dict_to_use = {is_training_placeholder: True}

upsample_factor = 16    # presumably FCN with 16x upsampling -- confirm
number_of_classes = 21  # presumably PASCAL VOC (20 classes + background) -- confirm

log_folder = os.path.join(FLAGS.output_dir, 'train')
vgg_checkpoint_path = FLAGS.checkpoint_path  # pretrained VGG weights to restore -- confirm usage