import tensorflow as tf

# datasets, nets, and models are the helper modules used throughout this
# chapter; dataset_dir and batch_size are defined earlier in the script.

# Learning rate
initial_learning_rate = 0.001
decay_steps = 250
decay_rate = 0.9

# Validation
output_steps = 10  # Number of steps between printed training output
eval_steps = 20  # Number of steps between evaluations

# Training
max_steps = 3000  # Total number of training steps
save_steps = 200  # Number of steps between checkpoint saves
num_tests = 5  # Number of test batches averaged per accuracy estimate
max_checkpoints_to_keep = 3
save_dir = "data/checkpoints"
train_vars = 'models/fc8-pets/weights:0,models/fc8-pets/biases:0'

# Export
export_dir = "/tmp/export/"
export_name = "pet-model"
export_version = 2

images, labels = datasets.input_pipeline(dataset_dir, batch_size,
                                         is_training=True)
test_images, test_labels = datasets.input_pipeline(dataset_dir, batch_size,
                                                   is_training=False)

with tf.variable_scope("models") as scope:
    logits = nets.inference(images, is_training=True)
    # Reuse the same weights for the evaluation branch of the graph.
    scope.reuse_variables()
    test_logits = nets.inference(test_images, is_training=False)

total_loss = models.compute_loss(logits, labels)
train_accuracy = models.compute_accuracy(logits, labels)
test_accuracy = models.compute_accuracy(test_logits, test_labels)

global_step = tf.Variable(0, trainable=False)
learning_rate = models.get_learning_rate(global_step, initial_learning_rate,
                                         decay_steps, decay_rate)
train_op = models.train(total_loss, learning_rate, global_step, train_vars)

saver = tf.train.Saver(max_to_keep=max_checkpoints_to_keep)
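The listing above only builds the graph; a session still has to drive it. What follows is a minimal sketch of such a loop, using the hyperparameters defined above. The checkpoint file name, the print format, and the averaging of num_tests test batches are assumptions for illustration, not the chapter's exact code.

# A minimal sketch of the training loop that drives the graph above.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # input_pipeline is assumed to use queue runners under the hood,
    # so they must be started before any batches can be dequeued.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(max_steps):
            _, loss_value, step = sess.run([train_op, total_loss,
                                            global_step])
            if step % output_steps == 0:
                print("Step %d: loss %.4f" % (step, loss_value))
            if step % eval_steps == 0:
                # Average several test batches for a steadier estimate.
                acc = sum(sess.run(test_accuracy)
                          for _ in range(num_tests)) / num_tests
                print("Step %d: test accuracy %.4f" % (step, acc))
            if step % save_steps == 0:
                # Checkpoint name "pet-model" is an assumption here.
                saver.save(sess, save_dir + "/pet-model", global_step=step)
    finally:
        coord.request_stop()
        coord.join(threads)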
The video model's graph is set up along the same lines, except that each example is a stack of num_frames frame paths pushed through a manually fed FIFOQueue. lines_from_file, input_pipeline, nets, and models are again the chapter's helpers; train_txt, num_frames, batch_size, image_size, and the learning-rate settings are defined earlier in the script.

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

train_data_reader = lines_from_file(train_txt, repeat=True)

image_paths_placeholder = tf.placeholder(tf.string,
                                         shape=(None, num_frames),
                                         name='image_paths')
labels_placeholder = tf.placeholder(tf.int64, shape=(None,), name='labels')

train_input_queue = data_flow_ops.FIFOQueue(capacity=10000,
                                            dtypes=[tf.string, tf.int64],
                                            shapes=[(num_frames,), ()])
train_enqueue_op = train_input_queue.enqueue_many(
    [image_paths_placeholder, labels_placeholder])

frames_batch, labels_batch = input_pipeline(train_input_queue,
                                            batch_size=batch_size,
                                            image_size=image_size)

with tf.variable_scope("models") as scope:
    logits, _ = nets.inference(frames_batch, is_training=True)

total_loss, cross_entropy_loss, reg_loss = models.compute_loss(
    logits, labels_batch)
train_accuracy = models.compute_accuracy(logits, labels_batch)

global_step = tf.Variable(0, trainable=False)
learning_rate = models.get_learning_rate(global_step, initial_learning_rate,
                                         decay_steps, decay_rate)
train_op = models.train(total_loss, learning_rate, global_step)

tf.summary.scalar("learning_rate", learning_rate)
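Unlike the pet model, nothing enters this pipeline until train_enqueue_op is run with a batch of frame paths and labels. A sketch of that driving loop follows. Here sample_videos is a hypothetical helper, assumed to pull the next batch_size entries from train_data_reader and return a (batch_size, num_frames) array of frame paths plus a (batch_size,) vector of integer labels; max_steps and the summary log directory are likewise assumptions.

# A sketch of the loop that feeds the FIFOQueue and runs a train step.
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter("/tmp/video-train", tf.get_default_graph())

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for _ in range(max_steps):
            # sample_videos is hypothetical; see the note above.
            paths, label_vals = sample_videos(train_data_reader,
                                              batch_size, num_frames)
            sess.run(train_enqueue_op, feed_dict={
                image_paths_placeholder: paths,
                labels_placeholder: label_vals,
            })
            _, loss_value, summary, step = sess.run(
                [train_op, total_loss, summary_op, global_step])
            writer.add_summary(summary, global_step=step)
    finally:
        coord.request_stop()
        coord.join(threads)

Enqueuing and training in the same loop keeps the sketch simple; in practice the enqueue calls are often moved to a separate thread so the GPU is not left waiting on file-name I/O.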