def main(unused_argv):
  if FLAGS.log_dir is None or FLAGS.log_dir == "":
    raise ValueError("Must specify an explicit `log_dir`")
  if FLAGS.data_dir is None or FLAGS.data_dir == "":
    raise ValueError("Must specify an explicit `data_dir`")

  device, target = device_and_target()
  with tf.device(device):
    images = tf.placeholder(tf.float32, [None, 784], name='image_input')
    labels = tf.placeholder(tf.float32, [None], name='label_input')
    data = read_data_sets(FLAGS.data_dir, one_hot=False, fake_data=False)
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels)
    loss = tf.Print(loss, [loss], message="Loss = ")
    train_op = mnist.training(loss, FLAGS.learning_rate)

  with tf.train.MonitoredTrainingSession(
      master=target,
      is_chief=(FLAGS.task_index == 0),
      checkpoint_dir=FLAGS.log_dir) as sess:
    while not sess.should_stop():
      xs, ys = data.train.next_batch(FLAGS.batch_size, fake_data=False)
      sess.run(train_op, feed_dict={images: xs, labels: ys})
def restore(_):
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
  with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Restore the model from the latest checkpoint.
    saver = tf.train.Saver()
    print(eval_correct.name)
    with tf.Session() as sess:
      ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
      saver.restore(sess, ckpt.model_checkpoint_path)
      steps = data_sets.test.num_examples // FLAGS.batch_size
      accuracy = 0
      for i in range(steps):
        batchx, batchy = data_sets.test.next_batch(FLAGS.batch_size)
        accuracy += sess.run(eval_correct,
                             feed_dict={images_placeholder: batchx,
                                        labels_placeholder: batchy})
      print("accuracy: {}".format(accuracy / float(steps * FLAGS.batch_size)))
def train(self, **kwargs):
  tf.logging.set_verbosity(tf.logging.ERROR)
  self.data_sets = input_data.read_data_sets(INPUT_DATA_DIR)
  self.images_placeholder = tf.placeholder(
      tf.float32, shape=(BATCH_SIZE, mnist.IMAGE_PIXELS))
  self.labels_placeholder = tf.placeholder(tf.int32, shape=(BATCH_SIZE,))
  logits = mnist.inference(self.images_placeholder, HIDDEN_1, HIDDEN_2)
  self.loss = mnist.loss(logits, self.labels_placeholder)
  self.train_op = mnist.training(self.loss, LEARNING_RATE)
  self.summary = tf.summary.merge_all()
  init = tf.global_variables_initializer()
  self.sess = tf.Session()
  self.summary_writer = tf.summary.FileWriter(LOG_DIR, self.sess.graph)
  self.sess.run(init)
  data_set = self.data_sets.train
  for step in xrange(MAX_STEPS):
    images_feed, labels_feed = data_set.next_batch(BATCH_SIZE, False)
    feed_dict = {
        self.images_placeholder: images_feed,
        self.labels_placeholder: labels_feed,
    }
    _, loss_value = self.sess.run([self.train_op, self.loss],
                                  feed_dict=feed_dict)
    if step % 100 == 0:
      print("At step {}, loss = {}".format(step, loss_value))
      summary_str = self.sess.run(self.summary, feed_dict=feed_dict)
      self.summary_writer.add_summary(summary_str, step)
      self.summary_writer.flush()
def main(_):
  data_sets = input_data.read_data_sets(data_dir)
  images_placeholder = tf.placeholder(tf.float32,
                                      shape=(batch_size, mnist.IMAGE_PIXELS))
  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size,))
  logits = mnist.inference(images_placeholder, hidden1, hidden2)
  loss = mnist.loss(logits, labels_placeholder)
  train_op = mnist.training(loss, learning_rate)
  eval_correct = mnist.evaluation(logits, labels_placeholder)
  init = tf.global_variables_initializer()
  sess = tf.Session()
  sess.run(init)
  for step in range(max_steps):
    start_time = time.time()
    feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                               labels_placeholder)
    _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
    duration = time.time() - start_time
    if step % 100 == 0:
      print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
    if (step + 1) % 1000 == 0 or (step + 1) == max_steps:
      print('Training Data Eval:')
      do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
              data_sets.train)
      print('Validation Data Eval:')
      do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
              data_sets.validation)
      print('Test Data Eval:')
      do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
              data_sets.test)
def main(unused_argv):
  if FLAGS.data_dir is None or FLAGS.data_dir == "":
    raise ValueError("Must specify an explicit `data_dir`")
  if FLAGS.train_dir is None or FLAGS.train_dir == "":
    raise ValueError("Must specify an explicit `train_dir`")

  device, target = device_and_target()
  with tf.device(device):
    images, labels = inputs(FLAGS.batch_size)
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels)
    train_op = mnist.training(loss, FLAGS.learning_rate)

  # scott
  hooks = [tf.train.StopAtStepHook(last_step=100000)]
  mystep = 0
  with tf.train.MonitoredTrainingSession(
      master=target,
      is_chief=(FLAGS.task_index == 0),
      checkpoint_dir=FLAGS.train_dir,
      hooks=hooks) as sess:
    while not sess.should_stop():
      mystep += 1
      sess.run(train_op)
      with open(os.path.join(FLAGS.train_dir, "mystep.txt"), 'a') as fd:
        fd.write(str(mystep) + "\n")
def run_training():
  """Train MNIST for a number of steps."""
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Input images and labels.
    images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                            num_epochs=FLAGS.num_epochs)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the loss calculation.
    loss = mnist.loss(logits, labels)
    # Add to the Graph operations that train the model.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # The op for initializing the variables.
    init_op = tf.initialize_all_variables()
    # Create a session for running operations in the Graph.
    sess = tf.Session()
    # Initialize the variables (the trained variables and the
    # epoch counter).
    sess.run(init_op)
    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      step = 0
      while not coord.should_stop():
        start_time = time.time()
        # Run one step of the model. The return values are
        # the activations from the `train_op` (which is
        # discarded) and the `loss` op. To inspect the values
        # of your ops or variables, you may include them in
        # the list passed to sess.run() and the value tensors
        # will be returned in the tuple from the call.
        _, loss_value = sess.run([train_op, loss])
        duration = time.time() - start_time
        # Print an overview fairly often.
        if step % 100 == 0:
          print("Step %d: loss = %.2f (%.3f sec)" % (step, loss_value, duration))
        step += 1
    except tf.errors.OutOfRangeError:
      print("Done training for %d epochs, %d steps." % (FLAGS.num_epochs, step))
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def run_training():
  """Train MNIST for a number of steps."""
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Input images and labels.
    image_batch, label_batch = inputs(train=True, batch_size=FLAGS.batch_size,
                                      num_epochs=FLAGS.num_epochs)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(image_batch, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the loss calculation.
    loss = mnist.loss(logits, label_batch)
    # Add to the Graph operations that train the model.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # The op for initializing the variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    # Create a session for running operations in the Graph.
    with tf.Session() as sess:
      # Initialize the variables (the trained variables and the
      # epoch counter).
      sess.run(init_op)
      writer = tf.summary.FileWriter(".", sess.graph)
      try:
        step = 0
        while True:  # Train until OutOfRangeError.
          start_time = time.time()
          # Run one step of the model. The return values are
          # the activations from the `train_op` (which is
          # discarded) and the `loss` op. To inspect the values
          # of your ops or variables, you may include them in
          # the list passed to sess.run() and the value tensors
          # will be returned in the tuple from the call.
          _, loss_value = sess.run([train_op, loss])
          # label = sess.run([label_batch])
          duration = time.time() - start_time
          # Print an overview fairly often.
          if step % 100 == 0:
            print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
            # print(len(label[0]))
          step += 1
      except tf.errors.OutOfRangeError:
        print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
def run_training():
  data_set = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
  # Run within the default Graph.
  with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels_placeholder)
    train_op = mnist.training(loss, FLAGS.learning_rate)
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Merge all summary tensors.
    summary = tf.summary.merge_all()
    # Create the variable initializer.
    init = tf.global_variables_initializer()
    # Create a saver for checkpoints.
    saver = tf.train.Saver()
    # Create a session.
    sess = tf.Session()
    # Create a SummaryWriter to output the merged summaries.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    sess.run(init)
    # Start training.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill the feed dict for the current step.
      feed_dict = fill_feed_dict(data_set.train, images_placeholder,
                                 labels_placeholder)
      # sess.run() returns a tuple with one element per fetched Tensor: a
      # numpy array holding that Tensor's value for this training step.
      # train_op produces no output, so its entry in the tuple is None and is
      # discarded. If the model diverges during training, the loss may become
      # NaN, so we fetch its value and log it.
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time
      if step % 100 == 0:
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model every 1000 steps.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        print('Training data eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_set.train)
        print('Validation data eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_set.validation)
        print('Test data eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_set.test)
def analysis(im):
  # Restore the saved model and predict the digit for a single image.
  images_placeholder = tf.placeholder(tf.float32, shape=(1, mnist.IMAGE_PIXELS))
  logits = mnist.inference(images_placeholder, 128, 32)
  init_op = tf.global_variables_initializer()
  saver = tf.train.Saver()
  with tf.Session() as sess:
    sess.run(init_op)
    saver.restore(sess, os.path.abspath('.') + '/model.ckpt-49999')
    prediction = tf.argmax(logits, 1)
    return prediction.eval(feed_dict={images_placeholder: [im]}, session=sess)
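# Usage sketch for `analysis` above (an assumption, not part of the original
# source): `analysis` appears to expect `im` as a flat vector of
# mnist.IMAGE_PIXELS (784) floats scaled like the training data, and the
# checkpoint path it restores is illustrative. `predict_digit_from_file` is a
# hypothetical helper name.
def predict_digit_from_file(path):
  from PIL import Image  # assumed available
  import numpy as np
  # Convert to 28x28 grayscale and flatten to shape (784,), scaled to [0, 1].
  img = Image.open(path).convert('L').resize((28, 28))
  im = np.asarray(img, dtype=np.float32).reshape(-1) / 255.0
  return analysis(im)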
def run_training():
  """Train MNIST for a number of steps."""
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Input images and labels.
    image_batch, label_batch = inputs(train=True, batch_size=FLAGS.batch_size,
                                      num_epochs=FLAGS.num_epochs)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(image_batch, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the loss calculation.
    loss = mnist.loss(logits, label_batch)
    # Add to the Graph operations that train the model.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # The op for initializing the variables.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    # Create a session for running operations in the Graph.
    with tf.Session() as sess:
      # Initialize the variables (the trained variables and the
      # epoch counter).
      sess.run(init_op)
      try:
        step = 0
        while True:  # Train until OutOfRangeError.
          start_time = time.time()
          # Run one step of the model. The return values are
          # the activations from the `train_op` (which is
          # discarded) and the `loss` op. To inspect the values
          # of your ops or variables, you may include them in
          # the list passed to sess.run() and the value tensors
          # will be returned in the tuple from the call.
          _, loss_value = sess.run([train_op, loss])
          duration = time.time() - start_time
          # Print an overview fairly often.
          if step % 100 == 0:
            print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
          step += 1
      except tf.errors.OutOfRangeError:
        print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
def run_training():
  """Train MNIST for a number of steps."""
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Input images and labels.
    images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                            num_epochs=FLAGS.num_epochs)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the loss calculation.
    loss = mnist.loss(logits, labels)
    # Add to the Graph operations that train the model.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # The op for initializing the variables.
    init_op = tf.initialize_all_variables()
    # Create a session for running operations in the Graph.
    sess = tf.Session()
    # Initialize the variables (the trained variables and the
    # epoch counter).
    sess.run(init_op)
    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      step = 0
      while not coord.should_stop():
        start_time = time.time()
        # Run one step of the model. The return values are
        # the activations from the `train_op` (which is
        # discarded) and the `loss` op. To inspect the values
        # of your ops or variables, you may include them in
        # the list passed to sess.run() and the value tensors
        # will be returned in the tuple from the call.
        _, loss_value = sess.run([train_op, loss])
        duration = time.time() - start_time
        # Print an overview fairly often.
        if step % 100 == 0:
          print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        step += 1
    except tf.errors.OutOfRangeError:
      print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def run_training():
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
  with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels_placeholder)
    train_op = mnist.training(loss, FLAGS.learning_rate)
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    summary_op = tf.merge_all_summaries()
    init = tf.initialize_all_variables()
    saver = tf.train.Saver()
    sess = tf.Session()
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
    sess.run(init)
    for step in range(FLAGS.max_steps):
      start_time = time.time()
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                 labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time
      if step % 100 == 0:
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
        saver.save(sess, checkpoint_file, global_step=step)
        print('Training Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.train)
        print('Validation Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.validation)
        print('Test Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.test)
def run_training():
  with tf.Graph().as_default():
    # Input images and labels.
    images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                            num_epochs=FLAGS.num_epochs)
    # Construct the logits network.
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    # Define the loss function.
    loss = mnist.loss(logits, labels)
    # Add to the Graph operations that train the model.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Initialize parameters.
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess = tf.Session()
    sess.run(init_op)
    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
      step = 0
      while not coord.should_stop():
        start_time = time.time()
        _, loss_value = sess.run([train_op, loss])
        duration = time.time() - start_time
        # Output one result every 100 steps.
        if step % 100 == 0:
          print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        step += 1
    except tf.errors.OutOfRangeError:
      print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
      coord.request_stop()  # Tell the other threads to close.
    coord.join(threads)
    sess.close()
def run_training():
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver(tf.all_variables())
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)
    ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      print('...no checkpoint found...')
    # Evaluate against the test set.
    print('Test Data Eval:')
    do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
            data_sets.test)
def main(unused_argv):
  if FLAGS.data_dir is None or FLAGS.data_dir == "":
    raise ValueError("Must specify an explicit `data_dir`")
  if FLAGS.train_dir is None or FLAGS.train_dir == "":
    raise ValueError("Must specify an explicit `train_dir`")

  device, target = device_and_target()
  with tf.device(device):
    images, labels = inputs(FLAGS.batch_size)
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    loss = mnist.loss(logits, labels)
    train_op = mnist.training(loss, FLAGS.learning_rate)

  with tf.train.MonitoredTrainingSession(
      master=target,
      is_chief=(FLAGS.task_index == 0),
      checkpoint_dir=FLAGS.train_dir) as sess:
    while not sess.should_stop():
      sess.run(train_op)
def build(self, hp):
  self.data_sets = input_data.read_data_sets(INPUT_DATA_DIR)
  self.images_placeholder = tf.placeholder(
      tf.float32, shape=(hp['batch_size'], mnist.IMAGE_PIXELS))
  self.labels_placeholder = tf.placeholder(tf.int32, shape=(hp['batch_size'],))
  logits = mnist.inference(self.images_placeholder, hp['hidden1'], hp['hidden2'])
  self.loss = mnist.loss(logits, self.labels_placeholder)
  self.train_op = mnist.training(self.loss, hp['learning_rate'])
  self.summary = tf.summary.merge_all()
  init = tf.global_variables_initializer()
  saver = tf.train.Saver()
  self.sess = tf.Session()
  self.summary_writer = tf.summary.FileWriter(LOG_DIR, self.sess.graph)
  self.sess.run(init)
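# Usage sketch for `build` above (hypothetical values; the dict keys are
# exactly the ones `build` reads; the owning class is not shown in this
# snippet, so the object below is assumed):
hp = {
    'batch_size': 100,
    'hidden1': 128,
    'hidden2': 32,
    'learning_rate': 0.01,
}
# model = MnistModel()  # assumed class exposing build()/train()
# model.build(hp)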
def run_training():
  """Train MNIST for a number of steps."""
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Input images and labels.
    images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                            num_epochs=FLAGS.num_epochs)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the loss calculation.
    loss = mnist.loss(logits, labels)
    # Add to the Graph operations that train the model.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # The op for initializing the variables.
    init_op = tf.group(tf.initialize_all_variables(),
                       tf.initialize_local_variables())
    # Create a session for running operations in the Graph.
    sess = tf.Session()
    # Initialize the variables (the trained variables and the
    # epoch counter).
    sess.run(init_op)
    # Start input enqueue threads.
    print("Queue runners: %s" %
          ([qr.name for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)]))
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Wait for the queue to get loaded.
    time.sleep(15)
    run_metadata = tf.RunMetadata()
    try:
      step = 0
      while not coord.should_stop():
        start_time = time.time()
        # Run one step of the model. The return values are
        # the activations from the `train_op` (which is
        # discarded) and the `loss` op. To inspect the values
        # of your ops or variables, you may include them in
        # the list passed to sess.run() and the value tensors
        # will be returned in the tuple from the call.
        if step == 500:
          # Trace this step and dump the run metadata plus a Chrome timeline.
          _, loss_value = sess.run(
              [train_op, loss],
              options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
              run_metadata=run_metadata)
          with open("run_metadata.pbtxt", "w") as out:
            out.write(str(run_metadata))
          from tensorflow.python.client import timeline
          trace = timeline.Timeline(step_stats=run_metadata.step_stats)
          trace_file = open('timeline.reader-1thread.json', 'w')
          trace_file.write(trace.generate_chrome_trace_format())
        else:
          _, loss_value = sess.run([train_op, loss])
        duration = time.time() - start_time
        # Print an overview fairly often.
        if step % 100 == 0:
          print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        step += 1
    except tf.errors.OutOfRangeError:
      print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)

  ps_hosts = FLAGS.ps_hosts.split(',')
  worker_hosts = FLAGS.worker_hosts.split(',')
  task_index = FLAGS.task_index
  master = "grpc://" + worker_hosts[task_index]
  logs_path = os.path.join(FLAGS.log_dir, str(task_index))

  # Start a server for a specific task.
  cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})

  # Between-graph replication.
  with tf.device(
      tf.train.replica_device_setter(
          worker_device="/job:worker/task:%d" % task_index,
          cluster=cluster)):
    # Count the number of updates.
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = async_training(loss, FLAGS.learning_rate, global_step)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary Tensor based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()
    # Add the variable initializer Op.
    init_op = tf.global_variables_initializer()

  sv = tf.train.Supervisor(is_chief=(task_index == 0),
                           global_step=global_step,
                           init_op=init_op)
  with sv.prepare_or_wait_for_session(master) as sess:
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(logs_path, sess.graph)
    # And then after everything is built:
    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                 labels_placeholder)
      # Run one step of the model. The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op. To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value, summary = sess.run([train_op, loss, summary_op],
                                        feed_dict=feed_dict)
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_writer.add_summary(summary, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.test)
def run_training():
  """Train MNIST for a number of epochs."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    with tf.name_scope('input'):
      # Input data
      images_initializer = tf.placeholder(
          dtype=data_sets.train.images.dtype,
          shape=data_sets.train.images.shape)
      labels_initializer = tf.placeholder(
          dtype=data_sets.train.labels.dtype,
          shape=data_sets.train.labels.shape)
      input_images = tf.Variable(images_initializer, trainable=False,
                                 collections=[])
      input_labels = tf.Variable(labels_initializer, trainable=False,
                                 collections=[])
      image, label = tf.train.slice_input_producer(
          [input_images, input_labels], num_epochs=FLAGS.num_epochs)
      label = tf.cast(label, tf.int32)
      images, labels = tf.train.batch([image, label],
                                      batch_size=FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels)
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create the op for initializing variables.
    init_op = tf.initialize_all_variables()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Run the Op to initialize the variables.
    sess.run(init_op)
    sess.run(input_images.initializer,
             feed_dict={images_initializer: data_sets.train.images})
    sess.run(input_labels.initializer,
             feed_dict={labels_initializer: data_sets.train.labels})
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
                                            graph_def=sess.graph_def)
    # Start input enqueue threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # And then after everything is built, start the training loop.
    try:
      step = 0
      while not coord.should_stop():
        start_time = time.time()
        # Run one step of the model.
        _, loss_value = sess.run([train_op, loss])
        duration = time.time() - start_time
        # Write the summaries and print an overview fairly often.
        if step % 100 == 0:
          # Print status to stdout.
          print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
          # Update the events file.
          summary_str = sess.run(summary_op)
          summary_writer.add_summary(summary_str, step)
          step += 1
        # Save a checkpoint periodically.
        if (step + 1) % 1000 == 0:
          print('Saving')
          saver.save(sess, FLAGS.train_dir, global_step=step)
        step += 1
    except tf.errors.OutOfRangeError:
      print('Saving')
      saver.save(sess, FLAGS.train_dir, global_step=step)
      print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
      # When done, ask the threads to stop.
      coord.request_stop()
    # Wait for threads to finish.
    coord.join(threads)
    sess.close()
def run_training():
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir)
  max_steps = int(math.ceil(
      CONFIG.epoch * data_sets.train.num_examples / CONFIG.batch_size))
  with tf.Graph().as_default():
    images_placeholder, labels_placeholder = placeholder_inputs(CONFIG.batch_size)
    logits = mnist.inference(images_placeholder, CONFIG.size_hidden_1,
                             CONFIG.size_hidden_2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, CONFIG.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary Tensor based on the TF collection of Summaries.
    summary = tf.summary.merge_all()
    # Add the variable initializer Op.
    init = tf.global_variables_initializer()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    # And then after everything is built:
    # Run the Op to initialize the variables, then optionally restore a
    # checkpoint. Restoring must come after the init run; otherwise init
    # would overwrite the restored values.
    sess.run(init)
    if FLAGS.c:
      saver.restore(sess, os.path.join(FLAGS.log_dir, 'model.ckpt'))
    progbar = Progbar(target=CONFIG.eval_every_n_steps)
    for step in xrange(max_steps):
      start_time = time.time()
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                 labels_placeholder)
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      progbar.update((step % CONFIG.eval_every_n_steps) + 1,
                     [("Loss", loss_value)], force=True)
      duration = time.time() - start_time
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % CONFIG.eval_every_n_steps == 0 or (step + 1) == max_steps:
        print("Total : ", int((step + 1) / CONFIG.eval_every_n_steps), "/",
              int(math.ceil(max_steps / CONFIG.eval_every_n_steps)))
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.test)
        progbar = Progbar(target=CONFIG.eval_every_n_steps)
def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(tempfile.mkdtemp(), FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels and mark as input.
    placeholders = placeholder_inputs()
    keys_placeholder, images_placeholder, labels_placeholder = placeholders
    inputs = {'key': keys_placeholder.name,
              'image': images_placeholder.name}
    tf.add_to_collection('inputs', json.dumps(inputs))
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # To be able to extract the id, we need to add the identity function.
    keys = tf.identity(keys_placeholder)
    # The prediction will be the index in logits with the highest score.
    # We also use a softmax operation to produce a probability distribution
    # over all possible digits.
    prediction = tf.argmax(logits, 1)
    scores = tf.nn.softmax(logits)
    # Mark the outputs.
    outputs = {'key': keys.name,
               'prediction': prediction.name,
               'scores': scores.name}
    tf.add_to_collection('outputs', json.dumps(outputs))
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()
    # Add the variable initializer Op.
    init = tf.initialize_all_variables()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
    # And then after everything is built:
    # Run the Op to initialize the variables.
    sess.run(init)
    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                 labels_placeholder)
      # Run one step of the model. The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op. To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.test)
    # Export the model so that it can be loaded and used later for predictions.
    file_io.create_dir(FLAGS.model_dir)
    saver.save(sess, os.path.join(FLAGS.model_dir, 'export'))
def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    # BOT: make the learning rate a variable so we can update it using our bot.
    learning_rate = tf.Variable(FLAGS.learning_rate, trainable=False)
    train_op = mnist.training(loss, learning_rate)
    bot.lr = FLAGS.learning_rate
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary Tensor based on the TF collection of Summaries.
    summary = tf.summary.merge_all()
    # Add the variable initializer Op.
    init = tf.global_variables_initializer()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    # And then after everything is built:
    # Run the Op to initialize the variables.
    sess.run(init)
    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                 labels_placeholder)
      # Run one step of the model. The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op. To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Print the step number.
        print("step: {}".format(step))
        # Evaluate against the training set.
        print('Training Data Eval:')
        message_trn = 'Training Data Eval: \n' + do_eval(
            sess, eval_correct, images_placeholder, labels_placeholder,
            data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        message_val = 'Validation Data Eval:\n' + do_eval(
            sess, eval_correct, images_placeholder, labels_placeholder,
            data_sets.validation)
        # Evaluate the validation loss.
        val_loss_value = sess.run(loss, feed_dict=fill_feed_dict(
            data_sets.validation, images_placeholder, labels_placeholder))
        # Evaluate against the test set.
        print('Test Data Eval:')
        message_tst = 'Test Data Eval:\n' + do_eval(
            sess, eval_correct, images_placeholder, labels_placeholder,
            data_sets.test)
        ## BOT: handling of all bot commands ##
        # Prepare the bot update message.
        message = "\n".join([
            "step: {}".format(step + 1),
            message_trn,
            message_val,
            message_tst
        ])
        bot.set_status(message)
        # Send the update message.
        if bot.verbose:
          bot.send_message(message)
        # Stop-training command from the bot.
        if bot.stop_train_flag:
          bot.send_message('Training stopped!')
          print('Training Stopped! Stop command sent via Telegram bot.')
          break
        # Update the bot's loss history (for the /plot command).
        bot.loss_hist.append(loss_value)
        bot.val_loss_hist.append(val_loss_value)
        # Modify the learning rate via the bot. The assign op must actually
        # be run for the update to take effect.
        if bot.modify_lr != 1:
          curr_lr = sess.run(learning_rate)
          new_lr = curr_lr * bot.modify_lr
          sess.run(tf.assign(learning_rate, new_lr))
          message = '\nStep %05d: setting learning rate to %f.' % (step + 1,
                                                                   new_lr)
          print(message)
          bot.send_message(message)
          bot.modify_lr = 1
          bot.lr = new_lr
def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST. If input_path is specified, download the data from GCS to
  # the folder expected by read_data_sets.
  data_dir = tempfile.mkdtemp()
  if FLAGS.input_path:
    files = [os.path.join(FLAGS.input_path, file_name)
             for file_name in INPUT_FILES]
    subprocess.check_call(['gsutil', '-m', '-q', 'cp', '-r'] + files +
                          [data_dir])
  data_sets = input_data.read_data_sets(data_dir, FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    # Build a Graph that computes predictions from the inference model.
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary operation based on the TF collection of Summaries.
    # Remove this try/except once TensorFlow 0.12 is standard.
    try:
      summary_op = tf.contrib.deprecated.merge_all_summaries()
    except AttributeError:
      summary_op = tf.merge_all_summaries()
    # Add the variable initializer Op.
    # Remove this try/except once TensorFlow 0.12 is standard.
    try:
      init = tf.global_variables_initializer()
    except AttributeError:
      init = tf.initialize_all_variables()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create a session for running Ops on the Graph.
    sess = tf.Session()
    # Instantiate a SummaryWriter to output summaries and the Graph.
    # Remove this try/except once TensorFlow 0.12 is standard.
    try:
      summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
    except AttributeError:
      summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
    # And then after everything is built:
    # Run the Op to initialize the variables.
    sess.run(init)
    # Start the training loop.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                 labels_placeholder)
      # Run one step of the model. The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op. To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary_op, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.test)
def run_training():
  """Train MNIST for a number of steps."""
  # Get the sets of images and labels for training, validation, and
  # test on MNIST.
  # fake_data is a flag used for unit tests; it can be ignored here.
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
  # Tell TensorFlow that the model will be built into the default Graph.
  # The Python `with` statement marks these operations as belonging to the
  # global default tf.Graph instance. A single tf.Graph instance is usually
  # enough, so using as_default() suffices.
  with tf.Graph().as_default():
    # Generate placeholders for the images and labels.
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    # Build the computation graph from the functions defined in mnist.py.
    # Build a Graph that computes predictions from the inference model.
    # First, inference(): the network we want to train.
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    # Add to the Graph the Ops for loss calculation.
    # Second, loss(): adds the loss-calculation Ops to the graph.
    loss = mnist.loss(logits, labels_placeholder)
    # Add to the Graph the Ops that calculate and apply gradients.
    # Third, training(): adds the optimization Ops that minimize the loss,
    # i.e. it describes with which method the given loss is optimized.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add the Op to compare the logits to the labels during evaluation.
    # evaluation() describes what the logits output should look like.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Build the summary Tensor based on the TF collection of Summaries.
    summary = tf.summary.merge_all()
    # Add the variable initializer Op (create the initialization op up front).
    init = tf.global_variables_initializer()
    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()
    # Create a session for running Ops on the Graph.
    # Once the graph and all required Ops have been built, create a
    # tf.Session(). Calling Session() with no arguments attaches to the
    # default local session.
    sess = tf.Session()
    # Instantiate a SummaryWriter to output summaries and the Graph.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    # And then after everything is built:
    # Run the Op to initialize the variables.
    # Calling Session.run initializes the variables.
    sess.run(init)
    # Start the training loop once all objects and Ops have been constructed.
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images and labels
      # for this particular training step.
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                 labels_placeholder)
      # Run one step of the model. The return values are the activations
      # from the `train_op` (which is discarded) and the `loss` Op. To
      # inspect the values of your Ops or variables, you may include them
      # in the list passed to sess.run() and the value tensors will be
      # returned in the tuple from the call.
      # run() is given two fetches, so it returns two values. train_op is a
      # training Operation with no output, so None comes back and is
      # discarded; loss has an output, so its value is kept.
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time
      # Write the summaries and print an overview fairly often.
      if step % 100 == 0:
        # Print status to stdout.
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.test)
def run_training():
  """Train MNIST for a number of steps."""
  # Ensures the correct data has been downloaded and unpacks it into a dict of
  # DataSet instances.
  data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)
  # Tell TF that the model will be built into the default Graph.
  # The 'with' statement indicates that all of the ops are associated with the
  # specified instance - here the default global tf.Graph instance.
  # A tf.Graph is a collection of ops that may be executed together as a group.
  with tf.Graph().as_default():
    # Generate placeholders.
    images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size)
    # Build a graph that computes predictions from the inference model.
    # The inference function builds the graph as far as needed to return the
    # tensor containing the output predictions. It takes the images
    # placeholder and builds on top of it a pair of fully connected layers
    # with ReLU activation, followed by a ten-node linear output layer.
    logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2)
    # Add the ops for loss calculation.
    loss = mnist.loss(logits, labels_placeholder)
    # Add ops that calculate and apply gradients.
    train_op = mnist.training(loss, FLAGS.learning_rate)
    # Add op to compare logits to labels during evaluation.
    eval_correct = mnist.evaluation(logits, labels_placeholder)
    # Summary tensor based on the collection of summaries.
    summary = tf.summary.merge_all()
    # Add the variable initializer.
    init = tf.global_variables_initializer()
    # Create a saver.
    saver = tf.train.Saver()
    # Create a session for running ops.
    # Alternatively, could do 'with tf.Session() as sess:'.
    sess = tf.Session()
    # Instantiate a SummaryWriter for output.
    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
    ### Built everything! ###
    # Now run and train.
    # run() will complete the subset of the graph corresponding to the
    # ops described above. Thus, only init is given here.
    sess.run(init)
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Fill a feed dictionary with the actual set of images.
      feed_dict = fill_feed_dict(data_sets.train, images_placeholder,
                                 labels_placeholder)
      # Run a step. What is returned are the activations from the train_op
      # and the loss operation. If you want to inspect the values of ops or
      # variables, include them in the list passed to sess.run(); each tensor
      # in that list corresponds to a numpy array in the returned tuple,
      # filled with the value of that tensor during this step of training.
      # Since train_op is an Operation with no output value, it can be
      # discarded. BUT... if loss becomes NaN, the model has likely diverged
      # during training.
      _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time
      # Let's log some stuff so we know we're doing OK.
      if step % 100 == 0:
        print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration))
        # Update the events file.
        # This can be used by TensorBoard to display the summaries.
        summary_str = sess.run(summary, feed_dict=feed_dict)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      # Save a checkpoint and evaluate the model periodically.
      if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        saver.save(sess, checkpoint_file, global_step=step)
        # Evaluate against the training set.
        print('Training Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.train)
        # Evaluate against the validation set.
        print('Validation Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.validation)
        # Evaluate against the test set.
        print('Test Data Eval:')
        do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
                data_sets.test)
def main(unused_argv):
  # mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
  # if FLAGS.download_only:
  #   sys.exit(0)
  print(FLAGS)
  if FLAGS.job_name is None or FLAGS.job_name == "":
    raise ValueError("Must specify an explicit `job_name`")
  if FLAGS.task_index is None or FLAGS.task_index == "":
    raise ValueError("Must specify an explicit `task_index`")

  print("job name = %s" % FLAGS.job_name)
  print("task index = %d" % FLAGS.task_index)

  # Construct the cluster and start the server.
  ps_spec = FLAGS.ps_hosts.split(",")
  worker_spec = FLAGS.worker_hosts.split(",")
  # Get the number of workers.
  num_workers = len(worker_spec)
  cluster = tf.train.ClusterSpec({"ps": ps_spec, "worker": worker_spec})
  server = tf.train.Server(cluster,
                           job_name=FLAGS.job_name,
                           task_index=FLAGS.task_index)
  if FLAGS.job_name == "ps":
    server.join()
  else:
    is_chief = (FLAGS.task_index == 0)
    worker_device = "/job:worker/task:%d" % FLAGS.task_index
    # The device setter will automatically place Variables ops on separate
    # parameter servers (ps). The non-Variable ops will be placed on the
    # workers. The ps use CPU and workers use the corresponding GPU.
    with tf.device(
        tf.train.replica_device_setter(worker_device=worker_device,
                                       cluster=cluster)):
      global_step = tf.contrib.framework.get_or_create_global_step()
      images, labels = inputs(train=True, batch_size=FLAGS.batch_size,
                              num_epochs=FLAGS.num_epochs)
      logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2)
      loss = mnist.loss(logits, labels)
      tf.summary.scalar(loss.op.name, loss)
      opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
      if FLAGS.replicas_to_aggregate is None:
        replicas_to_aggregate = num_workers
      else:
        replicas_to_aggregate = FLAGS.replicas_to_aggregate
      opt = tf.train.SyncReplicasOptimizer(
          opt,
          replicas_to_aggregate=replicas_to_aggregate,
          total_num_replicas=num_workers,
          name="mnist_sync_replicas")
      train_op = opt.minimize(loss, global_step=global_step)
      if is_chief:
        # Initial token and chief queue runners required by the
        # sync_replicas mode.
        chief_queue_runner = opt.get_chief_queue_runner()
        sync_init_op = opt.get_init_tokens_op()
      init_op = tf.group(tf.global_variables_initializer(),
                         tf.local_variables_initializer())
      my_summary_op = tf.summary.merge_all()

    sv = tf.train.Supervisor(is_chief=is_chief,
                             logdir=FLAGS.train_dir,
                             summary_op=None,
                             init_op=init_op,
                             recovery_wait_secs=1,
                             global_step=global_step,
                             save_model_secs=60,
                             save_summaries_secs=60)
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False)
    # device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])
    with sv.managed_session(master=server.target, config=sess_config) as sess:
      start_time = time.time()
      step = 1
      # if is_chief:
      #   if FLAGS.train_dir:
      #     sv.start_standard_services(sess)
      queue_runners = tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS)
      sv.start_queue_runners(sess, queue_runners)
      if is_chief:
        # Chief worker will start the chief queue runner and call the init op.
        sv.start_queue_runners(sess, [chief_queue_runner])
        sess.run(sync_init_op)
      try:
        while not sv.should_stop():
          if step > 0 and step % 100 == 0:
            # Create the summary every 100 chief steps.
            _, loss_value, global_step_value, summ = sess.run(
                [train_op, loss, global_step, my_summary_op])
            if is_chief:
              sv.summary_computed(sess, summ)
            duration = time.time() - start_time
            sec_per_batch = duration / (global_step_value * num_workers)
            format_str = ("After %d training steps (%d global steps), "
                          "loss on training batch is %g. (%.3f sec/batch)")
            print(format_str % (step, global_step_value, loss_value,
                                sec_per_batch))
          else:
            # Train normally.
            _, loss_value, global_step_value = sess.run(
                [train_op, loss, global_step])
          step += 1
      except errors.OutOfRangeError:
        # OutOfRangeError is thrown when the epoch limit per
        # tf.train.limit_epochs is reached.
        print('Caught OutOfRangeError. Stopping Training.')
def run_training(): with tf.Graph().as_default(): # Training inputs; validation is run once per epoch below. images, labels = inputs(train=True, batch_size=cfg.FLAGS.batch_size, nb_epochs=cfg.FLAGS.nb_epochs) logits = mnist.inference(images, cfg.FLAGS.hidden1, cfg.FLAGS.hidden2) loss = mnist.loss(logits, labels) train_op = mnist.training(loss, cfg.FLAGS.learning_rate) init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()) sess = tf.Session() sess.run(init_op) coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) data_sets = mnist_datasets.read_data_sets(cfg.FLAGS.train_dir, dtype=tf.uint8, reshape=False, validation_size=cfg.FLAGS.validation_size) nb_train_samples = data_sets.train.num_examples # print('training samples: {}; batch_size: {}'.format(nb_train_samples, cfg.FLAGS.batch_size)) # .. 55000 and 100 # Prepare the validation data as numpy arrays. image_valid_np = data_sets.validation.images.reshape((cfg.FLAGS.validation_size, mnist.IMAGE_PIXELS)) label_valid_np = data_sets.validation.labels # shape (5000,) # Subsample to match the batch size expected by the graph. idx_valid = np.random.choice(cfg.FLAGS.validation_size, cfg.FLAGS.batch_size, replace=False) image_valid_np = image_valid_np[idx_valid, :] image_valid_np = image_valid_np * (1. / 255) - 0.5 # apply the same preprocessing as training label_valid_np = label_valid_np[idx_valid] step = 0 epoch_idx = 0 try: start_time = time.time() while not coord.should_stop(): _, loss_value = sess.run([train_op, loss]) step += 1 if step >= nb_train_samples // cfg.FLAGS.batch_size: epoch_idx += 1 end_time = time.time() duration = end_time - start_time print('Training Epoch {}, Step {}: loss = {:.02f} ({:.03f} sec)' .format(epoch_idx, step, loss_value, duration)) start_time = end_time # restart the timer step = 0 # reset the step counter # Compute the loss on the validation batch by feeding it in place of the queue tensors. loss_valid_value = sess.run(loss, feed_dict={images: image_valid_np, labels: label_valid_np}) print('Validation Epoch {}: loss = {:.02f}' .format(epoch_idx, loss_valid_value)) except tf.errors.OutOfRangeError: print('Done training for epoch {}, {} steps'.format(epoch_idx, step)) finally: coord.request_stop() # # restart runner for validation data # coord = tf.train.Coordinator() # threads = tf.train.start_queue_runners(sess=sess, coord=coord) # # step = 0 # try: # start_time = time.time() # while not coord.should_stop(): # loss_value_valid = sess.run(loss_valid) # step += 1 # except tf.errors.OutOfRangeError: # print('Done validation for epoch {}, {} steps'.format(epoch_idx, step)) # finally: # coord.request_stop() # duration = time.time() - start_time # print('Validation: Epoch {}, Step {}: loss = {:.02f} ({:.03f} sec)' # .format(epoch_idx, step, loss_value_valid, duration)) coord.join(threads) sess.close()
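The snippet above scores only one randomly chosen validation batch per epoch. A sketch of covering the whole validation set instead, assuming the same graph tensors (`loss`, `images`, `labels`) and the same feed-override trick:

# Hypothetical full-set validation: iterate over the validation data in
# batch_size chunks and average the per-batch losses.
val_images = data_sets.validation.images.reshape(
    (cfg.FLAGS.validation_size, mnist.IMAGE_PIXELS)) * (1. / 255) - 0.5
val_labels = data_sets.validation.labels
nb_val_batches = cfg.FLAGS.validation_size // cfg.FLAGS.batch_size
val_loss = 0.
for i in range(nb_val_batches):
    lo, hi = i * cfg.FLAGS.batch_size, (i + 1) * cfg.FLAGS.batch_size
    val_loss += sess.run(loss, feed_dict={images: val_images[lo:hi],
                                          labels: val_labels[lo:hi]})
print('Validation Epoch {}: mean loss = {:.02f}'.format(
    epoch_idx, val_loss / nb_val_batches))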
def main(_): ps_hosts = FLAGS.ps_hosts.split(",") worker_hosts = FLAGS.worker_hosts.split(",") # Create a cluster from the parameter server and worker hosts. cluster = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts}) # Create and start a server for the local task. server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index) if FLAGS.job_name == "ps": server.join() elif FLAGS.job_name == "worker": is_chief = (FLAGS.task_index == 0) # Assigns ops to the local worker by default. with tf.device( tf.train.replica_device_setter( worker_device="/job:worker/task:%d" % FLAGS.task_index, cluster=cluster)): # Build model... # Input images and labels. images, labels = inputs(train=True, batch_size=FLAGS.batch_size, num_epochs=FLAGS.num_epochs) # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the loss calculation. loss = mnist.loss(logits, labels) tf.summary.scalar(loss.op.name, loss) global_step = tf.contrib.framework.get_or_create_global_step() # opt = tf.train.AdagradOptimizer(0.01) opt = tf.train.GradientDescentOptimizer(0.001) num_workers = len(worker_hosts) if FLAGS.sync_replicas: if FLAGS.replicas_to_aggregate is None: replicas_to_aggregate = num_workers else: replicas_to_aggregate = FLAGS.replicas_to_aggregate opt = tf.train.SyncReplicasOptimizer( opt, replicas_to_aggregate=replicas_to_aggregate, total_num_replicas=num_workers, name="mnist_sync_replicas") train_op = opt.minimize(loss, global_step=global_step) # The StopAtStepHook handles stopping after running given steps. hooks = [tf.train.StopAtStepHook(last_step=100000)] if FLAGS.sync_replicas: # Keep the stop condition and add the sync-replicas hook. hooks.append(opt.make_session_run_hook(is_chief)) # The MonitoredTrainingSession takes care of session initialization, restoring from a checkpoint, saving to a checkpoint, and closing when done or an error occurs. config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False) with tf.train.MonitoredTrainingSession(master=server.target, is_chief=is_chief, checkpoint_dir=FLAGS.train_dir, hooks=hooks, save_summaries_steps=10, config=config) as mon_sess: start_time = time.time() while not mon_sess.should_stop(): # Run a training step asynchronously. See `tf.train.SyncReplicasOptimizer` for additional details on how to perform *synchronous* training. mon_sess.run handles AbortedError in case of a preempted PS. _, loss_value, step = mon_sess.run( [train_op, loss, global_step]) # Print an overview fairly often. if step % 100 == 0: duration = time.time() - start_time print('Step %d: loss = %.5f (%.3f sec)' % (step, loss_value, duration)) start_time = time.time()
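The first distributed snippet above carries a commented-out `device_filters` line; enabling it is how a worker avoids blocking on the health of every other worker at session-creation time. A short sketch of the session config, assuming the same FLAGS as above:

# Restrict this session to the ps tasks plus this worker's own task, so the
# worker can start without waiting for its peers to come up.
sess_config = tf.ConfigProto(
    allow_soft_placement=True,
    log_device_placement=False,
    device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])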
flags.DEFINE_integer('batch_size', 100, 'Batch size. Must divide evenly into the dataset sizes.') flags.DEFINE_string('train_dir', 'data', 'Directory to put the training data.') flags.DEFINE_boolean('fake_data', False, 'If true, uses fake data for unit testing.') ## Download the data and unpack it ## data_sets is a custom DataSet data type data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data) ## Initialize the graph and start drawing on it with tf.Graph().as_default(): ## Prepare inputs and placeholders images_placeholder = tf.placeholder(tf.float32, shape=(FLAGS.batch_size, mnist.IMAGE_PIXELS)) labels_placeholder = tf.placeholder(tf.int32, shape=(FLAGS.batch_size)) ## mnist.inference() builds the feed-forward portion of the graph. It takes the ## images placeholder and two integers, each giving the number of neurons in the ## respective hidden layer, and returns the logits logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) loss = mnist.loss(logits, labels_placeholder) train_op = mnist.training(loss, FLAGS.learning_rate) eval_correct = mnist.evaluation(logits, labels_placeholder) ## Initialize variables, create the session, and set up the summary writer summary_op = tf.merge_all_summaries() init = tf.initialize_all_variables() sess = tf.Session() summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph) sess.run(init)
def run_training(): """Train MNIST for a number of epochs.""" # Get the sets of images and labels for training, validation, and # test on MNIST. data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data) # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): with tf.name_scope('input'): # Input data, pinned to CPU because the rest of the pipeline is CPU-only with tf.device('/cpu:0'): input_images = tf.constant(data_sets.train.images) input_labels = tf.constant(data_sets.train.labels) image, label = tf.train.slice_input_producer( [input_images, input_labels], num_epochs=FLAGS.num_epochs) label = tf.cast(label, tf.int32) images, labels = tf.train.batch( [image, label], batch_size=FLAGS.batch_size) # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the Ops for loss calculation. loss = mnist.loss(logits, labels) # Add to the Graph the Ops that calculate and apply gradients. train_op = mnist.training(loss, FLAGS.learning_rate) # Add the Op to compare the logits to the labels during evaluation. eval_correct = mnist.evaluation(logits, labels) # Build the summary operation based on the TF collection of Summaries. summary_op = tf.merge_all_summaries() # Create a saver for writing training checkpoints. saver = tf.train.Saver() # Create the op for initializing variables, including the local epoch counter # created by slice_input_producer's num_epochs argument. init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables()) # Create a session for running Ops on the Graph. sess = tf.Session() # Run the Op to initialize the variables. sess.run(init_op) # Instantiate a SummaryWriter to output summaries and the Graph. summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph) # Start input enqueue threads. coord = tf.train.Coordinator() threads = tf.train.start_queue_runners(sess=sess, coord=coord) # And then after everything is built, start the training loop. try: step = 0 while not coord.should_stop(): start_time = time.time() # Run one step of the model. _, loss_value = sess.run([train_op, loss]) duration = time.time() - start_time # Write the summaries and print an overview fairly often. if step % 100 == 0: # Print status to stdout. print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) # Update the events file. summary_str = sess.run(summary_op) summary_writer.add_summary(summary_str, step) step += 1 # Save a checkpoint periodically. if (step + 1) % 1000 == 0: print('Saving') saver.save(sess, FLAGS.train_dir, global_step=step) except tf.errors.OutOfRangeError: print('Saving') saver.save(sess, FLAGS.train_dir, global_step=step) print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step)) finally: # When done, ask the threads to stop. coord.request_stop() # Wait for threads to finish. coord.join(threads) sess.close()
def run_training(): """Train MNIST for a number of steps.""" # Get the sets of images and labels for training, validation, and # test on MNIST. data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data) # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): # Generate placeholders for the images and labels. images_placeholder, labels_placeholder = placeholder_inputs( FLAGS.batch_size) # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the Ops for loss calculation. loss = mnist.loss(logits, labels_placeholder) # Add to the Graph the Ops that calculate and apply gradients. train_op = mnist.training(loss, FLAGS.learning_rate) # Add the Op to compare the logits to the labels during evaluation. eval_correct = mnist.evaluation(logits, labels_placeholder) # Build the summary operation based on the TF collection of Summaries. summary_op = tf.merge_all_summaries() # Create a saver for writing training checkpoints. saver = tf.train.Saver() # Create a session for running Ops on the Graph. sess = tf.Session() # Run the Op to initialize the variables. init = tf.initialize_all_variables() sess.run(init) # Instantiate a SummaryWriter to output summaries and the Graph. summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, graph_def=sess.graph_def) # And then after everything is built, start the training loop. for step in xrange(FLAGS.max_steps): start_time = time.time() # Fill a feed dictionary with the actual set of images and labels # for this particular training step. feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder) # Run one step of the model. The return values are the activations # from the `train_op` (which is discarded) and the `loss` Op. To # inspect the values of your Ops or variables, you may include them # in the list passed to sess.run() and the value tensors will be # returned in the tuple from the call. _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time # Write the summaries and print an overview fairly often. if step % 100 == 0: # Print status to stdout. print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) # Update the events file. summary_str = sess.run(summary_op, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) # Save a checkpoint and evaluate the model periodically. if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: saver.save(sess, FLAGS.train_dir, global_step=step) # Evaluate against the training set. print('Training Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train) # Evaluate against the validation set. print('Validation Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation) # Evaluate against the test set. print('Test Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
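Many of these snippets call `placeholder_inputs()` and `fill_feed_dict()` without defining them. A minimal sketch consistent with the call sites above; the shapes and FLAGS names are taken from the surrounding snippets, so treat this as an assumption rather than the original helpers:

def placeholder_inputs(batch_size):
    # Placeholders matching the shapes that mnist.inference()/mnist.loss() expect.
    images_placeholder = tf.placeholder(tf.float32,
                                        shape=(batch_size, mnist.IMAGE_PIXELS))
    labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size,))
    return images_placeholder, labels_placeholder

def fill_feed_dict(data_set, images_pl, labels_pl):
    # Pull the next batch from the DataSet and map it onto the placeholders.
    images_feed, labels_feed = data_set.next_batch(FLAGS.batch_size,
                                                   FLAGS.fake_data)
    return {images_pl: images_feed, labels_pl: labels_feed}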
def run_training(): '''Train MNIST for a number of steps.''' data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data) # Tell TensorFlow that the model will be built into the default Graph with tf.Graph().as_default(): # Generate placeholders for the input images_placeholder, labels_placeholder = placeholder_inputs( FLAGS.batch_size) # Build a Graph that computes predictions from the inference model logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the ops for calculating loss loss = mnist.loss(logits, labels_placeholder) # Add to the Graph the ops that calculate and apply gradients train_op = mnist.training(loss, FLAGS.learning_rate) # Add the op to compare the logits to the labels during evaluation eval_correct = mnist.evaluation(logits, labels_placeholder) # Build the summary tensor based on the TF collection of summaries summary = tf.summary.merge_all() init = tf.global_variables_initializer() # Saver for writing training checkpoints saver = tf.train.Saver() sess = tf.Session() # Instantiate a SummaryWriter to write summaries summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph) sess.run(init) # Start the training loop for step in xrange(FLAGS.max_steps): start_time = time.time() feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder) # Run one step of the model _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time # Write summaries and print an overview if step % 100 == 0: print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) summary_str = sess.run(summary, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) summary_writer.flush() # Save a checkpoint and evaluate the model if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt') saver.save(sess, checkpoint_file, global_step=step) # Evaluate against the training set print('Training Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train) print('Validation Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation) print('Test Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
def downpour_training_local_op(self): """ Validation baseline function: run locally. """ # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): FLAGS = self.flags.FLAGS images_placeholder, labels_placeholder = self.placeholder_inputs( FLAGS.batch_size) # Do inference: logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Calculate loss after generating logits: loss = mnist.loss(logits, labels_placeholder) # Add loss to training: train_op = mnist.training(loss, FLAGS.learning_rate) # Add summary summary = tf.merge_all_summaries() # Add the Op to compare the logits to the labels during evaluation. eval_correct = mnist.evaluation(logits, labels_placeholder) # Initialize Variable init = tf.initialize_all_variables() sess = tf.Session() # Instantiate a SummaryWriter to output summaries and the Graph. summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph) sess.run(init) for step in range(FLAGS.max_steps + 1): """ We want to inspect loss value on each step as a local benchmark for fully connected network. """ start_time = time.time() feed_dict = self.fill_feed_dict(self.data_set.train, images_placeholder, labels_placeholder) # Run one step of the model. The return values are the activations # from the `train_op` (which is discarded) and the `loss` Op. To # inspect the values of your Ops or variables, you may include them # in the list passed to sess.run() and the value tensors will be # returned in the tuple from the call. _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time # Write the summaries and print an overview fairly often. if step % 100 == 0: # Print status to stdout. print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) summary_str = sess.run(summary, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) summary_writer.flush() # Save a checkpoint and evaluate the model periodically. if step % 1000 == 0: print('Training Data Eval:') self.do_eval(sess, eval_correct, images_placeholder, labels_placeholder, self.data_set.train) # Evaluate against the validation set. print('Validation Data Eval:') self.do_eval(sess, eval_correct, images_placeholder, labels_placeholder, self.data_set.validation) # Evaluate against the test set. print('Test Data Eval:') self.do_eval(sess, eval_correct, images_placeholder, labels_placeholder, self.data_set.test)
(num_examples, true_count, precision)) def run_training(): """Train MNIST for a number of steps.""" # Fetch the data. data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data) # Build the Graph. with tf.Graph().as_default(): # Generate placeholders for the images and labels. images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size) # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Add the loss-calculation op to the Graph. loss = mnist.loss(logits, labels_placeholder) # Add the ops that calculate and apply gradients. train_op = mnist.training(loss, FLAGS.learning_rate) # Add the op that evaluates accuracy. eval_correct = mnist.evaluation(logits, labels_placeholder) # Merge all summaries. summary_op = tf.summary.merge_all() # Create a saver for writing checkpoints.
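The fragment at the top of the previous snippet is the tail of the `do_eval()` helper, which several snippets call but none defines. A sketch of the whole helper, assuming the standard counting-over-batches behavior implied by that print statement (it reuses the `fill_feed_dict` sketch given earlier):

def do_eval(sess, eval_correct, images_placeholder, labels_placeholder,
            data_set):
    # Count correct predictions over one epoch of `data_set`.
    true_count = 0
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for step in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, images_placeholder,
                                   labels_placeholder)
        true_count += sess.run(eval_correct, feed_dict=feed_dict)
    precision = float(true_count) / num_examples
    print('Num examples: %d  Num correct: %d  Precision @ 1: %0.04f' %
          (num_examples, true_count, precision))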
def run_training(): data_sets = input_data.read_data_sets(FLAGS.train_dir, FLAGS.fake_data) # ---------- Graph ----------------- with tf.Graph().as_default(): # ties every op built below to the default tf.Graph instance images_placeholder, labels_placeholder = placeholder_inputs(FLAGS.batch_size) # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Add the loss-calculation ops to the Graph. loss = mnist.loss(logits, labels_placeholder) # Add the ops that calculate and apply gradients. train_op = mnist.training(loss, FLAGS.learning_rate) # Before entering the training loop, call the evaluation function from mnist.py, eval_correct = mnist.evaluation(logits, labels_placeholder) # passing the same logits and labels used by the loss function, so the eval op is built up front. # # evaluation generates a tf.nn.in_top_k op that marks a model output as correct if # # the true label appears among the K most likely predictions. Here K is set to 1, # # so a prediction counts as correct only when it matches the true label. # eval_correct = tf.nn.in_top_k(logits, labels, 1) # Status visualization: to emit the events file used by TensorBoard, all summaries # (here just one) are merged into a single op during graph building. summary_op = tf.merge_all_summaries() # -------- Saving checkpoints ------------ # Instantiate a tf.train.Saver to obtain checkpoint files that can later restore the model for further training or evaluation. saver = tf.train.Saver() # Create a session for running ops on the Graph. sess = tf.Session() # Run the op that initializes the variables. init = tf.initialize_all_variables() sess.run(init) # After the session is created, instantiate a tf.train.SummaryWriter to write events files containing the graph itself and summary values. summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, graph_def=sess.graph_def) # With everything built, start the training loop. for step in xrange(FLAGS.max_steps): start_time = time.time() feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder) # Explicitly fetch the two values we need: [train_op, loss] _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time if step % 100 == 0: # Assuming training is healthy (no NaNs), print a one-line status every 100 steps. print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) # Update the events file. Each run of summary_op produces the latest summary data, # which is handed to the writer's add_summary() function. summary_str = sess.run(summary_op, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) # summary_str is a serialized Summary; the writer records it at step (the x-axis) # Every 1000 steps the model is evaluated: do_eval is called three times, on the # training, validation, and test sets. if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: print('in') saver.save(sess, FLAGS.train_dir, global_step=step) print('Training Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train) # Evaluate against the validation set. print('Validation Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation) # Evaluate against the test set. print('Test Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
def run_training(): """Train MNIST for a number of steps.""" # Get the sets of images and labels for training, validation, and # test on MNIST. If input_path is specified, download the data from GCS to # the folder expected by read_data_sets. data_dir = tempfile.mkdtemp() if FLAGS.input_path: files = [ os.path.join(FLAGS.input_path, file_name) for file_name in INPUT_FILES ] subprocess.check_call(['gsutil', '-m', '-q', 'cp', '-r'] + files + [data_dir]) data_sets = input_data.read_data_sets(data_dir, FLAGS.fake_data) # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): # Generate placeholders for the images and labels and mark as input. placeholders = placeholder_inputs() keys_placeholder, images_placeholder, labels_placeholder = placeholders # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the Ops for loss calculation. loss = mnist.loss(logits, labels_placeholder) # To be able to extract the id, we need to add the identity function. keys = tf.identity(keys_placeholder) # The prediction will be the index in logits with the highest score. # We also use a softmax operation to produce a probability distribution # over all possible digits. prediction = tf.argmax(logits, 1) scores = tf.nn.softmax(logits) # Add to the Graph the Ops that calculate and apply gradients. train_op = mnist.training(loss, FLAGS.learning_rate) # Add the Op to compare the logits to the labels during evaluation. eval_correct = mnist.evaluation(logits, labels_placeholder) # Build the summary operation based on the TF collection of Summaries. # Remove this if once Tensorflow 0.12 is standard. try: summary_op = tf.contrib.deprecated.merge_all_summaries() except AttributeError: summary_op = tf.merge_all_summaries() # Add the variable initializer Op. init = tf.initialize_all_variables() # Create a saver for writing legacy training checkpoints. saver = tf.train.Saver() # Create a session for running Ops on the Graph. sess = tf.Session() # Instantiate a SummaryWriter to output summaries and the Graph. # Remove this if once Tensorflow 0.12 is standard. try: summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph) except AttributeError: summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph) # And then after everything is built: # Run the Op to initialize the variables. sess.run(init) # Start the training loop. for step in xrange(FLAGS.max_steps): start_time = time.time() # Fill a feed dictionary with the actual set of images and labels # for this particular training step. feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder) # Run one step of the model. The return values are the activations # from the `train_op` (which is discarded) and the `loss` Op. To # inspect the values of your Ops or variables, you may include them # in the list passed to sess.run() and the value tensors will be # returned in the tuple from the call. _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time # Write the summaries and print an overview fairly often. if step % 100 == 0: # Print status to stdout. print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) # Update the events file. summary_str = sess.run(summary_op, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) summary_writer.flush() # Save a checkpoint and evaluate the model periodically. 
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint') saver.save(sess, checkpoint_file, global_step=step) # Evaluate against the training set. print('Training Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train) # Evaluate against the validation set. print('Validation Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation) # Evaluate against the test set. print('Test Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test) file_io.create_dir(FLAGS.model_dir) # Export a SavedModel that can be served for predictions. saved_model_util.simple_save(sess, os.path.join(FLAGS.model_dir, 'saved_model'), inputs={ 'key': keys_placeholder, 'image': images_placeholder }, outputs={ 'key': keys, 'prediction': prediction, 'scores': scores }) logging.debug('Saved model path %s', os.path.join(FLAGS.model_dir, 'saved_model'))
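`saved_model_util.simple_save` above presumably wraps `tf.saved_model.simple_save`, which writes a `serving_default` signature under the SERVING tag. A hedged sketch of loading that export back for prediction; `image_batch` and `key_batch` are hypothetical input arrays:

import os
import tensorflow as tf

export_dir = os.path.join(FLAGS.model_dir, 'saved_model')
with tf.Session(graph=tf.Graph()) as sess:
    # Load the graph and variables written by simple_save.
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    sig = meta_graph.signature_def[
        tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    # TensorInfo .name values are tensor names, which sess.run accepts directly.
    predictions = sess.run(
        sig.outputs['prediction'].name,
        feed_dict={sig.inputs['image'].name: image_batch,  # hypothetical batch
                   sig.inputs['key'].name: key_batch})     # hypothetical keys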
def run_training(): """Train MNIST for a number of steps.""" # Get the sets of images and labels for training, validation, and # test on MNIST. data_sets = input_data.read_data_sets(tempfile.mkdtemp(), FLAGS.fake_data) # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): # Generate placeholders for the images and labels and mark as input. placeholders = placeholder_inputs() keys_placeholder, images_placeholder, labels_placeholder = placeholders inputs = {'key': keys_placeholder.name, 'image': images_placeholder.name} tf.add_to_collection('inputs', json.dumps(inputs)) # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the Ops for loss calculation. loss = mnist.loss(logits, labels_placeholder) # To be able to extract the id, we need to add the identity function. keys = tf.identity(keys_placeholder) # The prediction will be the index in logits with the highest score. # We also use a softmax operation to produce a probability distribution # over all possible digits. prediction = tf.argmax(logits, 1) scores = tf.nn.softmax(logits) # Mark the outputs. outputs = {'key': keys.name, 'prediction': prediction.name, 'scores': scores.name} tf.add_to_collection('outputs', json.dumps(outputs)) # Add to the Graph the Ops that calculate and apply gradients. train_op = mnist.training(loss, FLAGS.learning_rate) # Add the Op to compare the logits to the labels during evaluation. eval_correct = mnist.evaluation(logits, labels_placeholder) # Build the summary operation based on the TF collection of Summaries. # TODO(b/33420312): remove the if once 0.12 is fully rolled out to prod. if tf.__version__ < '0.12': summary_op = tf.merge_all_summaries() else: summary_op = tf.contrib.deprecated.merge_all_summaries() # Add the variable initializer Op. init = tf.initialize_all_variables() # Create a saver for writing training checkpoints. saver = tf.train.Saver() # Create a session for running Ops on the Graph. sess = tf.Session() # Instantiate a SummaryWriter to output summaries and the Graph. summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph) # And then after everything is built: # Run the Op to initialize the variables. sess.run(init) # Start the training loop. for step in xrange(FLAGS.max_steps): start_time = time.time() # Fill a feed dictionary with the actual set of images and labels # for this particular training step. feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder) # Run one step of the model. The return values are the activations # from the `train_op` (which is discarded) and the `loss` Op. To # inspect the values of your Ops or variables, you may include them # in the list passed to sess.run() and the value tensors will be # returned in the tuple from the call. _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time # Write the summaries and print an overview fairly often. if step % 100 == 0: # Print status to stdout. print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) # Update the events file. summary_str = sess.run(summary_op, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) summary_writer.flush() # Save a checkpoint and evaluate the model periodically. 
if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint') saver.save(sess, checkpoint_file, global_step=step) # Evaluate against the training set. print('Training Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train) # Evaluate against the validation set. print('Validation Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation) # Evaluate against the test set. print('Test Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test) # Export the model so that it can be loaded and used later for predictions. file_io.create_dir(FLAGS.model_dir) saver.save(sess, os.path.join(FLAGS.model_dir, 'export'))
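Because this variant records the input/output tensor names as JSON in the 'inputs'/'outputs' graph collections, a later process can restore the exported checkpoint and look them up. A minimal sketch, assuming the export path used above; `image_batch` and `key_batch` are hypothetical input arrays:

import json
import os
import tensorflow as tf

export_path = os.path.join(FLAGS.model_dir, 'export')
with tf.Session(graph=tf.Graph()) as sess:
    # Rebuild the graph from the .meta file, then restore the variables.
    saver = tf.train.import_meta_graph(export_path + '.meta')
    saver.restore(sess, export_path)
    inputs = json.loads(tf.get_collection('inputs')[0])
    outputs = json.loads(tf.get_collection('outputs')[0])
    # Tensor names from the collections can be fed and fetched directly.
    preds = sess.run(outputs['prediction'],
                     feed_dict={inputs['image']: image_batch,
                                inputs['key']: key_batch})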
def run_training(): """Train MNIST for a number of steps.""" # Get the sets of images and labels for training, validation, and # test on MNIST. data_sets = input_data.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data) # Tell TensorFlow that the model will be built into the default Graph. with tf.Graph().as_default(): # Generate placeholders for the images and labels. images_placeholder, labels_placeholder = placeholder_inputs( FLAGS.batch_size) # Build a Graph that computes predictions from the inference model. logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) # Add to the Graph the Ops for loss calculation. loss = mnist.loss(logits, labels_placeholder) # Add to the Graph the Ops that calculate and apply gradients. train_op = mnist.training(loss, FLAGS.learning_rate) # Add the Op to compare the logits to the labels during evaluation. eval_correct = mnist.evaluation(logits, labels_placeholder) # Build the summary Tensor based on the TF collection of Summaries. summary = tf.summary.merge_all() # Add the variable initializer Op. init = tf.global_variables_initializer() # Create a saver for writing training checkpoints. saver = tf.train.Saver() # Create a session for running Ops on the Graph. sess = tf.Session() # Instantiate a SummaryWriter to output summaries and the Graph. summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph) # And then after everything is built: # Run the Op to initialize the variables. sess.run(init) # Start the training loop. for step in xrange(FLAGS.max_steps): start_time = time.time() # Fill a feed dictionary with the actual set of images and labels # for this particular training step. feed_dict = fill_feed_dict(data_sets.train, images_placeholder, labels_placeholder) # Run one step of the model. The return values are the activations # from the `train_op` (which is discarded) and the `loss` Op. To # inspect the values of your Ops or variables, you may include them # in the list passed to sess.run() and the value tensors will be # returned in the tuple from the call. _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) duration = time.time() - start_time # Write the summaries and print an overview fairly often. if step % 100 == 0: # Print status to stdout. print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value, duration)) # Update the events file. summary_str = sess.run(summary, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) summary_writer.flush() # Save a checkpoint and evaluate the model periodically. if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt') saver.save(sess, checkpoint_file, global_step=step) # Evaluate against the training set. print('Training Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.train) # Evaluate against the validation set. print('Validation Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.validation) # Evaluate against the test set. print('Test Data Eval:') do_eval(sess, eval_correct, images_placeholder, labels_placeholder, data_sets.test)
def downpour_training_distributed_op(self): """ Set up workers with corresponding constants """ FLAGS = self.flags.FLAGS # Pass in by --ps_hosts=ps0.example.com:2222, ps1.example.com:2222 # ps_hosts = FLAGS.ps_hosts.split(",") # worker_hosts = FLAGS.worker_hosts.split(",") # Create the cluster; each task needs its own port. cluster = tf.train.ClusterSpec({ "ps": ["localhost:2222"], "worker": ["localhost:2223", "localhost:2224"] }) server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index) if FLAGS.job_name == "ps": # Do something for the parameter-sharing scheme. # Currently updating all parameters. server.join() elif FLAGS.job_name == "worker": # Assign operations to the local worker by default: with tf.device( tf.train.replica_device_setter( worker_device="/job:worker/replica:%d/task:%d/cpu:%d" % (0, FLAGS.task_index, 0), cluster=cluster)): # Build model: # Do something for the parameter-sharing scheme. # Currently updating all parameters. images_placeholder, labels_placeholder = self.placeholder_inputs( FLAGS.batch_size) logits = mnist.inference(images_placeholder, FLAGS.hidden1, FLAGS.hidden2) loss = mnist.loss(logits, labels_placeholder) # Create a variable to track the global step global_step = tf.Variable(0, name='global_step', trainable=False) # Add a scalar summary for the snapshot loss. tf.summary.scalar('loss', loss) # Create the gradient descent optimizer with the given learning rate. optimizer = tf.train.GradientDescentOptimizer( FLAGS.learning_rate) # Use the optimizer to apply the gradients that minimize the loss. train_op = optimizer.minimize(loss, global_step=global_step) saver = tf.train.Saver() summary_op = tf.merge_all_summaries() init_op = tf.initialize_all_variables() # Create a "supervisor", which oversees the training process. sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0), logdir=FLAGS.train_log, init_op=init_op, summary_op=summary_op, saver=saver, global_step=global_step, save_model_secs=600) # The supervisor takes care of session initialization, restoring from # a checkpoint, and closing when done or an error occurs. with sv.managed_session(server.target, config=tf.ConfigProto( allow_soft_placement=True, log_device_placement=True)) as sess: # Loop until the supervisor shuts down or 1000 steps have completed. step = 0 while not sv.should_stop() and step < 1000: # Run a training step asynchronously. feed_dict = self.fill_feed_dict(self.data_set.train, images_placeholder, labels_placeholder) # Fetch the global step alongside train_op and loss so the loop # condition tracks actual training progress. _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict=feed_dict) sv.stop()