# Shared imports for the scripts below (TF 1.x-era code).
# FLAGS is assumed to be defined at module level in each script via tf.app.flags.
import os
import time

import numpy as np
import tensorflow as tf
from six.moves import xrange  # pylint: disable=redefined-builtin

import input_data
import overexpress_data
import regression_model


def run_training():
    """Restore a trained model and write predictions for the overexpression data."""
    data_sets = overexpress_data.read_data_sets(
        seq_file=FLAGS.seq_file,
        expr_file=FLAGS.expr_file,
        reg_names_file=FLAGS.reg_names_file,
        fold_change=FLAGS.fold_change)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the sequences, regulator expression,
        # labels, dropout keep probability, and per-example metadata.
        (seq_placeholder, reg_expr_placeholder, labels_placeholder,
         keep_prob_placeholder, meta_placeholder) = placeholder_inputs(
             FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        y = regression_model.inference(seq_placeholder, reg_expr_placeholder,
                                       keep_prob_placeholder, FLAGS.batch_size)

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # And then after everything is built:
        # Run the Op to initialize the variables, then restore the trained
        # weights from the latest checkpoint in the log directory.
        sess.run(init)
        saver = tf.train.import_meta_graph(FLAGS.graph)
        saver.restore(sess, tf.train.latest_checkpoint(FLAGS.log_dir))

        # Predict expression for the overexpression data and write rows of
        # (predicted, actual, meta columns) to a tab-delimited file.
        pred = prediction(sess, y, seq_placeholder, reg_expr_placeholder,
                          labels_placeholder, keep_prob_placeholder,
                          meta_placeholder, data_sets.overexpress_data)
        np.savetxt(FLAGS.out, pred, fmt=['%.3f', '%.3f', '%s', '%s', '%s'],
                   delimiter='\t')
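# The scripts in this file call a placeholder_inputs() helper that is not
# shown. Below is a minimal sketch of what the five-placeholder variant might
# look like, assuming one-hot-encoded promoter sequences, a regulator-
# expression vector, scalar expression labels, a scalar dropout keep
# probability, and three string metadata columns (inferred from the savetxt
# format above). SEQ_LENGTH, NUM_REGULATORS, and all shapes are hypothetical;
# the scripts below use a four-placeholder variant without `meta`.
SEQ_LENGTH = 1000      # assumed promoter-sequence length (one-hot, 4 channels)
NUM_REGULATORS = 472   # assumed number of regulators in reg_names_file


def placeholder_inputs(batch_size):
    """Return placeholders for one batch of inputs, labels, and metadata."""
    seq_placeholder = tf.placeholder(
        tf.float32, shape=(batch_size, SEQ_LENGTH, 4))
    reg_expr_placeholder = tf.placeholder(
        tf.float32, shape=(batch_size, NUM_REGULATORS))
    labels_placeholder = tf.placeholder(tf.float32, shape=(batch_size,))
    keep_prob_placeholder = tf.placeholder(tf.float32)  # dropout keep prob
    meta_placeholder = tf.placeholder(tf.string, shape=(batch_size, 3))
    return (seq_placeholder, reg_expr_placeholder, labels_placeholder,
            keep_prob_placeholder, meta_placeholder)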
def run_training():
    """Restore a trained model and write predictions for the test set."""
    data_sets = input_data.read_data_sets(seq_file=FLAGS.seq_file,
                                          expr_file=FLAGS.expr_file,
                                          reg_names_file=FLAGS.reg_names_file)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the sequences, regulator expression,
        # labels, and dropout keep probability.
        seq_placeholder, reg_expr_placeholder, labels_placeholder, keep_prob_pl = placeholder_inputs(
            FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        y = regression_model.inference(seq_placeholder, reg_expr_placeholder,
                                       keep_prob_pl, FLAGS.batch_size)

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # And then after everything is built:
        # Run the Op to initialize the variables, then restore the trained
        # weights from the latest checkpoint in the log directory.
        sess.run(init)
        saver = tf.train.import_meta_graph(FLAGS.graph)
        saver.restore(sess, tf.train.latest_checkpoint(FLAGS.log_dir))

        # Predict expression for the test set and write
        # (predicted, actual) pairs to a file.
        pred = prediction(sess, y, seq_placeholder, reg_expr_placeholder,
                          labels_placeholder, keep_prob_pl, data_sets.test)
        np.savetxt('%s/prediction.txt' % FLAGS.log_dir, pred)
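# The prediction() helper is also not shown. A plausible sketch of the
# four-placeholder variant used above, assuming the DataSet object exposes
# `num_examples` and a `next_batch()` method in the style of the classic
# TF1 MNIST input pipeline (both assumptions); the overexpression variant
# would additionally feed and return the metadata columns.
def prediction(sess, y, seq_pl, reg_expr_pl, labels_pl, keep_prob_pl,
               data_set):
    """Run the model over a whole DataSet; return (predicted, actual) pairs."""
    rows = []
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    for _ in xrange(steps_per_epoch):
        seq_batch, reg_expr_batch, labels_batch = data_set.next_batch(
            FLAGS.batch_size)
        feed_dict = {seq_pl: seq_batch,
                     reg_expr_pl: reg_expr_batch,
                     labels_pl: labels_batch,
                     keep_prob_pl: 1.0}  # no dropout at test time
        predicted = sess.run(y, feed_dict=feed_dict)
        # One row per example: predicted value next to the observed label.
        rows.append(np.column_stack([predicted.ravel(), labels_batch.ravel()]))
    return np.concatenate(rows, axis=0)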
def run_training():
    """Train for a number of steps."""
    data_sets = input_data.read_data_sets(seq_file=FLAGS.seq_file,
                                          expr_file=FLAGS.expr_file,
                                          reg_names_file=FLAGS.reg_names_file)

    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        # Generate placeholders for the sequences, regulator expression,
        # labels, and dropout keep probability.
        seq_placeholder, reg_expr_placeholder, labels_placeholder, keep_prob_pl = placeholder_inputs(
            FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        y = regression_model.inference(seq_placeholder, reg_expr_placeholder,
                                       keep_prob_pl, FLAGS.batch_size)

        # Add to the Graph the Ops for loss calculation.
        loss = regression_model.loss(y, labels_placeholder)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = regression_model.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the predictions to the labels during evaluation.
        eval_sse = regression_model.evaluation(y, labels_placeholder)

        # Add summaries:
        tf.summary.scalar('loss', loss)

        # Build the summary Tensor based on the TF collection of Summaries.
        summary = tf.summary.merge_all()

        # Add the variable initializer Op.
        init = tf.global_variables_initializer()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session(config=tf.ConfigProto(
            intra_op_parallelism_threads=FLAGS.threads))

        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)

        # And then after everything is built:
        # Open log files for the per-split mean squared errors.
        train_log = open(FLAGS.log_dir + '/train_mse.log', 'w')
        val_log = open(FLAGS.log_dir + '/val_mse.log', 'w')
        test_log = open(FLAGS.log_dir + '/test_mse.log', 'w')

        # Run the Op to initialize the variables.
        sess.run(init)

        # Start the training loop.
        for step in xrange(FLAGS.max_steps):
            start_time = time.time()

            # Fill a feed dictionary with the actual set of sequences and
            # labels for this particular training step.
            feed_dict = fill_feed_dict(data_sets.train,
                                       seq_placeholder,
                                       reg_expr_placeholder,
                                       labels_placeholder,
                                       keep_prob_pl,
                                       keep_prob=0.5,
                                       batch_size=FLAGS.batch_size)

            # Run one step of the model.  The return values are the activations
            # from the `train_op` (which is discarded) and the `loss` Op.
            _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)

            duration = time.time() - start_time

            # Write the summaries and print an overview fairly often.
            if step % 100 == 0:
                # Print status to stdout.
                print('Step %d: loss = %.2f (%.3f sec)' % (step, loss_value,
                                                           duration))
                # Update the events file.
                summary_str = sess.run(summary, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()

            # Save a checkpoint and evaluate the model periodically.
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess, eval_sse, seq_placeholder, reg_expr_placeholder,
                        labels_placeholder, keep_prob_pl, data_sets.train,
                        step, train_log)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess, eval_sse, seq_placeholder, reg_expr_placeholder,
                        labels_placeholder, keep_prob_pl, data_sets.validation,
                        step, val_log)
                # Evaluate against the test set.
                print('Test Data Eval:')
                do_eval(sess, eval_sse, seq_placeholder, reg_expr_placeholder,
                        labels_placeholder, keep_prob_pl, data_sets.test,
                        step, test_log)

        # Close files and session:
        train_log.close()
        val_log.close()
        test_log.close()
        sess.close()
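# The training loop relies on fill_feed_dict() and do_eval(), neither of
# which appears above. A minimal sketch of both, assuming `eval_sse` returns
# a summed squared error per batch and the DataSet exposes `num_examples`
# and `next_batch()` (all of these are assumptions about the unshown helpers).
def fill_feed_dict(data_set, seq_pl, reg_expr_pl, labels_pl, keep_prob_pl,
                   keep_prob, batch_size):
    """Map one batch from `data_set` onto the graph's placeholders."""
    seq_batch, reg_expr_batch, labels_batch = data_set.next_batch(batch_size)
    return {seq_pl: seq_batch,
            reg_expr_pl: reg_expr_batch,
            labels_pl: labels_batch,
            keep_prob_pl: keep_prob}


def do_eval(sess, eval_sse, seq_pl, reg_expr_pl, labels_pl, keep_prob_pl,
            data_set, step, log_file):
    """Accumulate the summed squared error over one epoch and log the MSE."""
    sse = 0.0
    steps_per_epoch = data_set.num_examples // FLAGS.batch_size
    num_examples = steps_per_epoch * FLAGS.batch_size
    for _ in xrange(steps_per_epoch):
        feed_dict = fill_feed_dict(data_set, seq_pl, reg_expr_pl, labels_pl,
                                   keep_prob_pl,
                                   keep_prob=1.0,  # no dropout at eval time
                                   batch_size=FLAGS.batch_size)
        sse += sess.run(eval_sse, feed_dict=feed_dict)
    mse = sse / num_examples
    print('  Num examples: %d  MSE: %.4f' % (num_examples, mse))
    log_file.write('%d\t%.6f\n' % (step, mse))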