def evaluate(): """Eval CIFAR-10 for a number of steps.""" with tf.Graph().as_default() as g: # Get images and labels for CIFAR-10. eval_data = FLAGS.eval_data == 'test' images, labels = svhn.inputs(eval_data=eval_data) # Build a Graph that computes the logits predictions from the # inference model. logits = svhn.inference(images) # Calculate predictions. top_k_op = tf.nn.in_top_k(logits, labels, 1) # Restore the moving average version of the learned variables for eval. variable_averages = tf.train.ExponentialMovingAverage( svhn.MOVING_AVERAGE_DECAY) variables_to_restore = variable_averages.variables_to_restore() saver = tf.train.Saver(variables_to_restore) # Build the summary operation based on the TF collection of Summaries. summary_op = tf.summary.merge_all() summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g) while True: eval_once(saver, summary_writer, top_k_op, summary_op) if FLAGS.run_once: break time.sleep(FLAGS.eval_interval_secs)
def evaluate(): """Eval CIFAR-10 for a number of steps.""" with tf.Graph().as_default(): # Get images and labels for CIFAR-10. images, labels = inputs() # Build a Graph that computes the logits predictions from the # inference model. logits = svhn.inference(images) # Calculate predictions. top_k_op = tf.nn.in_top_k(logits, labels, 1) top_k_predict_op = tf.argmax(logits, 1) # Restore the moving average version of the learned variables for eval. variable_averages = tf.train.ExponentialMovingAverage(svhn.MOVING_AVERAGE_DECAY) variables_to_restore = variable_averages.variables_to_restore() saver = tf.train.Saver(variables_to_restore) # Build the summary operation based on the TF collection of Summaries. summary_op = tf.merge_all_summaries() graph_def = tf.get_default_graph().as_graph_def() summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, graph_def=graph_def) while True: eval_once(saver, summary_writer, top_k_op, top_k_predict_op, summary_op, images) break
def tower_loss(scope):
  """Calculate the total loss on a single tower running the SVHN model.

  Args:
    scope: unique prefix string identifying the SVHN tower, e.g. 'tower_0'

  Returns:
    Tensor of shape [] containing the total loss for a batch of data
  """
  # Get images and labels for SVHN.
  images, labels = svhn.distorted_inputs()

  # Build inference Graph.
  logits = svhn.inference(images)

  # Build the portion of the Graph calculating the losses. Note that we will
  # assemble the total_loss using a custom function below.
  _ = svhn.loss(logits, labels)

  # Assemble all of the losses for the current tower only.
  losses = tf.get_collection('losses', scope)

  # Calculate the total loss for the current tower.
  total_loss = tf.add_n(losses, name='total_loss')

  # Attach a scalar summary to all individual losses and the total loss; do
  # the same for the averaged version of the losses.
  for l in losses + [total_loss]:
    # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU
    # training session. This helps the clarity of presentation on TensorBoard.
    loss_name = re.sub('%s_[0-9]*/' % svhn.TOWER_NAME, '', l.op.name)
    tf.summary.scalar(loss_name, l)

  return total_loss
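# tower_loss is only meaningful inside a per-tower name scope. A minimal
# sketch of the driver loop that typically calls it, in the style of the
# CIFAR-10 multi-GPU tutorial; FLAGS.num_gpus is an assumed flag, and the
# optimizer is whatever the surrounding script builds.
def build_towers(opt):
  """Build one loss per GPU and collect per-tower gradients."""
  tower_grads = []
  for i in xrange(FLAGS.num_gpus):
    with tf.device('/gpu:%d' % i):
      with tf.name_scope('%s_%d' % (svhn.TOWER_NAME, i)) as scope:
        # Each tower computes its own loss on its own input batch...
        loss = tower_loss(scope)
        # ...shares variables with the other towers...
        tf.get_variable_scope().reuse_variables()
        # ...and contributes gradients for averaging across towers.
        tower_grads.append(opt.compute_gradients(loss))
  return tower_grads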
def evaluate(): """Eval CIFAR-10 for a number of steps.""" with tf.Graph().as_default(): # Get images and labels for CIFAR-10. images, labels = inputs() # Build a Graph that computes the logits predictions from the # inference model. logits = svhn.inference(images) # Calculate predictions. top_k_op = tf.nn.in_top_k(logits, labels, 1) top_k_predict_op = tf.argmax(logits, 1) # Restore the moving average version of the learned variables for eval. variable_averages = tf.train.ExponentialMovingAverage( svhn.MOVING_AVERAGE_DECAY) variables_to_restore = variable_averages.variables_to_restore() saver = tf.train.Saver(variables_to_restore) # Build the summary operation based on the TF collection of Summaries. summary_op = tf.merge_all_summaries() graph_def = tf.get_default_graph().as_graph_def() summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir, graph_def=graph_def) while True: eval_once(saver, summary_writer, top_k_op, top_k_predict_op, summary_op, images) break
def train():
  """Train SVHN for a number of steps."""
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)

    # Get images and labels for SVHN.
    images, labels = svhn.distorted_inputs()

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = svhn.inference(images)

    # Calculate loss.
    loss = svhn.loss(logits, labels)

    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = svhn.train(loss, global_step)

    # Create a saver.
    saver = tf.train.Saver(tf.global_variables())

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    # Build an initialization operation to run below.
    init = tf.global_variables_initializer()

    # Start running operations on the Graph.
    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time

      assert not np.isnan(loss_value), "Model diverged with loss = NaN"

      if step % 10 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)

        format_str = ("%s: step %d, loss = %.2f (%.1f examples/sec; %.3f "
                      "sec/batch)")
        print(format_str % (datetime.now(), step, loss_value,
                            examples_per_sec, sec_per_batch))

      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)

      # Save the model checkpoint periodically.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, "model.ckpt")
        saver.save(sess, checkpoint_path, global_step=step)
def train(): """Train CIFAR-10 for a number of steps.""" with tf.Graph().as_default(): global_step = tf.contrib.framework.get_or_create_global_step() # Force input pipeline to CPU:0 to avoid operations sometimes ending up on # GPU and resulting in a slow down. with tf.device('/cpu:0'): images, labels = svhn.distorted_inputs() # Build a Graph that computes the logits predictions from the # inference model. logits = svhn.inference(images) # Calculate loss. loss = svhn.loss(logits, labels) # Build a Graph that trains the model with one batch of examples and # updates the model parameters. train_op = svhn.train(loss, global_step) class _LoggerHook(tf.train.SessionRunHook): """Logs loss and runtime.""" def begin(self): self._step = -1 self._start_time = time.time() def before_run(self, run_context): self._step += 1 return tf.train.SessionRunArgs(loss) # Asks for loss value. def after_run(self, run_context, run_values): if self._step % FLAGS.log_frequency == 0: current_time = time.time() duration = current_time - self._start_time self._start_time = current_time loss_value = run_values.results examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration sec_per_batch = float(duration / FLAGS.log_frequency) format_str = ( '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ' 'sec/batch)') print(format_str % (datetime.now(), self._step, loss_value, examples_per_sec, sec_per_batch)) with tf.train.MonitoredTrainingSession( checkpoint_dir=FLAGS.train_dir, hooks=[ tf.train.StopAtStepHook(last_step=FLAGS.max_steps), tf.train.NanTensorHook(loss), _LoggerHook() ], config=tf.ConfigProto(log_device_placement=FLAGS. log_device_placement)) as mon_sess: while not mon_sess.should_stop(): mon_sess.run(train_op)
def train(): """Train SVHN for a number of steps.""" with tf.Graph().as_default(): global_step = tf.Variable(0, trainable=False) # Get images and labels for SVHN with mat file images, labels = svhn.distorted_inputs() # Build a Graph that computes the logits predictions from # inference model. logits = svhn.inference(images) # Calculate loss. loss = svhn.loss(logits, labels) # Build a Graph that trains the model with one batch of examples # and updates the model parm train_op = svhn.train(loss, global_step) # Create a saver. saver = tf.train.Saver(tf.all_variables()) # Build an initialization operation to run. init = tf.initialize_all_variables() # Start running operations on the Graph. sess = tf.Session(config=tf.ConfigProto( log_device_placement=FLAGS.log_device_placement)) sess.run(init) # Start the queue runners. tf.train.start_queue_runners(sess=sess) # summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph) for step in xrange(FLAGS.max_steps): start_time = time.time() _, loss_value = sess.run([train_op, loss]) duration = time.time() - start_time assert not np.isnan(loss_value), 'Model diverged with loss = NaN' if step % 10 == 0: num_examples_per_step = FLAGS.batch_size examples_per_sec = num_examples_per_step / duration sec_per_batch = float(duration) format_str = ( '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f ' 'sec/batch)') print(format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch)) # Save the model checkpoint periodically. if step % 1000 == 0 or (step + 1) == FLAGS.max_steps: checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt') saver.save(sess, checkpoint_path, global_step=step)
def train_with_noise_ce(train_data, train_labels, ckpt_path):
  """Train a student model with the cross-entropy loss on (possibly noisy)
  labels and save checkpoints to ckpt_path."""
  tf.reset_default_graph()
  with tf.Graph().as_default():
    train_data_shape = train_data.shape
    train_data_node = tf.placeholder(
        dtype=tf.float32,
        shape=[None, train_data_shape[1], train_data_shape[2],
               train_data_shape[3]],
        name='train_data_node')
    train_labels_node = tf.placeholder(
        dtype=tf.float32, shape=[None, 10], name='train_labels_node')
    print('placeholder done')

    # Build the inference graph and the cross-entropy loss for the
    # selected dataset.
    if FLAGS.dataset == 'mnist':
      logits = mlenet.inference(train_data_node)
      loss = mlenet.loss_fun(logits, train_labels_node)
    elif FLAGS.dataset == 'svhn':
      logits = svhn.inference(train_data_node)
      loss = svhn.loss_fun(logits, train_labels_node)

    op = tf.train.AdamOptimizer(
        learning_rate=5e-4, beta1=0.9, beta2=0.999,
        name='student_op').minimize(loss)

    saver = tf.train.Saver(tf.global_variables())
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    data_length = len(train_data)
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      # Sample a random batch of examples for this step.
      batch_indices = utils.random_batch_indices(data_length,
                                                 FLAGS.batch_size)
      feed_dict = {
          train_data_node: train_data[batch_indices],
          train_labels_node: train_labels[batch_indices]
      }
      _, loss_value = sess.run([op, loss], feed_dict=feed_dict)
      duration = time.time() - start_time

      if step % 100 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print(format_str % (datetime.now(), step, np.mean(loss_value),
                            examples_per_sec, sec_per_batch))

      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        saver.save(sess, ckpt_path, global_step=step)

  return True
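# An illustrative smoke test for train_with_noise_ce; the [N, 32, 32, 3]
# image shape expected by svhn.inference, the one-hot [N, 10] labels, and
# the checkpoint path are assumptions for the example, not from the source.
def _smoke_test_train():
  smoke_data = np.random.rand(256, 32, 32, 3).astype(np.float32)
  smoke_labels = np.eye(10, dtype=np.float32)[np.random.randint(0, 10, 256)]
  return train_with_noise_ce(smoke_data, smoke_labels,
                             '/tmp/svhn_student/model.ckpt')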
def softmax_preds(images, ckpt_path, return_logits=False):
  """Compute softmax activations (probabilities) with the model saved in the
  path specified as an argument.

  :param images: a np array of images
  :param ckpt_path: a TF model checkpoint
  :param return_logits: if set to True, return logits instead of probabilities
  :return: probabilities (or logits if return_logits is set to True)
  """
  # Compute the number of samples and deduce the number of batches.
  data_length = len(images)
  nb_batches = math.ceil(len(images) / FLAGS.batch_size)

  # Declare data placeholder.
  train_data_node = tf.placeholder(
      dtype=tf.float32,
      shape=[None, images.shape[1], images.shape[2], images.shape[3]])

  # Build a Graph that computes the logits predictions from the placeholder.
  if FLAGS.dataset == 'mnist':
    logits = mlenet.inference(train_data_node)
  elif FLAGS.dataset == 'svhn':
    logits = svhn.inference(train_data_node)

  if return_logits:
    # We are returning the logits directly (no need to apply softmax).
    output = logits
  else:
    # Add softmax predictions to graph: will return probabilities.
    output = tf.nn.softmax(logits)

  # Restore the trained variables. (Alternatively, restore the moving
  # average version of the learned variables for eval:
  # variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
  # variables_to_restore = variable_averages.variables_to_restore()
  # saver = tf.train.Saver(variables_to_restore))
  saver = tf.train.Saver()

  # Will hold the result.
  preds = np.zeros((data_length, 10), dtype=np.float32)

  # Create TF session.
  with tf.Session() as sess:
    # Restore TF session from checkpoint file.
    saver.restore(sess, ckpt_path)

    # Parse data by batch.
    for batch_nb in xrange(0, int(nb_batches + 1)):
      # Compute batch start and end indices.
      start, end = utils.batch_indices(batch_nb, data_length,
                                       FLAGS.batch_size)

      # Prepare feed dictionary.
      feed_dict = {train_data_node: images[start:end]}

      # Run session ([0] because run returns a batch with len(1st dim) == 1).
      preds[start:end, :] = sess.run([output], feed_dict=feed_dict)[0]

  # Reset graph to allow multiple calls.
  tf.reset_default_graph()

  return preds
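# Illustrative follow-up: score a checkpoint written by train_with_noise_ce.
# test_data, the dense integer test_labels, and the checkpoint filename
# (the '-0' suffix comes from saver.save(..., global_step=step)) are assumed
# names for the example, not from the source.
def _accuracy(test_data, test_labels):
  preds = softmax_preds(test_data, '/tmp/svhn_student/model.ckpt-0')
  return np.mean(np.argmax(preds, axis=1) == test_labels)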