def inference(images, num_classes, for_training=False, restore_logits=True,
              scope=None):
  """Build Inception v3 model architecture.

  See here for reference: http://arxiv.org/abs/1512.00567

  Args:
    images: Images returned from inputs() or distorted_inputs().
    num_classes: number of classes.
    for_training: If set to `True`, build the inference model for training.
      Kernels that operate differently for inference during training,
      e.g. dropout, are appropriately configured.
    restore_logits: whether or not the logits layers should be restored.
      Useful for fine-tuning a model with a different num_classes.
    scope: optional prefix string identifying the ImageNet tower.

  Returns:
    Logits. 2-D float Tensor.
    Auxiliary Logits. 2-D float Tensor of the side head. Used for training only.
    Predictions. 2-D float Tensor of softmax predictions.
  """
  # Arjun - check
  # Parameters for BatchNorm.
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': BATCHNORM_MOVING_AVERAGE_DECAY,
      # epsilon to prevent 0s in variance.
      'epsilon': 0.001,
  }
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope([slim.ops.conv2d, slim.ops.fc], weight_decay=0.00004):
    with slim.arg_scope([slim.ops.conv2d],
                        stddev=0.1,
                        activation=tf.nn.relu,
                        batch_norm_params=batch_norm_params):
      logits, end_points = slim.inception.inception_v3(
          images,
          dropout_keep_prob=0.8,
          num_classes=num_classes,
          is_training=for_training,
          restore_logits=restore_logits,
          scope=scope)

  # Add summaries for viewing model statistics on TensorBoard.
  _activation_summaries(end_points)

  # Grab the logits associated with the side head. Employed during training.
  auxiliary_logits = end_points['aux_logits']

  return logits, auxiliary_logits, end_points['predictions']
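# Minimal usage sketch (an assumption for illustration, not part of the
# original code): how this inference() builder might be called when putting
# together an evaluation graph. The placeholder shape and num_classes value
# (1001, i.e. ImageNet classes plus a background class) are illustrative; the
# module-level tf/slim imports are assumed to already exist.
def _example_inference_usage():
  with tf.Graph().as_default():
    # Assumed: a batch of 299x299 RGB images, as Inception v3 expects.
    images = tf.placeholder(tf.float32, [None, 299, 299, 3])
    # for_training=False configures dropout/batch norm for evaluation.
    logits, aux_logits, predictions = inference(
        images, num_classes=1001, for_training=False)
    return logits, aux_logits, predictions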
def testVariablesByLayer(self):
  batch_size = 5
  height, width = 299, 299
  with self.test_session():
    inputs = tf.random_uniform((batch_size, height, width, 3))
    with slim.arg_scope([slim.ops.conv2d],
                        batch_norm_params={'decay': 0.9997}):
      slim.inception.inception_v3(inputs)
    self.assertEqual(len(get_variables()), 388)
    self.assertEqual(len(get_variables('conv0')), 4)
    self.assertEqual(len(get_variables('conv1')), 4)
    self.assertEqual(len(get_variables('conv2')), 4)
    self.assertEqual(len(get_variables('conv3')), 4)
    self.assertEqual(len(get_variables('conv4')), 4)
    self.assertEqual(len(get_variables('mixed_35x35x256a')), 28)
    self.assertEqual(len(get_variables('mixed_35x35x288a')), 28)
    self.assertEqual(len(get_variables('mixed_35x35x288b')), 28)
    self.assertEqual(len(get_variables('mixed_17x17x768a')), 16)
    self.assertEqual(len(get_variables('mixed_17x17x768b')), 40)
    self.assertEqual(len(get_variables('mixed_17x17x768c')), 40)
    self.assertEqual(len(get_variables('mixed_17x17x768d')), 40)
    self.assertEqual(len(get_variables('mixed_17x17x768e')), 40)
    self.assertEqual(len(get_variables('mixed_8x8x2048a')), 36)
    self.assertEqual(len(get_variables('mixed_8x8x2048b')), 36)
    self.assertEqual(len(get_variables('logits')), 2)
    self.assertEqual(len(get_variables('aux_logits')), 10)
def testRegularizationLosses(self):
  batch_size = 5
  height, width = 299, 299
  with self.test_session():
    inputs = tf.random_uniform((batch_size, height, width, 3))
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc],
                        weight_decay=0.00004):
      slim.inception.inception_v3(inputs)
    losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(losses), len(get_variables_by_name('weights')))
def testVariablesToRestoreWithoutLogits(self):
  batch_size = 5
  height, width = 299, 299
  with self.test_session():
    inputs = tf.random_uniform((batch_size, height, width, 3))
    with slim.arg_scope([slim.ops.conv2d],
                        batch_norm_params={'decay': 0.9997}):
      slim.inception.inception_v3(inputs, restore_logits=False)
    variables_to_restore = tf.get_collection(
        slim.variables.VARIABLES_TO_RESTORE)
    self.assertEqual(len(variables_to_restore), 384)
def testVariablesWithoutBatchNorm(self):
  batch_size = 5
  height, width = 299, 299
  with self.test_session():
    inputs = tf.random_uniform((batch_size, height, width, 3))
    with slim.arg_scope([slim.ops.conv2d], batch_norm_params=None):
      slim.inception.inception_v3(inputs)
    self.assertEqual(len(get_variables()), 196)
    self.assertEqual(len(get_variables_by_name('weights')), 98)
    self.assertEqual(len(get_variables_by_name('biases')), 98)
    self.assertEqual(len(get_variables_by_name('beta')), 0)
    self.assertEqual(len(get_variables_by_name('gamma')), 0)
    self.assertEqual(len(get_variables_by_name('moving_mean')), 0)
    self.assertEqual(len(get_variables_by_name('moving_variance')), 0)
def build_heatmap(dataset):
  """Evaluate model on Dataset for a number of steps."""
  with tf.Graph().as_default():
    # Get images and labels from the dataset.
    images, cords = image_processing.inputs(dataset, BATCH_SIZE)  # cords is the label?
    print('images process is done')

    # Number of classes in the Dataset label set plus 1.
    # Label 0 is reserved for an (unused) background class.
    num_classes = dataset.num_classes()

    assert BATCH_SIZE % FLAGS.num_threads == 0, (
        'BATCH_SIZE must be divisible by FLAGS.num_threads')

    # Build a Graph that computes the logits predictions from the
    # inference model.
    images_splits = tf.split(images, FLAGS.num_threads, axis=0)
    cords_splits = tf.split(cords, FLAGS.num_threads, axis=0)

    prob_ops = []
    cords_ops = []
    for i in range(FLAGS.num_threads):
      with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
        with slim.arg_scope([slim.variables.variable], device='/cpu:%d' % i):
          print('i=%d' % i)
          _, _, prob_op = inception.inference(images_splits[i], num_classes,
                                              scope=scope)
          tf.get_variable_scope().reuse_variables()
          cords_op = tf.reshape(
              cords_splits[i], (int(BATCH_SIZE / FLAGS.num_threads), 1))
          prob_ops.append(prob_op)
          cords_ops.append(cords_op)

    # Restore the moving average version of the learned variables for eval.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.summary.merge_all()

    graph_def = tf.get_default_graph().as_graph_def()
    summary_writer = tf.summary.FileWriter(FLAGS.eval_dir,
                                           graph_def=graph_def)

    generate_heatmap(saver, dataset, summary_writer, prob_ops, cords_ops,
                     summary_op)
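# The generate_heatmap() helper is not shown in this section. For orientation
# only, an evaluation driver of this shape typically restores the checkpoint,
# starts the input queue runners, and runs the per-split ops. The sketch below
# is an assumption about that structure (including FLAGS.checkpoint_dir and the
# batch count), not the repository's actual implementation, and it omits the
# summary/heatmap accumulation details.
def _generate_heatmap_sketch(saver, prob_ops, cords_ops, num_batches):
  with tf.Session() as sess:
    # Assumed flag: directory holding the evaluation checkpoint.
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    saver.restore(sess, ckpt.model_checkpoint_path)
    # Queue runners feed the image_processing.inputs() pipeline.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    results = []
    for _ in range(num_batches):
      # Each element pairs per-split predictions with their coordinates.
      probs, cords = sess.run([prob_ops, cords_ops])
      results.append((probs, cords))
    coord.request_stop()
    coord.join(threads)
    return results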
def testTotalLossWithRegularization(self):
  batch_size = 5
  height, width = 299, 299
  num_classes = 1000
  with self.test_session():
    inputs = tf.random_uniform((batch_size, height, width, 3))
    dense_labels = tf.random_uniform((batch_size, num_classes))
    with slim.arg_scope([slim.ops.conv2d, slim.ops.fc],
                        weight_decay=0.00004):
      logits, end_points = slim.inception.inception_v3(inputs, num_classes)
      # Cross entropy loss for the main softmax prediction.
      slim.losses.cross_entropy_loss(logits,
                                     dense_labels,
                                     label_smoothing=0.1,
                                     weight=1.0)
      # Cross entropy loss for the auxiliary softmax head.
      slim.losses.cross_entropy_loss(end_points['aux_logits'],
                                     dense_labels,
                                     label_smoothing=0.1,
                                     weight=0.4,
                                     scope='aux_loss')
    losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
    self.assertEqual(len(losses), 2)

    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    self.assertEqual(len(reg_losses), 98)
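# Sketch (not code from the repository): the two collections exercised by the
# test above are how a total training loss is usually assembled, namely the
# cross-entropy terms plus every weight-decay regularization term summed into
# one scalar. The helper name is hypothetical.
def _example_total_loss():
  losses = tf.get_collection(slim.losses.LOSSES_COLLECTION)
  reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
  # tf.add_n sums the per-head cross-entropy losses and the weight-decay terms.
  return tf.add_n(losses + reg_losses, name='total_loss')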
def train(dataset):
  """Train on dataset for a number of steps."""
  with tf.Graph().as_default(), tf.device('/cpu:0'):
    # Create a variable to count the number of train() calls. This equals the
    # number of batches processed * FLAGS.num_gpus.
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)

    # Calculate the learning rate schedule.
    num_batches_per_epoch = (dataset.num_examples_per_epoch() /
                             FLAGS.batch_size)
    # decay_steps = int(num_batches_per_epoch * FLAGS.num_epochs_per_decay)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(FLAGS.initial_learning_rate,
                                    global_step,
                                    60000,
                                    FLAGS.learning_rate_decay_factor,
                                    staircase=True)

    # Create an optimizer that performs gradient descent.
    opt = tf.train.GradientDescentOptimizer(lr)
    # opt = tf.train.RMSPropOptimizer(lr, RMSPROP_DECAY,
    #                                 momentum=RMSPROP_MOMENTUM,
    #                                 epsilon=RMSPROP_EPSILON)

    # Get images and labels for ImageNet and split the batch across GPUs.
    # Unused code? Multiple batches are trained jointly across multiple GPUs.
    # assert FLAGS.batch_size % FLAGS.num_gpus == 0, (
    #     'Batch size must be divisible by number of GPUs')
    # split_batch_size = int(FLAGS.batch_size / FLAGS.num_gpus)

    # Override the number of preprocessing threads to account for the increased
    # number of GPU towers.
    # num_preprocess_threads: how many threads take part in I/O reading, 4 per GPU.
    num_preprocess_threads = FLAGS.num_preprocess_threads * FLAGS.num_gpus

    images, labels = image_processing.distorted_inputs(
        dataset,
        num_preprocess_threads=num_preprocess_threads)

    input_summaries = copy.copy(tf.get_collection(tf.GraphKeys.SUMMARIES))

    # updated - Arjun
    num_classes = dataset.num_classes()

    # Split the batch of images and labels for towers.
    images_splits = tf.split(images, FLAGS.num_gpus, axis=0)
    labels_splits = tf.split(labels, FLAGS.num_gpus, axis=0)

    # Calculate the gradients for each model tower.
    tower_grads = []
    reuse_variables = None
    for i in range(FLAGS.num_gpus):
      with tf.device('/gpu:%d' % i):
        with tf.name_scope('%s_%d' % (inception.TOWER_NAME, i)) as scope:
          # Force all Variables to reside on the CPU.
          with slim.arg_scope([slim.variables.variable], device='/cpu:0'):
            # Calculate the loss for one tower of the ImageNet model. This
            # function constructs the entire ImageNet model but shares the
            # variables across all towers.
            loss = _tower_loss(images_splits[i], labels_splits[i], num_classes,
                               scope, reuse_variables)

          # Reuse variables for the next tower.
          reuse_variables = True

          # Retain the summaries from the final tower.
          summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

          # Retain the Batch Normalization updates operations only from the
          # final tower. Ideally, we should grab the updates from all towers
          # but these stats accumulate extremely fast so we can ignore the
          # other stats from the other towers without significant detriment.
          batch_norm_updates = tf.get_collection(
              slim.ops.UPDATE_OPS_COLLECTION, scope)

          # Calculate the gradients for the batch of data on this ImageNet
          # tower.
          grads = opt.compute_gradients(loss)

          # Keep track of the gradients across all towers.
          tower_grads.append(grads)

    # We must calculate the mean of each gradient. Note that this is the
    # synchronization point across all towers.
    grads = _average_gradients(tower_grads)

    # Add summaries for the input processing and global_step.
    summaries.extend(input_summaries)

    # Add a summary to track the learning rate.
    summaries.append(tf.summary.scalar('learning_rate', lr))

    # Add histograms for gradients.
    for grad, var in grads:
      if grad is not None:
        summaries.append(
            tf.summary.histogram(var.op.name + '/gradients', grad))

    # Apply the gradients to adjust the shared variables.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
      summaries.append(tf.summary.histogram(var.op.name, var))

    # Track the moving averages of all trainable variables.
    # Note that we maintain a "double-average" of the BatchNormalization
    # global statistics. This is more complicated than need be but we employ
    # this for backward-compatibility with our previous models.
    variable_averages = tf.train.ExponentialMovingAverage(
        inception.MOVING_AVERAGE_DECAY, global_step)

    # Another possibility is to use tf.slim.get_variables().
    variables_to_average = (tf.trainable_variables() +
                            tf.moving_average_variables())
    variables_averages_op = variable_averages.apply(variables_to_average)

    # Group all updates into a single train op.
    batch_norm_updates_op = tf.group(*batch_norm_updates)
    train_op = tf.group(apply_gradient_op, variables_averages_op,
                        batch_norm_updates_op)

    # Create a saver.
    saver = tf.train.Saver(tf.all_variables(), max_to_keep=1000)

    # Build the summary operation from the last tower summaries.
    summary_op = tf.summary.merge(summaries)

    # Build an initialization operation to run below.
    init = tf.initialize_all_variables()

    # Start running operations on the Graph. allow_soft_placement must be set
    # to True to build towers on GPU, as some of the ops do not have GPU
    # implementations.
    config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=FLAGS.log_device_placement)
    config.gpu_options.per_process_gpu_memory_fraction = 0.6
    sess = tf.Session(config=config)
    sess.run(init)

    if not FLAGS.fine_tune:
      # assert tf.gfile.Exists(FLAGS.pretrained_model_checkpoint_path)
      print('model path: %s' % FLAGS.pretrained_model_checkpoint_path)
      variables_to_restore = tf.get_collection(
          slim.variables.VARIABLES_TO_RESTORE)
      restorer = tf.train.Saver(variables_to_restore)
      restorer.restore(sess, FLAGS.pretrained_model_checkpoint_path)
      print('%s: Pre-trained model restored from %s' %
            (datetime.now(), FLAGS.pretrained_model_checkpoint_path))

    # Start the queue runners. This starts the input-pipeline threads, which
    # fill the queues with examples so that dequeue ops can fetch them.
    tf.train.start_queue_runners(sess=sess)

    summary_writer = tf.summary.FileWriter(
        FLAGS.train_logs,
        graph=sess.graph.as_graph_def(add_shapes=True))

    for step in range(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time

      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      if step % 10 == 0:
        examples_per_sec = FLAGS.batch_size / float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print(format_str % (datetime.now(), step, loss_value,
                            examples_per_sec, duration))

      if step % 100 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)

      # Save the model checkpoint periodically.
      if step % 5000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_models, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
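# _tower_loss() and _average_gradients() are used above but not shown in this
# section. As a reference only, the sketch below outlines the usual shape of a
# tower-gradient averaging helper (as in the multi-GPU Inception/CIFAR-10
# examples); treat it as an assumed illustration, not this repository's exact
# code. It also omits handling of None gradients for brevity.
def _average_gradients_sketch(tower_grads):
  """Averages gradients for each shared variable across all towers.

  Args:
    tower_grads: list over towers of lists of (gradient, variable) pairs,
      as returned by opt.compute_gradients() in each tower.

  Returns:
    A single list of (gradient, variable) pairs with the gradient averaged
    across towers.
  """
  average_grads = []
  # zip(*tower_grads) groups the (grad, var) pairs belonging to one variable.
  for grad_and_vars in zip(*tower_grads):
    # Stack the per-tower gradients along a new leading axis and average it.
    grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
    grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
    # Variables are shared across towers, so the first tower's variable works.
    average_grads.append((grad, grad_and_vars[0][1]))
  return average_grads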