Example #1
 def testModelVariables(self):
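     # Builds vgg_a and checks that get_model_variables() returns exactly the expected weight and bias names.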
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         vgg.vgg_a(inputs, num_classes)
         expected_names = [
             'vgg_a/conv1/conv1_1/weights',
             'vgg_a/conv1/conv1_1/biases',
             'vgg_a/conv2/conv2_1/weights',
             'vgg_a/conv2/conv2_1/biases',
             'vgg_a/conv3/conv3_1/weights',
             'vgg_a/conv3/conv3_1/biases',
             'vgg_a/conv3/conv3_2/weights',
             'vgg_a/conv3/conv3_2/biases',
             'vgg_a/conv4/conv4_1/weights',
             'vgg_a/conv4/conv4_1/biases',
             'vgg_a/conv4/conv4_2/weights',
             'vgg_a/conv4/conv4_2/biases',
             'vgg_a/conv5/conv5_1/weights',
             'vgg_a/conv5/conv5_1/biases',
             'vgg_a/conv5/conv5_2/weights',
             'vgg_a/conv5/conv5_2/biases',
             'vgg_a/fc6/weights',
             'vgg_a/fc6/biases',
             'vgg_a/fc7/weights',
             'vgg_a/fc7/biases',
             'vgg_a/fc8/weights',
             'vgg_a/fc8/biases',
         ]
         model_variables = [
             v.op.name for v in variables_lib.get_model_variables()
         ]
         self.assertSetEqual(set(model_variables), set(expected_names))
Example #2
 def testForward(self):
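     # Runs a single forward pass on random input and checks that at least one logit is non-zero.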
     batch_size = 1
     height, width = 224, 224
     with self.test_session() as sess:
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_a(inputs)
         sess.run(variables.global_variables_initializer())
         output = sess.run(logits)
         self.assertTrue(output.any())
Example #3
 def testFullyConvolutional(self):
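     # With spatial_squeeze=False and 256x256 inputs, fc8 stays convolutional and yields a 2x2 map of class logits.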
     batch_size = 1
     height, width = 256, 256
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
         self.assertEqual(logits.op.name, 'vgg_a/fc8/BiasAdd')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, 2, 2, num_classes])
Example #4
 def testBuild(self):
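     # Builds vgg_a with the default spatial squeeze and checks the logits op name and output shape.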
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = random_ops.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_a(inputs, num_classes)
         self.assertEqual(logits.op.name, 'vgg_a/fc8/squeezed')
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
Example #5
 def testEvaluation(self):
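     # Builds the graph in inference mode (is_training=False) and checks the shapes of the logits and the argmax predictions.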
     batch_size = 2
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         eval_inputs = tf.random_uniform((batch_size, height, width, 3))
         logits, _ = vgg.vgg_a(eval_inputs, is_training=False)
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
         predictions = tf.argmax(logits, 1)
         self.assertListEqual(predictions.get_shape().as_list(),
                              [batch_size])
Example #6
 def testTrainEvalWithReuse(self):
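     # Builds a 224x224 training graph, reuses its variables for a 256x256 eval graph, and averages the 2x2 logit map before argmax.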
     train_batch_size = 2
     eval_batch_size = 1
     train_height, train_width = 224, 224
     eval_height, eval_width = 256, 256
     num_classes = 1000
     with self.test_session():
         train_inputs = random_ops.random_uniform(
             (train_batch_size, train_height, train_width, 3))
         logits, _ = vgg.vgg_a(train_inputs)
         self.assertListEqual(logits.get_shape().as_list(),
                              [train_batch_size, num_classes])
         variable_scope.get_variable_scope().reuse_variables()
         eval_inputs = random_ops.random_uniform(
             (eval_batch_size, eval_height, eval_width, 3))
         logits, _ = vgg.vgg_a(eval_inputs,
                               is_training=False,
                               spatial_squeeze=False)
         self.assertListEqual(logits.get_shape().as_list(),
                              [eval_batch_size, 2, 2, num_classes])
         logits = math_ops.reduce_mean(logits, [1, 2])
         predictions = math_ops.argmax(logits, 1)
         self.assertEqual(predictions.get_shape().as_list(),
                          [eval_batch_size])
Example #7
 def testEndPoints(self):
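     # Checks that end_points exposes every conv/pool/fc activation under the expected keys.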
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     with self.test_session():
         inputs = tf.random_uniform((batch_size, height, width, 3))
         _, end_points = vgg.vgg_a(inputs, num_classes)
         expected_names = [
             'vgg_a/conv1/conv1_1', 'vgg_a/pool1', 'vgg_a/conv2/conv2_1',
             'vgg_a/pool2', 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',
             'vgg_a/pool3', 'vgg_a/conv4/conv4_1', 'vgg_a/conv4/conv4_2',
             'vgg_a/pool4', 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',
             'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'
         ]
         self.assertSetEqual(set(end_points.keys()), set(expected_names))
Example #8
 def testEndPoints(self):
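     # Repeats the end_points check in a fresh graph for both is_training=True and is_training=False.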
     batch_size = 5
     height, width = 224, 224
     num_classes = 1000
     for is_training in [True, False]:
         with ops.Graph().as_default():
             inputs = random_ops.random_uniform(
                 (batch_size, height, width, 3))
             _, end_points = vgg.vgg_a(inputs,
                                       num_classes,
                                       is_training=is_training)
             expected_names = [
                 'vgg_a/conv1/conv1_1', 'vgg_a/pool1',
                 'vgg_a/conv2/conv2_1', 'vgg_a/pool2',
                 'vgg_a/conv3/conv3_1', 'vgg_a/conv3/conv3_2',
                 'vgg_a/pool3', 'vgg_a/conv4/conv4_1',
                 'vgg_a/conv4/conv4_2', 'vgg_a/pool4',
                 'vgg_a/conv5/conv5_1', 'vgg_a/conv5/conv5_2',
                 'vgg_a/pool5', 'vgg_a/fc6', 'vgg_a/fc7', 'vgg_a/fc8'
             ]
             self.assertSetEqual(set(end_points.keys()),
                                 set(expected_names))
Example #9
#       net = slim.dropout(net, 0.5, scope='dropout6')
#     net = slim.fully_connected(net, 4096, scope='fc7')
#     if is_training:
#       net = slim.dropout(net, 0.5, scope='dropout7')
#     net = slim.fully_connected(net, 300, activation_fn=None, scope='fc8')
#   return net

# Assumed imports for this snippet (TF 1.x with tf.contrib.slim); zjltf is the
# author's own TFRecord data-loading module, and BATCH_SIZE is an assumed value
# that is not specified in the original snippet.
import tensorflow as tf
from tensorflow.contrib.slim.nets import vgg
import zjltf

slim = tf.contrib.slim
BATCH_SIZE = 32  # assumed example value

train_log_dir = './log'
if not tf.gfile.Exists(train_log_dir):
    tf.gfile.MakeDirs(train_log_dir)

with tf.Graph().as_default():
    # Set up the data loading:
    images, labels = zjltf.get_tfrecord(BATCH_SIZE, isTrain=True)
    # Define the model:
    predictions, endp = vgg.vgg_a(images, is_training=True)

    # Specify the loss function:
    #print(predictions)
    #print(endp)
    slim.losses.softmax_cross_entropy(predictions, labels)

    total_loss = slim.losses.get_total_loss()
    tf.summary.scalar('losses/total_loss', total_loss)

    # Specify the optimization scheme:
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=.001)

    # create_train_op that ensures that when we evaluate it to get the loss,
    # the update_ops are done and the gradient updates are computed.
    train_tensor = slim.learning.create_train_op(total_loss, optimizer)
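
    # A minimal sketch of how this training setup would typically be launched with
    # the contrib slim helper; the step count and save intervals are assumed
    # example values, not part of the original snippet.
    slim.learning.train(
        train_tensor,
        train_log_dir,
        number_of_steps=1000,
        save_summaries_secs=60,
        save_interval_secs=600)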