Example #1
0
 def testVariablesSetDevice(self):
     """Checks that variables land on the device they were pinned to."""
     batch_size = 5
     height, width = 299, 299
     num_classes = 1000
     inputs = tf.random_uniform((batch_size, height, width, 3))
     # Build one copy of the network per device; the enclosing variable
     # scopes let us fetch each copy's variables separately afterwards.
     placements = (('on_cpu', '/cpu:0'), ('on_gpu', '/gpu:0'))
     for scope_name, device_name in placements:
         with tf.variable_scope(scope_name), tf.device(device_name):
             inception.inception_v4(inputs, num_classes)
     # Every variable created under a scope must report that scope's device.
     for scope_name, device_name in placements:
         for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                    scope=scope_name):
             self.assertDeviceEqual(v.device, device_name)
Example #2
0
 def testTrainEvalWithReuse(self):
     """Builds a train graph, then reuses its variables for an eval graph."""
     train_batch_size = 5
     eval_batch_size = 2
     height, width = 150, 150
     num_classes = 1000
     with self.test_session() as sess:
         train_inputs = tf.random_uniform(
             (train_batch_size, height, width, 3))
         # First call creates the model variables under the default scope.
         inception.inception_v4(train_inputs, num_classes)
         eval_inputs = tf.random_uniform(
             (eval_batch_size, height, width, 3))
         # reuse=True shares the variables created above; is_training=False
         # selects the inference graph (no dropout / BN updates).
         logits, _ = inception.inception_v4(eval_inputs,
                                            num_classes,
                                            is_training=False,
                                            reuse=True)
         predictions = tf.argmax(logits, 1)
         sess.run(tf.global_variables_initializer())
         output = sess.run(predictions)
         # assertEqual: assertEquals is a deprecated unittest alias.
         self.assertEqual(output.shape, (eval_batch_size,))
Example #3
0
 def testBuildWithoutAuxLogits(self):
     """With create_aux_logits=False no 'AuxLogits' endpoint is exposed."""
     batch_size = 5
     height, width = 299, 299
     num_classes = 1000
     inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, endpoints = inception.inception_v4(inputs,
                                                num_classes,
                                                create_aux_logits=False)
     # assertNotIn gives a clearer failure message than
     # assertFalse('AuxLogits' in endpoints).
     self.assertNotIn('AuxLogits', endpoints)
     self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
Example #4
0
 def testHalfSizeImages(self):
     """150x150 inputs still yield logits and a 3x3 pre-pool feature map."""
     num_classes = 1000
     batch_size = 5
     height = width = 150
     images = tf.random_uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v4(images, num_classes)
     self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
     self.assertListEqual(logits.get_shape().as_list(),
                          [batch_size, num_classes])
     # Mixed_7d is the last convolutional endpoint before global pooling;
     # at half-size inputs its spatial extent shrinks to 3x3.
     final_conv = end_points['Mixed_7d']
     self.assertListEqual(final_conv.get_shape().as_list(),
                          [batch_size, 3, 3, 1536])
Example #5
0
 def testEvaluation(self):
     """Runs one inference pass and checks the predictions' shape."""
     batch_size = 2
     height, width = 299, 299
     num_classes = 1000
     with self.test_session() as sess:
         eval_inputs = tf.random_uniform((batch_size, height, width, 3))
         # is_training=False builds the inference graph (no dropout / BN
         # updates).
         logits, _ = inception.inception_v4(eval_inputs,
                                            num_classes,
                                            is_training=False)
         predictions = tf.argmax(logits, 1)
         sess.run(tf.global_variables_initializer())
         output = sess.run(predictions)
         # assertEqual: assertEquals is a deprecated unittest alias.
         self.assertEqual(output.shape, (batch_size,))
Example #6
0
 def __call__(self, x_input):
   """Constructs the model (reusing variables after the first call) and
   returns the logits for the given input batch.
   """
   # Reuse variables on every call after the graph has been built once.
   reuse = True if self.built else None
   x_input = image_normalize(x_input, 'default')
   with slim.arg_scope(inception.inception_v4_arg_scope()):
     logit, end_points = inception.inception_v4(
         x_input, num_classes=self.num_classes, is_training=True,
         reuse=reuse)
   self.built = True
   # NOTE(review): the original computed end_points['Predictions'] and
   # stripped its trailing reshape op (output.op.inputs[0]) but never used
   # the result; that dead code is removed here and the logits are still
   # returned, matching the original behavior. If probabilities are wanted
   # instead, return end_points['Predictions'].op.inputs[0].
   return logit
Example #7
0
    def network(self):
        """Builds a classification head on top of a frozen Inception-V4.

        Extracts the 'PreLogitsFlatten' features, stops gradients so the
        backbone is not fine-tuned, adds three fully connected layers
        under scope 'D', and creates loss / optimizer / accuracy ops.
        Results are stored on ``self``: ``net``, ``y``, ``cost``,
        ``global_step``, ``train_op``, ``correct_prediction``,
        ``accuracy``.
        """
        with slim.arg_scope(inception.inception_v4_arg_scope()):
            end_points = inception.inception_v4(inputs=self.x,
                                                num_classes=1001,
                                                is_training=self.is_training,
                                                dropout_keep_prob=self.keep)
        # inception_v4 returns (logits, end_points); index 1 is the
        # end-points dict.
        net = end_points[1]['PreLogitsFlatten']
        print(net)
        self.net = tf.stop_gradient(net)  # No gradient updates for this layer or any earlier layer.
        print(net.shape)

        with tf.variable_scope('D'):  # Rebuild the fully connected layers.
            fc1 = slim.fully_connected(self.net,
                                       512,
                                       activation_fn=tf.nn.elu,
                                       scope='fc1')
            fc = slim.fully_connected(fc1,
                                      48,
                                      activation_fn=tf.nn.sigmoid,
                                      scope='coding_layer')
            self.y = slim.fully_connected(fc,
                                          self.num_classes,
                                          activation_fn=tf.nn.softmax,
                                          scope='output')

        tvars = tf.trainable_variables()  # All updatable variables.
        d_params = [v for v in tvars if v.name.startswith('D/')
                    ]  # Only update fc1, coding_layer and output parameters; everything else stays frozen.

        # Compute the loss.
        # NOTE(review): self.y already went through softmax, yet
        # softmax_cross_entropy expects raw logits — verify this is
        # intentional (it applies softmax a second time).
        self.cost = tf.losses.softmax_cross_entropy(onehot_labels=self.y_,
                                                    logits=self.y)
        tf.summary.scalar('loss', self.cost)
        self.global_step = tf.Variable(0, trainable=False)

        # Exponentially decaying learning rate.
        lr = tf.train.exponential_decay(
            self.config.lr, self.global_step,
            self.config.total_nums / self.config.batch_size,
            self.config.decay_rate)
        # Set up the optimizer; var_list restricts updates to the head.
        self.train_op = tf.train.GradientDescentOptimizer(lr).minimize(
            loss=self.cost, global_step=self.global_step, var_list=d_params)
        # Compute the accuracy.
        self.correct_prediction = tf.equal(tf.argmax(self.y, 1),
                                           tf.argmax(self.y_, 1))
        self.accuracy = tf.reduce_mean(
            tf.cast(self.correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', self.accuracy)
Example #8
0
 def testUnknownBatchSize(self):
     """Logits keep a dynamic batch dim when fed a None-batch placeholder."""
     batch_size = 1
     height, width = 299, 299
     num_classes = 1000
     with self.test_session() as sess:
         # None batch dimension: the graph must build without a fixed size.
         inputs = tf.placeholder(tf.float32, (None, height, width, 3))
         logits, _ = inception.inception_v4(inputs, num_classes)
         self.assertTrue(logits.op.name.startswith('InceptionV4/Logits'))
         self.assertListEqual(logits.get_shape().as_list(),
                              [None, num_classes])
         images = tf.random_uniform((batch_size, height, width, 3))
         sess.run(tf.global_variables_initializer())
         output = sess.run(logits, {inputs: images.eval()})
         # assertEqual: assertEquals is a deprecated unittest alias.
         self.assertEqual(output.shape, (batch_size, num_classes))
Example #9
0
 def testAllEndPointsShapes(self):
     """Verifies the full set of endpoint names and their static shapes."""
     batch_size = 5
     height, width = 299, 299
     num_classes = 1000
     inputs = tf.random_uniform((batch_size, height, width, 3))
     _, end_points = inception.inception_v4(inputs, num_classes)
     # Expected shape for every endpoint at 299x299 input.
     endpoints_shapes = {
         'Conv2d_1a_3x3': [batch_size, 149, 149, 32],
         'Conv2d_2a_3x3': [batch_size, 147, 147, 32],
         'Conv2d_2b_3x3': [batch_size, 147, 147, 64],
         'Mixed_3a': [batch_size, 73, 73, 160],
         'Mixed_4a': [batch_size, 71, 71, 192],
         'Mixed_5a': [batch_size, 35, 35, 384],
         # 4 x Inception-A blocks
         'Mixed_5b': [batch_size, 35, 35, 384],
         'Mixed_5c': [batch_size, 35, 35, 384],
         'Mixed_5d': [batch_size, 35, 35, 384],
         'Mixed_5e': [batch_size, 35, 35, 384],
         # Reduction-A block
         'Mixed_6a': [batch_size, 17, 17, 1024],
         # 7 x Inception-B blocks
         'Mixed_6b': [batch_size, 17, 17, 1024],
         'Mixed_6c': [batch_size, 17, 17, 1024],
         'Mixed_6d': [batch_size, 17, 17, 1024],
         'Mixed_6e': [batch_size, 17, 17, 1024],
         'Mixed_6f': [batch_size, 17, 17, 1024],
         'Mixed_6g': [batch_size, 17, 17, 1024],
         'Mixed_6h': [batch_size, 17, 17, 1024],
         # Reduction-B block (the second reduction stage; the original
         # comment mislabeled it Reduction-A)
         'Mixed_7a': [batch_size, 8, 8, 1536],
         # 3 x Inception-C blocks
         'Mixed_7b': [batch_size, 8, 8, 1536],
         'Mixed_7c': [batch_size, 8, 8, 1536],
         'Mixed_7d': [batch_size, 8, 8, 1536],
         # Logits and predictions
         'AuxLogits': [batch_size, num_classes],
         'PreLogitsFlatten': [batch_size, 1536],
         'Logits': [batch_size, num_classes],
         'Predictions': [batch_size, num_classes]
     }
     # Exactly the expected endpoints must be present — no extras, no gaps.
     self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
     for endpoint_name in endpoints_shapes:
         expected_shape = endpoints_shapes[endpoint_name]
         self.assertTrue(endpoint_name in end_points)
         self.assertListEqual(
             end_points[endpoint_name].get_shape().as_list(),
             expected_shape)
Example #10
0
 def testBuildLogits(self):
     """Logits, aux logits and predictions get expected names and shapes."""
     batch_size = 5
     height, width = 299, 299
     num_classes = 1000
     inputs = tf.random_uniform((batch_size, height, width, 3))
     logits, end_points = inception.inception_v4(inputs, num_classes)
     expected_shape = [batch_size, num_classes]
     # Each output tensor is paired with the op-name prefix it must carry;
     # all three share the same [batch, classes] shape.
     named_outputs = [
         (end_points['AuxLogits'], 'InceptionV4/AuxLogits'),
         (logits, 'InceptionV4/Logits'),
         (end_points['Predictions'], 'InceptionV4/Logits/Predictions'),
     ]
     for tensor, name_prefix in named_outputs:
         self.assertTrue(tensor.op.name.startswith(name_prefix))
         self.assertListEqual(tensor.get_shape().as_list(), expected_shape)