def testRaiseValueErrorWithInvalidDepthMultiplier(self):
    """Non-positive depth_multiplier values must raise ValueError."""
    batch_size = 5
    num_classes = 1000
    side = 224

    images = random_ops.random_uniform((batch_size, side, side, 3))
    # Both a negative and a zero multiplier are invalid.
    for bad_value in (-0.1, 0.0):
        with self.assertRaises(ValueError):
            inception_v2.inception_v2(images, num_classes,
                                      depth_multiplier=bad_value)
Example #2
0
    def testRaiseValueErrorWithInvalidDepthMultiplier(self):
        """inception_v2 must reject depth_multiplier <= 0 with ValueError."""
        batch_size = 5
        num_classes = 1000
        height = width = 224

        images = random_ops.random_uniform((batch_size, height, width, 3))
        # A negative multiplier and a zero multiplier are both invalid.
        for invalid_multiplier in (-0.1, 0.0):
            with self.assertRaises(ValueError):
                inception_v2.inception_v2(images,
                                          num_classes,
                                          depth_multiplier=invalid_multiplier)
def network_inception_v2():
    """Build an InceptionV2 inference graph for one 224x224 RGB image.

    Returns:
        The network output (logits) tensor; end points are discarded.
    """
    shape = [1, 224, 224, 3]
    image = tf.placeholder(dtype=tf.float32, name='input', shape=shape)
    logits, _ = inception_v2(image, num_classes=1000, is_training=False)
    return logits
  def testTrainEvalWithReuse(self):
    """Builds a train graph, then reuses its variables for an eval graph.

    The eval network is constructed with reuse=True at a different batch
    size, and must still produce one prediction per eval example.
    """
    train_batch_size = 5
    eval_batch_size = 2
    height, width = 150, 150
    num_classes = 1000

    train_inputs = random_ops.random_uniform(
        (train_batch_size, height, width, 3))
    # First call creates the model variables; the eval call reuses them.
    inception_v2.inception_v2(train_inputs, num_classes)
    eval_inputs = random_ops.random_uniform((eval_batch_size, height, width, 3))
    logits, _ = inception_v2.inception_v2(eval_inputs, num_classes, reuse=True)
    predictions = math_ops.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(predictions)
      # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
      # use the supported assertEqual spelling.
      self.assertEqual(output.shape, (eval_batch_size,))
Example #5
0
 def inception_v2_layer(self):
     """Run the expanded image input through InceptionV2.

     The number of output classes equals self.w_len; dropout keep-prob
     and the training flag come from the instance.
     """
     num_outputs = self.w_len
     with slim.arg_scope(inception_v2_arg_scope()):
         logits, _ = inception_v2(inputs=self.img_input_expand,
                                  num_classes=num_outputs,
                                  dropout_keep_prob=self.keep_prob,
                                  is_training=self.is_training)
     return logits
  def testLogitsNotSqueezed(self):
    """spatial_squeeze=False must leave logits with shape [N, 1, 1, C]."""
    class_count = 25
    batch_images = random_ops.random_uniform([1, 224, 224, 3])
    unsqueezed_logits, _ = inception_v2.inception_v2(
        batch_images, num_classes=class_count, spatial_squeeze=False)

    with self.test_session() as sess:
      variables.global_variables_initializer().run()
      fetched = sess.run(unsqueezed_logits)
      self.assertListEqual(list(fetched.shape), [1, 1, 1, class_count])
Example #7
0
    def _build(self, inputs, gt_embedding):
        """Classify a length-normalized embedded sequence with InceptionV2.

        Pads `inputs` with zeros along the time axis up to
        params["max_input_seq_length"], expands it to rank 4, bilinearly
        resizes it to 224x224, and runs InceptionV2 to obtain per-example
        vocabulary logits and argmax predictions.

        Args:
            inputs: rank-3 float tensor [batch, seq_len, embedding_dim]
                (RNN output, per the comment below).
            gt_embedding: not referenced in this method; presumably part of
                a shared decoder interface — confirm with callers.

        Returns:
            A (DecoderOutput(logits, predicted_ids), None) tuple.
        """
        scope = tf.get_variable_scope()
        # All variables created below share one uniform initializer.
        scope.set_initializer(
            tf.random_uniform_initializer(-self.params["init_scale"],
                                          self.params["init_scale"]))

        max_input_seq_len = self.params["max_input_seq_length"]
        input_embeding_dim = self.params["input_embeding_dim"]
        batch_size = tf.shape(inputs)[0]

        # Patch or trim dynamic input to fixed size.
        # inputs data is RNN result in shape of [batch, seq_len, embedding_dim]
        # Patch with 0 or trim extra length and make the tensor shape to be
        # [batch, max_input_seq_len, embedding_dim]
        # NOTE(review): target_size, input_embeding_dim and batch_size are
        # computed here but never used later in this method.
        target_size = ops.convert_to_tensor(max_input_seq_len,
                                            dtype=dtypes.int32,
                                            name="max_input_seq_len")

        def _pad_fn(inputs, max_input_seq_len):
            # Zero-pad the time axis (axis 1) up to the fixed length.
            target_pad = max_input_seq_len - tf.shape(inputs)[1]
            # inputs = tf.Print(inputs, [tf.shape(inputs), tf.transpose(inputs, [0, 2, 1])], message="input before pad: ", summarize = 1000)
            padded_inputs = tf.pad(inputs, [[0, 0], [0, target_pad], [0, 0]],
                                   "CONSTANT")
            # padded_inputs = tf.Print(padded_inputs, [tf.shape(padded_inputs), tf.transpose(padded_inputs, [0, 2, 1])], message="input after pad: ", summarize = 1000)
            return padded_inputs

        # Pad only when the sequence is shorter than the target length.
        pad_inputs = tf.cond(tf.shape(inputs)[1] < max_input_seq_len,
                             lambda: _pad_fn(inputs, max_input_seq_len),
                             lambda: tf.identity(inputs),
                             name="maybe_padding")

        def _trim_fn(inputs, max_input_seq_len):
            # NOTE(review): returns inputs unchanged, so over-length
            # sequences are not actually trimmed here; the 224x224 resize
            # below absorbs any extra length. Confirm this is intended.
            return inputs

        final_inputs = tf.cond(tf.shape(pad_inputs)[1] > max_input_seq_len,
                               lambda: _trim_fn(pad_inputs, max_input_seq_len),
                               lambda: tf.identity(pad_inputs),
                               name="maybe_trimming")

        # Inception input must be rank 4: [batch, row, col, channel], we have to
        # manually expand our rank3 input.
        inception_input = tf.expand_dims(final_inputs, 3)
        # Input size for inception must be 224*224
        inception_input = tf.image.resize_images(
            images=inception_input,
            size=[224, 224],
            method=tf.image.ResizeMethod.BILINEAR)

        logits, _ = inception_v2(inputs=inception_input,
                                 num_classes=self.vocab_size,
                                 is_training=True)
        # Predicted token id per example = argmax over the vocabulary axis.
        predictions = math_ops.cast(math_ops.argmax(logits, axis=-1),
                                    dtypes.int32)
        return DecoderOutput(logits=logits, predicted_ids=predictions), None
Example #8
0
    def testTrainEvalWithReuse(self):
        """Train-graph variables must be reusable by a smaller eval graph."""
        train_batch_size = 5
        eval_batch_size = 2
        height, width = 150, 150
        num_classes = 1000

        train_inputs = random_ops.random_uniform(
            (train_batch_size, height, width, 3))
        # Creates the model variables; reused below via reuse=True.
        inception_v2.inception_v2(train_inputs, num_classes)
        eval_inputs = random_ops.random_uniform(
            (eval_batch_size, height, width, 3))
        logits, _ = inception_v2.inception_v2(eval_inputs,
                                              num_classes,
                                              reuse=True)
        predictions = math_ops.argmax(logits, 1)

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(predictions)
            # Fix: assertEquals is a deprecated alias (removed in
            # Python 3.12); use assertEqual.
            self.assertEqual(output.shape, (eval_batch_size, ))
  def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
    """depth_multiplier=2.0 must double every Conv/Mixed endpoint's depth."""
    batch_size = 5
    height, width = 224, 224
    num_classes = 1000

    inputs = random_ops.random_uniform((batch_size, height, width, 3))
    _, base_end_points = inception_v2.inception_v2(inputs, num_classes)

    conv_or_mixed = [
        name for name in base_end_points
        if name.startswith(('Mixed', 'Conv'))
    ]

    _, doubled_end_points = inception_v2.inception_v2(
        inputs, num_classes, scope='depth_multiplied_net', depth_multiplier=2.0)

    for name in conv_or_mixed:
      base_depth = base_end_points[name].get_shape().as_list()[3]
      doubled_depth = doubled_end_points[name].get_shape().as_list()[3]
      self.assertEqual(2.0 * base_depth, doubled_depth)
Example #10
0
    def testLogitsNotSqueezed(self):
        """Unsqueezed logits must retain their [N, 1, 1, C] shape."""
        class_count = 25
        inputs = random_ops.random_uniform([1, 224, 224, 3])
        raw_logits, _ = inception_v2.inception_v2(inputs,
                                                  num_classes=class_count,
                                                  spatial_squeeze=False)

        with self.test_session() as sess:
            variables.global_variables_initializer().run()
            fetched = sess.run(raw_logits)
            self.assertListEqual(list(fetched.shape),
                                 [1, 1, 1, class_count])
  def testHalfSizeImages(self):
    """A 112x112 input should yield a 4x4 Mixed_5c feature map."""
    num_classes = 1000
    batch_size = 5
    half_size = 112

    images = random_ops.random_uniform((batch_size, half_size, half_size, 3))
    logits, end_points = inception_v2.inception_v2(images, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
    self.assertListEqual(logits.get_shape().as_list(),
                         [batch_size, num_classes])
    final_features = end_points['Mixed_5c']
    self.assertListEqual(final_features.get_shape().as_list(),
                         [batch_size, 4, 4, 1024])
Example #12
0
    def testBuildClassificationNetwork(self):
        """Logits and Predictions endpoints must both be [batch, classes]."""
        num_classes = 1000
        batch_size = 5
        side = 224

        images = random_ops.random_uniform((batch_size, side, side, 3))
        logits, end_points = inception_v2.inception_v2(images, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
        expected_shape = [batch_size, num_classes]
        self.assertListEqual(logits.get_shape().as_list(), expected_shape)
        self.assertTrue('Predictions' in end_points)
        self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                             expected_shape)
Example #13
0
    def testHalfSizeImages(self):
        """Half-size (112x112) inputs shrink Mixed_5c to 4x4x1024."""
        num_classes = 1000
        batch_size = 5
        height = width = 112

        images = random_ops.random_uniform((batch_size, height, width, 3))
        logits, end_points = inception_v2.inception_v2(images, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
        self.assertListEqual(logits.get_shape().as_list(),
                             [batch_size, num_classes])
        last_conv_map = end_points['Mixed_5c']
        self.assertListEqual(last_conv_map.get_shape().as_list(),
                             [batch_size, 4, 4, 1024])
  def testBuildClassificationNetwork(self):
    """The classifier exposes squeezed logits and a Predictions endpoint."""
    num_classes = 1000
    batch_size = 5
    side = 224

    images = random_ops.random_uniform((batch_size, side, side, 3))
    logits, end_points = inception_v2.inception_v2(images, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
    wanted_shape = [batch_size, num_classes]
    self.assertListEqual(logits.get_shape().as_list(), wanted_shape)
    self.assertTrue('Predictions' in end_points)
    self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
                         wanted_shape)
Example #15
0
    def inference(self):
        """Reshape the flat input and classify it with slim's InceptionV2.

        Returns:
            The logits tensor, spatially squeezed to [batch, num_classes].
        """
        rows = self.input_shape[0]
        cols = self.input_shape[1]
        channels = self.input_shape[2]
        images = tf.reshape(self.x, shape=[-1, rows, cols, channels])
        with slim.arg_scope(inception_v2.inception_v2_arg_scope()):
            logits, _ = inception_v2.inception_v2(
                images,
                num_classes=self.nclasses,
                is_training=self.is_training,
                spatial_squeeze=True)
        return logits
  def testEvaluation(self):
    """An is_training=False graph yields one prediction per input image."""
    batch_size = 2
    height, width = 224, 224
    num_classes = 1000

    eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
    logits, _ = inception_v2.inception_v2(
        eval_inputs, num_classes, is_training=False)
    predictions = math_ops.argmax(logits, 1)

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(predictions)
      # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
      # use assertEqual.
      self.assertEqual(output.shape, (batch_size,))
Example #17
0
    def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
        """Doubling depth_multiplier doubles each Conv/Mixed channel count."""
        batch_size = 5
        height, width = 224, 224
        num_classes = 1000

        inputs = random_ops.random_uniform((batch_size, height, width, 3))
        _, reference_points = inception_v2.inception_v2(inputs, num_classes)

        interesting = [
            name for name in reference_points
            if name.startswith(('Mixed', 'Conv'))
        ]

        _, widened_points = inception_v2.inception_v2(
            inputs,
            num_classes,
            scope='depth_multiplied_net',
            depth_multiplier=2.0)

        for name in interesting:
            ref_depth = reference_points[name].get_shape().as_list()[3]
            wide_depth = widened_points[name].get_shape().as_list()[3]
            self.assertEqual(2.0 * ref_depth, wide_depth)
  def testUnknownBatchSize(self):
    """The graph must build with batch dim None and run with a real batch."""
    batch_size = 1
    height, width = 224, 224
    num_classes = 1000

    inputs = array_ops.placeholder(dtypes.float32, (None, height, width, 3))
    logits, _ = inception_v2.inception_v2(inputs, num_classes)
    self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
    self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
    images = random_ops.random_uniform((batch_size, height, width, 3))

    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      output = sess.run(logits, {inputs: images.eval()})
      # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
      # use assertEqual.
      self.assertEqual(output.shape, (batch_size, num_classes))
Example #19
0
    def testEvaluation(self):
        """Inference mode must emit one prediction per evaluation image."""
        batch_size = 2
        height, width = 224, 224
        num_classes = 1000

        eval_inputs = random_ops.random_uniform((batch_size, height, width, 3))
        logits, _ = inception_v2.inception_v2(eval_inputs,
                                              num_classes,
                                              is_training=False)
        predictions = math_ops.argmax(logits, 1)

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(predictions)
            # Fix: assertEquals is a deprecated alias (removed in
            # Python 3.12); use assertEqual.
            self.assertEqual(output.shape, (batch_size, ))
Example #20
0
def test_program():
    """Restore InceptionV2 weights from a checkpoint and classify one image.

    Builds the inference graph, restores the latest checkpoint from
    checkpoint_dir (if any), feeds one 224x224 RGB image, and prints the
    maximum prediction probability and logit.
    """
    with tf.Graph().as_default():
        slim = tf.contrib.slim

        checkpoint_dir = '/home/taivu/workspace/NudityDetection/Trained_weight'

        input_tensor = tf.placeholder(tf.float32,
                                      shape=(None, 224, 224, 3),
                                      name='input_image')
        # Scale uint8-range pixels to [-1, 1] (standard Inception input).
        scaled_input_tensor = tf.scalar_mul((1.0 / 255), input_tensor)
        # Fix: tf.sub / tf.mul were removed in TF 1.0; use the renamed ops
        # (available since TF 0.12, alongside global_variables_initializer).
        scaled_input_tensor = tf.subtract(scaled_input_tensor, 0.5)
        scaled_input_tensor = tf.multiply(scaled_input_tensor, 2.0)

        arg_scope = inception_v2_arg_scope()
        with slim.arg_scope(arg_scope):
            logits, end_points = inception_v2(scaled_input_tensor,
                                              is_training=False)

        init = tf.global_variables_initializer()

        saver = tf.train.Saver(tf.global_variables())

        coord = tf.train.Coordinator()

        with tf.Session() as sess:

            sess.run(init)

            threads = tf.train.start_queue_runners(sess, coord)

            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)

            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)

            im = Image.open(
                '/home/taivu/workspace/NudityDetection/dog.jpeg').resize(
                    (224, 224))
            im = np.array(im)
            im = im.reshape(1, 224, 224, 3)

            predict_values, logit_values = sess.run(
                [end_points['Predictions'], logits],
                feed_dict={input_tensor: im})

            print(np.max(predict_values), np.max(logit_values))

        coord.request_stop()
        # Fix: join the queue-runner threads so shutdown completes cleanly.
        coord.join(threads)
Example #21
0
    def testUnknownBatchSize(self):
        """Batch dim None at build time must still run with concrete data."""
        batch_size = 1
        height, width = 224, 224
        num_classes = 1000

        inputs = array_ops.placeholder(dtypes.float32,
                                       (None, height, width, 3))
        logits, _ = inception_v2.inception_v2(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
        images = random_ops.random_uniform((batch_size, height, width, 3))

        with self.test_session() as sess:
            sess.run(variables.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            # Fix: assertEquals is a deprecated alias (removed in
            # Python 3.12); use assertEqual.
            self.assertEqual(output.shape, (batch_size, num_classes))
 def testUnknownImageShape(self):
     """224x224 content through a spatially-unknown placeholder works."""
     ops.reset_default_graph()
     batch_size = 2
     height, width = 224, 224
     num_classes = 1000
     input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
     with self.test_session() as sess:
         images = array_ops.placeholder(
             dtypes.float32, shape=(batch_size, None, None, 3))
         logits, end_points = inception_v2.inception_v2(images, num_classes)
         self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
         pre_pool = end_points['Mixed_5c']
         variables.global_variables_initializer().run()
         pre_pool_out = sess.run(pre_pool, feed_dict={images: input_np})
         self.assertListEqual(list(pre_pool_out.shape),
                              [batch_size, 7, 7, 1024])
Example #23
0
 def testUnknownImageShape(self):
     """Unknown spatial dims must still yield the 7x7x1024 Mixed_5c map."""
     ops.reset_default_graph()
     batch_size = 2
     num_classes = 1000
     sample = np.random.uniform(0, 1, (batch_size, 224, 224, 3))
     with self.test_session() as sess:
         images = array_ops.placeholder(dtypes.float32,
                                        shape=(batch_size, None, None, 3))
         logits, end_points = inception_v2.inception_v2(images, num_classes)
         self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
         self.assertListEqual(logits.get_shape().as_list(),
                              [batch_size, num_classes])
         variables.global_variables_initializer().run()
         features = sess.run(end_points['Mixed_5c'],
                             feed_dict={images: sample})
         self.assertListEqual(list(features.shape),
                              [batch_size, 7, 7, 1024])