def build_image_embeddings(self):
    """Builds the image model subgraph and generates image embeddings.

    Inputs:
      self.images

    Outputs:
      self.image_embeddings
    """
    # Run the Inception v3 CNN over the input images.
    cnn_features = image_embedding.inception_v3(
        self.images,
        trainable=self.train_inception,
        is_training=self.is_training())

    # Keep a handle on the Inception variables so they can be restored
    # from a pre-trained checkpoint.
    self.inception_variables = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")

    # Project the CNN features into the embedding space with a single
    # linear (no activation, no bias) fully connected layer.
    with tf.variable_scope("image_embedding") as scope:
        embeddings = tf.contrib.layers.fully_connected(
            inputs=cnn_features,
            num_outputs=self.config.embedding_size,
            activation_fn=None,
            weights_initializer=self.initializer,
            biases_initializer=None,
            scope=scope)

    # Record the embedding size as a named graph constant.
    tf.constant(self.config.embedding_size, name="embedding_size")

    self.image_embeddings = embeddings
# --- Example #2 (score: 0) ---
  def build_image_embeddings(self):
    """Builds the image model subgraph and generates image embeddings.

    Inputs:
      self.images

    Outputs:
      self.image_embeddings
    """
    # Extract image features with the Inception v3 network.
    features = image_embedding.inception_v3(
        self.images,
        trainable=self.train_inception,
        is_training=self.is_training())
    # Remember the Inception variables for checkpoint restoration.
    self.inception_variables = tf.get_collection(
        tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")

    # Linearly project the features into the embedding space
    # (no activation function, no bias term).
    with tf.variable_scope("image_embedding") as scope:
      projected = tf.contrib.layers.fully_connected(
          inputs=features,
          num_outputs=self.config.embedding_size,
          activation_fn=None,
          weights_initializer=self.initializer,
          biases_initializer=None,
          scope=scope)

    # Expose the embedding size as a named constant in the graph.
    tf.constant(self.config.embedding_size, name="embedding_size")

    self.image_embeddings = projected
# --- Example #3 (score: 0) ---
    def build_image_embeddings(self):
        """Builds the image model subgraph and generates image embeddings.

        Inputs:
            self.images

        Outputs:
            self.inception_output
            self.image_embedding_map
            self.image_embeddings
        """
        # Run Inception v3 over the batch of images.
        cnn_output = image_embedding.inception_v3(
                self.images,
                trainable=self.train_inception,
                is_training=self.is_training())
        # Collect the Inception variables for checkpoint restoration.
        self.inception_variables = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")

        # Average-pool the Inception features over the spatial axes (1, 2).
        pooled = tf.reduce_mean(cnn_output, axis=[1, 2])

        # Learn a linear map from the pooled features into embedding space.
        with tf.variable_scope("image_embedding"):
            projection = tf.get_variable(
                    name="image_map",
                    shape=[pooled.shape[1], self.config.embedding_size],
                    initializer=self.initializer)
            embedded = tf.tensordot(pooled, projection, 1)

        # Record the embedding size as a named graph constant.
        tf.constant(self.config.embedding_size, name="embedding_size")

        self.inception_output = cnn_output
        self.image_embedding_map = projection
        self.image_embeddings = embedded
  def testTrainableTrueIsTrainingFalse(self):
    """Trainable Inception in eval mode: full variables, no update ops."""
    embeddings = image_embedding.inception_v3(
        self._images, trainable=True, is_training=False)
    self.assertEqual([self._batch_size, 2048], embeddings.get_shape().as_list())

    self._verifyParameterCounts()
    # Expected (size, collection) pairs, checked in order.
    expected = [
        (376, tf.GraphKeys.GLOBAL_VARIABLES),
        (188, tf.GraphKeys.TRAINABLE_VARIABLES),
        (0, tf.GraphKeys.UPDATE_OPS),
        (94, tf.GraphKeys.REGULARIZATION_LOSSES),
        (0, tf.GraphKeys.LOSSES),
        (23, tf.GraphKeys.SUMMARIES),
    ]
    for size, collection in expected:
      self._assertCollectionSize(size, collection)
  def testTrainableTrueIsTrainingFalse(self):
    """Checks collection sizes when Inception is trainable but not training."""
    output = image_embedding.inception_v3(
        self._images, trainable=True, is_training=False)
    self.assertEqual([self._batch_size, 2048], output.get_shape().as_list())

    self._verifyParameterCounts()
    # All 376 global variables exist and half of them (188) are trainable;
    # eval mode produces no batch-norm update ops and no losses.
    self._assertCollectionSize(376, tf.GraphKeys.GLOBAL_VARIABLES)
    self._assertCollectionSize(188, tf.GraphKeys.TRAINABLE_VARIABLES)
    self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
    self._assertCollectionSize(94, tf.GraphKeys.REGULARIZATION_LOSSES)
    self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
    self._assertCollectionSize(23, tf.GraphKeys.SUMMARIES)
# --- Example #6 (score: 0) ---
    def build_image_embeddings(self):
        """Builds the image model subgraph and generates image embeddings.

        The CNN backbone is selected by ``self.config.CNN_name``; its output
        is linearly projected into the caption embedding space.

        Inputs:
          self.images

        Outputs:
          self.image_embeddings

        Raises:
          ValueError: if ``self.config.CNN_name`` is 'ResNet' (not yet
            implemented) or not one of the recognized backbone names.
        """
        if self.config.CNN_name == 'InceptionV3':
            cnn_output = image_embedding.inception_v3(
                self.images,
                trainable=self.train_inception,
                is_training=self.is_training())
        elif self.config.CNN_name == 'InceptionV4':
            cnn_output = image_embedding.inception_v4(
                self.images,
                trainable=self.train_inception,
                is_training=self.is_training())
        elif self.config.CNN_name == 'DenseNet':
            cnn_output = image_embedding.DenseNet(x=self.images,
                                                  nb_blocks=None,
                                                  filters=24,
                                                  training=self.is_training(),
                                                  scope='DenseNet').model
        elif self.config.CNN_name == 'ResNet':
            # BUG FIX: the original format string had no %s placeholder, so
            # the "%" interpolation raised a TypeError ("not all arguments
            # converted") instead of the intended ValueError.
            raise ValueError(
                "================== %s尚未实现! ====================" %
                self.config.CNN_name)
        else:
            raise ValueError(
                'CNN_name [%s] was not recognized.(InceptionV3 InceptionV4 DenseNet ResNet)'
                % self.config.CNN_name)

        # Variables of the selected backbone, for checkpoint restoration.
        # NOTE(review): the attribute name says "inception" but it holds
        # whichever backbone was selected.
        self.inception_variables = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope=self.config.CNN_name)

        # Map the CNN output into embedding space (linear, no bias).
        with tf.variable_scope("image_embedding") as scope:
            image_embeddings = tf.contrib.layers.fully_connected(
                inputs=cnn_output,
                num_outputs=self.config.embedding_size,
                activation_fn=None,
                weights_initializer=self.initializer,
                biases_initializer=None,
                scope=scope)

        # Save the embedding size in the graph.
        tf.constant(self.config.embedding_size, name="embedding_size")

        self.image_embeddings = image_embeddings
def make_image_embeddings(images):
    """Runs Inception v3 on `images` and linearly projects to 512 dims.

    Returns:
      A (inception_output, image_embeddings) tuple.
    """
    # Frozen Inception v3 feature extractor (is_training=True is kept
    # exactly as in the original contract).
    cnn_features = image_embedding.inception_v3(images,
                                                trainable=False,
                                                is_training=True)

    uniform_init = tf.random_uniform_initializer(minval=-0.08, maxval=0.08)

    # Linear projection into a 512-dimensional embedding (no bias).
    with tf.variable_scope("image_embedding") as scope:
        projected = tf.contrib.layers.fully_connected(
            inputs=cnn_features,
            num_outputs=512,
            activation_fn=None,
            weights_initializer=uniform_init,
            biases_initializer=None,
            scope=scope)

    return cnn_features, projected
# --- Example #8 (score: 0) ---
    def build_image_embeddings(self):
        """Builds the image model subgraph and generates image embeddings.

        Runs the shared Inception v3 tower over each of the five image
        inputs (self.images_0 .. self.images_4) and projects every output
        with the SAME fully connected layer, producing
        self.image_embeddings_0 .. self.image_embeddings_4.

        The original implementation repeated the identical call sequence
        five times (including five redundant, identical
        ``tf.get_collection`` assignments); this version builds the exact
        same ops in a loop.
        """
        num_inputs = 5

        # One Inception v3 pass per image input, all under the same scope
        # so the CNN weights are shared across the five towers.
        inception_outputs = []
        for i in range(num_inputs):
            inception_outputs.append(image_embedding.inception_v3(
                getattr(self, "images_%d" % i),
                trainable=self.train_inception,
                is_training=self.is_training(),
                scope="InceptionV3"))

        # A single collection lookup suffices: the variables are shared and
        # the repeated assignments in the original all produced this value.
        self.inception_variables = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope="InceptionV3")

        # Map each Inception output into embedding space with one shared
        # linear layer (weights created on the first call, reused after).
        with tf.variable_scope("image_embedding") as scope:
            embeddings = []
            for i, inception_output in enumerate(inception_outputs):
                if i > 0:
                    scope.reuse_variables()
                embeddings.append(tf.contrib.layers.fully_connected(
                    inputs=inception_output,
                    num_outputs=self.config.embedding_size,
                    activation_fn=None,
                    weights_initializer=self.initializer,
                    biases_initializer=None,
                    scope=scope))

        # Save the embedding size in the graph.
        tf.constant(self.config.embedding_size, name="embedding_size")

        for i, embedding in enumerate(embeddings):
            setattr(self, "image_embeddings_%d" % i, embedding)