Example #1
    def testTrainableFalseIsTrainingFalse(self):
        embeddings = full_context_embeddings_cnn(self._images,
                                                 trainable=False,
                                                 is_training=False)
        self.assertEqual([self._batch_size, 64],
                         embeddings.get_shape().as_list())

        self._verifyParameterCounts()
        self._assertCollectionSize(16, tf.GraphKeys.GLOBAL_VARIABLES)
        self._assertCollectionSize(0, tf.GraphKeys.TRAINABLE_VARIABLES)
        self._assertCollectionSize(0, tf.GraphKeys.UPDATE_OPS)
        self._assertCollectionSize(0, tf.GraphKeys.REGULARIZATION_LOSSES)
        self._assertCollectionSize(0, tf.GraphKeys.LOSSES)
        self._assertCollectionSize(4, tf.GraphKeys.SUMMARIES)
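
The test leans on helpers `_verifyParameterCounts` and `_assertCollectionSize` that the snippet doesn't show. A minimal sketch of what the latter presumably does, assuming it just counts entries in the named graph collection (the real helper may differ):

    def _assertCollectionSize(self, expected_size, collection):
        # Hypothetical helper (not in the source): compare the number of
        # entries in a graph collection against the expected count.
        actual_size = len(tf.get_collection(collection))
        self.assertEqual(expected_size, actual_size)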
Example #2
    def build_fully_conditional_embedding_g(self):
        """Builds the fully conditional embedding g

        Inputs:
          self.sounds

        Outputs:
          self.g_embeddings
        """
        model_output = full_context_embeddings_cnn(
            self.support_set_sounds,
            trainable=self.train_model,
            is_training=self.is_training(),
            scope="g_embedding_support_vectors")

        self.g_model_variables = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope="g_embedding_support_vectors")

        with tf.variable_scope("fce_embedding_g") as scope:
            # Treat the support set as a single time step: [num_support, 64]
            # becomes a length-1 sequence for the static bidirectional RNN.
            model_output = tf.expand_dims(model_output, 0)
            model_output = tf.unstack(model_output)

            # Forward direction cell. num_units must be an integer, and each
            # direction gets half the embedding size so the concatenated
            # bidirectional output has embedding_size units.
            cell_fw = tf.contrib.rnn.LSTMCell(self.config.embedding_size // 2,
                                              initializer=self.initializer,
                                              use_peepholes=True,
                                              state_is_tuple=True)
            # Backward direction cell.
            cell_bw = tf.contrib.rnn.LSTMCell(self.config.embedding_size // 2,
                                              initializer=self.initializer,
                                              use_peepholes=True,
                                              state_is_tuple=True)

            (outputs, state,
             _) = tf.contrib.rnn.static_bidirectional_rnn(cell_fw,
                                                          cell_bw,
                                                          model_output,
                                                          dtype=tf.float32)

        self.g_embedding = outputs
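
For context, `tf.contrib.rnn.static_bidirectional_rnn` concatenates the forward and backward outputs at each step, which is why each cell above uses half the embedding size. A minimal standalone sketch of the shape flow (the placeholder sizes are illustrative, not from the source):

    import tensorflow as tf

    support = tf.placeholder(tf.float32, [5, 64])      # 5 support sounds, 64-d features
    sequence = tf.unstack(tf.expand_dims(support, 0))  # length-1 sequence of [5, 64]
    cell_fw = tf.contrib.rnn.LSTMCell(32)              # 64 // 2 units per direction
    cell_bw = tf.contrib.rnn.LSTMCell(32)
    outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(
        cell_fw, cell_bw, sequence, dtype=tf.float32)
    print(outputs[0].get_shape().as_list())            # [5, 64]: fw and bw halves concatenated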
Example #3
    def build_fully_conditional_embedding_f(self):
        """Builds the fully conditional embedding f

        Inputs:
          self.sounds

        Outputs:
          self.sound_embeddings
        """
        model_output = full_context_embeddings_cnn(
            self.test_sound,
            trainable=self.train_model,
            is_training=self.is_training(),
            scope="f_test_support_vector")

        self.f_model_variables = tf.get_collection(
            tf.GraphKeys.GLOBAL_VARIABLES, scope="f_test_support_vector")

        with tf.variable_scope('fce_embedding_f',
                               initializer=self.initializer) as scope:
            # Feed the test sound embedding to set the initial LSTM state.
            cell = tf.contrib.rnn.LSTMCell(
                num_units=self.config.embedding_size,
                state_is_tuple=False,
                use_peepholes=True)

            zero_state = cell.zero_state(
                batch_size=model_output.get_shape()[0], dtype=tf.float32)

            output, _ = cell(model_output, zero_state)
            # Residual connection from the CNN features to the LSTM output.
            output = tf.add(model_output, output)

            # Attention over the support-set embeddings. The logits have shape
            # [num_support, 1], so the softmax must run across the support
            # dimension (dim=0); the default last dimension is a singleton.
            attention = tf.nn.softmax(tf.matmul(self.g_embedding[0],
                                                tf.transpose(output)),
                                      dim=0)
            read_out = tf.reduce_sum(tf.matmul(tf.transpose(attention),
                                               self.g_embedding[0]),
                                     0,
                                     keep_dims=True)
            # The next LSTM state is the concatenation [h; read_out]; with
            # state_is_tuple=False the state must have size 2 * embedding_size.
            h_concatenated = tf.concat([output, read_out], 1)

            scope.reuse_variables()
            # Reuse the same LSTM variables for every processing step.

            for _ in range(self.config.lstm_processing_steps):
                output, _ = cell(model_output, h_concatenated)
                output = tf.add(model_output, output)
                attention = tf.nn.softmax(tf.matmul(self.g_embedding[0],
                                                    tf.transpose(output)),
                                          dim=0)
                read_out = tf.reduce_sum(tf.matmul(tf.transpose(attention),
                                                   self.g_embedding[0]),
                                         0,
                                         keep_dims=True)
                h_concatenated = tf.concat([output, read_out], 1)

        self.f_embedding = output
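
Neither snippet shows how `f_embedding` and `g_embedding` are consumed downstream. A hedged sketch of the usual matching-networks read-out, mirroring the attention already computed inside `build_fully_conditional_embedding_f`; all names here are hypothetical placeholders, not from the source:

    def matching_networks_readout(f_embedding, g_embeddings, support_labels):
        # f_embedding: [1, embedding_size]; g_embeddings: [num_support,
        # embedding_size]; support_labels: one-hot [num_support, num_classes].
        similarities = tf.matmul(f_embedding, g_embeddings, transpose_b=True)
        attention = tf.nn.softmax(similarities)      # [1, num_support]
        return tf.matmul(attention, support_labels)  # [1, num_classes]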