Example #1
File: sero_core.py  Project: clover3/Chair
    def call(self, input_vectors, use_context):
        # input_vectors : [batch_size, num_window (seq_length), hidden_size]
        batch_size, seq_length, hidden_dim = bc.get_shape_list2(input_vectors)
        # Add position embedding
        input_vectors = bc.embedding_postprocessor2(
            input_tensor=input_vectors,
            token_type_table=self.token_type_table,
            full_position_embeddings=self.full_position_embeddings,
            use_token_type=False,
            token_type_ids=None,
            token_type_vocab_size=1,
            use_position_embeddings=True,
            max_position_embeddings=self.config.max_num_window,
            dropout_prob=self.config.hidden_dropout_prob)

        input_shape = [batch_size, seq_length]

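        # Attention mask of shape [batch, seq, seq]; row i is all ones
        # iff use_context[:, i] == 1.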
        attention_mask = tf.ones([batch_size, seq_length, seq_length],
                                 tf.int32) * tf.expand_dims(use_context, 2)
        with tf.compat.v1.variable_scope("mid"):
            prev_output = bc.reshape_to_matrix(input_vectors)
            for layer_idx in range(self.n_layers):
                with tf.compat.v1.variable_scope("layer_%d" % layer_idx):
                    layer = self.layer_list[layer_idx]
                    intermediate_output, prev_output = layer.apply(
                        prev_output, batch_size, seq_length, attention_mask)
                    final_output = bc.reshape_from_matrix2(
                        prev_output, input_shape)
                    self.all_layer_outputs.append(final_output)

        return prev_output
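
The only non-obvious step above is the attention-mask construction, which broadcasts the [batch, seq] use_context indicator against an all-ones [batch, seq, seq] tensor. A minimal, self-contained sketch of just that step (sizes and values below are illustrative assumptions, not taken from the Chair project):

import tensorflow as tf

batch_size, seq_length = 2, 4
use_context = tf.constant([[1, 1, 0, 1],
                           [1, 0, 1, 1]], tf.int32)        # [batch, seq]

# Broadcasting [batch, seq, 1] against [batch, seq, seq]: row i of the mask
# is all ones iff use_context[b, i] == 1.
attention_mask = tf.ones([batch_size, seq_length, seq_length],
                         tf.int32) * tf.expand_dims(use_context, 2)
print(attention_mask[0].numpy())
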
Example #2
File: units.py  Project: clover3/Chair
    def apply(self, input_ids, segment_ids, initializer_range, vocab_size,
              hidden_size, type_vocab_size, max_position_embeddings,
              hidden_dropout_prob, use_one_hot_embeddings):
        initializer = bc.create_initializer(initializer_range)
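        # Create the word, token-type, and position embedding tables.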
        self.embedding_table = tf.compat.v1.get_variable(
            name="word_embeddings",
            shape=[vocab_size, hidden_size],
            initializer=initializer)
        self.token_type_table = tf.compat.v1.get_variable(
            name="token_type_embeddings",
            shape=[type_vocab_size, hidden_size],
            initializer=initializer)
        self.full_position_embeddings = tf.compat.v1.get_variable(
            name="position_embeddings",
            shape=[max_position_embeddings, hidden_size],
            initializer=initializer)

        # Perform embedding lookup on the word ids.
        (self.embedding_output, self.embedding_table) = bc.embedding_lookup2(
            input_ids=input_ids,
            embedding_table=self.embedding_table,
            vocab_size=vocab_size,
            embedding_size=hidden_size,
            use_one_hot_embeddings=use_one_hot_embeddings)

        # Add positional embeddings and token type embeddings, then layer
        # normalize and perform dropout.
        self.embedding_output = bc.embedding_postprocessor2(
            input_tensor=self.embedding_output,
            token_type_table=self.token_type_table,
            full_position_embeddings=self.full_position_embeddings,
            use_token_type=True,
            token_type_ids=segment_ids,
            token_type_vocab_size=type_vocab_size,
            use_position_embeddings=True,
            max_position_embeddings=max_position_embeddings,
            dropout_prob=hidden_dropout_prob)
        return self.embedding_output
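
Stripped of the bc helpers, the embedding step this unit computes is a word-embedding lookup followed by adding token-type and position embeddings; the layer normalization and dropout performed inside bc.embedding_postprocessor2 are omitted. A minimal sketch under those assumptions, with illustrative sizes:

import tensorflow as tf

vocab_size, type_vocab_size, max_positions, hidden = 100, 2, 16, 8
word_emb = tf.random.normal([vocab_size, hidden])
type_emb = tf.random.normal([type_vocab_size, hidden])
pos_emb = tf.random.normal([max_positions, hidden])

input_ids = tf.constant([[5, 7, 9, 0]])      # [batch=1, seq=4]
segment_ids = tf.constant([[0, 0, 1, 1]])    # [batch=1, seq=4]
seq_length = input_ids.shape[1]

out = tf.gather(word_emb, input_ids)              # word-embedding lookup
out += tf.gather(type_emb, segment_ids)           # add token-type embeddings
out += pos_emb[tf.newaxis, :seq_length, :]        # add position embeddings
print(out.shape)                                  # (1, 4, 8)
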