Code example #1
Score: 0
File: coref_model.py  Project: cbiehl/e2e-coref
  def lstm_contextualize(self, text_emb, text_len, text_len_mask):
    """Contextualize word embeddings with stacked bidirectional LSTMs.

    Args:
      text_emb: [num_sentences, max_sentence_length, emb] word embeddings.
      text_len: [num_sentences] true (unpadded) length of each sentence.
      text_len_mask: mask passed to flatten_emb_by_sentence to drop padding.

    Returns:
      Flattened contextualized embeddings, one row per real (unpadded) word.
    """
    num_sentences = tf.shape(text_emb)[0]

    current_inputs = text_emb # [num_sentences, max_sentence_length, emb]

    for layer in range(self.config["contextualization_layers"]):
      with tf.variable_scope("layer_{}".format(layer)):
        # Separate scopes keep forward/backward cell parameters distinct.
        with tf.variable_scope("fw_cell"):
          cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
        with tf.variable_scope("bw_cell"):
          cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
        # Tile the cells' learned initial states across the sentence batch.
        state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
        state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))

        (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
          cell_fw=cell_fw,
          cell_bw=cell_bw,
          inputs=current_inputs,
          sequence_length=text_len,
          initial_state_fw=state_fw,
          initial_state_bw=state_bw)

        text_outputs = tf.concat([fw_outputs, bw_outputs], 2) # [num_sentences, max_sentence_length, emb]
        text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)
        if layer > 0:
          # Highway (gated residual) connection between stacked layers.
          highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs, 2))) # [num_sentences, max_sentence_length, emb]
          text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
        current_inputs = text_outputs

    # Return current_inputs (== text_outputs after any iteration) so a
    # zero-layer configuration falls back to the raw embeddings instead of
    # raising NameError on an unbound text_outputs.
    return self.flatten_emb_by_sentence(current_inputs, text_len_mask)
Code example #2
Score: 0
    def encode_sentences(self, text_emb, text_len, text_len_mask):
        """Encode sentences with separate forward/backward custom LSTMs.

        The backward pass is implemented by reversing each input sequence,
        running a forward-time dynamic_rnn, and reversing the outputs back,
        so both directions reuse the same time-major machinery.

        Args:
            text_emb: [num_sentences, max_sentence_length, emb] embeddings.
            text_len: [num_sentences] true length of each sentence.
            text_len_mask: mask used to flatten padded positions away.

        Returns:
            Flattened bidirectional outputs, one row per real word.
        """
        num_sentences = tf.shape(text_emb)[0]

        # Transpose to time-major before and after for efficiency.
        inputs = tf.transpose(
            text_emb, [1, 0, 2])  # [max_sentence_length, num_sentences, emb]

        with tf.variable_scope("fw_cell"):
            cell_fw = util.CustomLSTMCell(self.config["lstm_size"],
                                          num_sentences, self.dropout)
            preprocessed_inputs_fw = cell_fw.preprocess_input(inputs)
        with tf.variable_scope("bw_cell"):
            cell_bw = util.CustomLSTMCell(self.config["lstm_size"],
                                          num_sentences, self.dropout)
            preprocessed_inputs_bw = cell_bw.preprocess_input(inputs)
            # Reverse each sequence (respecting its true length) so the
            # backward direction can run as a forward-time RNN.
            preprocessed_inputs_bw = tf.reverse_sequence(
                preprocessed_inputs_bw,
                seq_lengths=text_len,
                seq_dim=0,
                batch_dim=1)
        # Tile the cells' learned initial states across the sentence batch.
        state_fw = tf.contrib.rnn.LSTMStateTuple(
            tf.tile(cell_fw.initial_state.c, [num_sentences, 1]),
            tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
        state_bw = tf.contrib.rnn.LSTMStateTuple(
            tf.tile(cell_bw.initial_state.c, [num_sentences, 1]),
            tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))
        with tf.variable_scope("lstm"):
            with tf.variable_scope("fw_lstm"):
                fw_outputs, fw_states = tf.nn.dynamic_rnn(
                    cell=cell_fw,
                    inputs=preprocessed_inputs_fw,
                    sequence_length=text_len,
                    initial_state=state_fw,
                    time_major=True)
            with tf.variable_scope("bw_lstm"):
                bw_outputs, bw_states = tf.nn.dynamic_rnn(
                    cell=cell_bw,
                    inputs=preprocessed_inputs_bw,
                    sequence_length=text_len,
                    initial_state=state_bw,
                    time_major=True)

        # Undo the input reversal so fw/bw outputs are position-aligned.
        bw_outputs = tf.reverse_sequence(bw_outputs,
                                         seq_lengths=text_len,
                                         seq_dim=0,
                                         batch_dim=1)

        text_outputs = tf.concat([fw_outputs, bw_outputs], 2)
        text_outputs = tf.transpose(
            text_outputs,
            [1, 0, 2])  # [num_sentences, max_sentence_length, emb]
        return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
Code example #3
Score: 0
    def encode_sentences(self, text_emb, text_len, text_len_mask):
        """Encode sentences bidirectionally (PyTorch port of the TF version).

        Args:
            text_emb: [num_sentences, max_sentence_length, emb] embeddings.
            text_len: per-sentence true lengths, used to reverse sequences.
            text_len_mask: mask used to flatten padded positions away.

        Returns:
            Flattened bidirectional outputs, one row per real word.
        """
        # Local import: this ported block needs torch ops (torch.cat).
        import torch

        num_sentences = text_emb.shape[0]

        # Time-major layout for the recurrent cells. BUG FIX: tf.transpose
        # with a permutation list is TensorFlow-only; use .permute in torch.
        inputs = text_emb.permute(
            1, 0, 2)  # [max_sentence_length, num_sentences, emb]

        cell_fw = util.CustomLSTMCell(self.config["lstm_size"], num_sentences,
                                      self.dropout)
        preprocessed_inputs_fw = cell_fw.preprocess_input(inputs)
        cell_bw = util.CustomLSTMCell(self.config["lstm_size"], num_sentences,
                                      self.dropout)
        preprocessed_inputs_bw = cell_bw.preprocess_input(inputs)
        # Reverse each sequence so the backward direction can run forward-time.
        preprocessed_inputs_bw = self.reverse_tensor(preprocessed_inputs_bw,
                                                     seq_lengths=text_len,
                                                     seq_dim=0,
                                                     batch_dim=1)

        # BUG FIX: the original wrapped these tensors in nn.LSTMCell(...),
        # which CONSTRUCTS a recurrent cell (its arguments are input/hidden
        # sizes, not tensors). The initial state is just a (c, h) pair tiled
        # across the batch; also made the two repeat() call styles consistent.
        state_fw = (cell_fw.initial_state.c.repeat(num_sentences, 1),
                    cell_fw.initial_state.h.repeat(num_sentences, 1))
        state_bw = (cell_bw.initial_state.c.repeat(num_sentences, 1),
                    cell_bw.initial_state.h.repeat(num_sentences, 1))
        # NOTE(review): assumes util.CustomLSTMCell accepts a (c, h) state
        # tuple in this order -- confirm against its implementation.

        fw_outputs, fw_states = cell_fw(preprocessed_inputs_fw, state_fw)
        bw_outputs, bw_states = cell_bw(preprocessed_inputs_bw, state_bw)

        # Undo the input reversal so fw/bw outputs are position-aligned.
        bw_outputs = self.reverse_tensor(bw_outputs,
                                         seq_lengths=text_len,
                                         seq_dim=0,
                                         batch_dim=1)

        # BUG FIX: tf.cat does not exist; concatenate with torch.cat and
        # return to batch-major layout with .permute.
        text_outputs = torch.cat([fw_outputs, bw_outputs], 2)
        text_outputs = text_outputs.permute(
            1, 0, 2)  # [num_sentences, max_sentence_length, emb]
        return self.flatten_emb_by_sentence(text_outputs, text_len_mask)
Code example #4
Score: 0
  def lstm_contextualize(self, text_emb, text_len, text_len_mask):
    """Contextualize word embeddings with stacked bidirectional LSTMs.

    Args:
      text_emb: [num_sentences, max_sentence_length, emb] word embeddings.
      text_len: [num_sentences] true (unpadded) length of each sentence.
      text_len_mask: mask passed to flatten_emb_by_sentence to drop padding.

    Returns:
      Flattened contextualized embeddings, one row per real (unpadded) word.
    """
    # (Removed a large slab of commented-out session-debugging code that
    # referenced undefined names and contained a `self..` typo.)
    num_sentences = tf.shape(text_emb)[0]

    current_inputs = text_emb # [num_sentences, max_sentence_length, emb]

    for layer in range(self.config["contextualization_layers"]):
      with tf.variable_scope("layer_{}".format(layer)):
        # Separate scopes keep forward/backward cell parameters distinct.
        with tf.variable_scope("fw_cell"):
          cell_fw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
        with tf.variable_scope("bw_cell"):
          cell_bw = util.CustomLSTMCell(self.config["contextualization_size"], num_sentences, self.lstm_dropout)
        # Tile the cells' learned initial states across the sentence batch.
        state_fw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_fw.initial_state.c, [num_sentences, 1]), tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
        state_bw = tf.contrib.rnn.LSTMStateTuple(tf.tile(cell_bw.initial_state.c, [num_sentences, 1]), tf.tile(cell_bw.initial_state.h, [num_sentences, 1]))

        (fw_outputs, bw_outputs), _ = tf.nn.bidirectional_dynamic_rnn(
          cell_fw=cell_fw,
          cell_bw=cell_bw,
          inputs=current_inputs,
          sequence_length=text_len,
          initial_state_fw=state_fw,
          initial_state_bw=state_bw)

        text_outputs = tf.concat([fw_outputs, bw_outputs], 2) # [num_sentences, max_sentence_length, emb]
        text_outputs = tf.nn.dropout(text_outputs, self.lstm_dropout)
        if layer > 0:
          # Highway (gated residual) connection between stacked layers.
          highway_gates = tf.sigmoid(util.projection(text_outputs, util.shape(text_outputs, 2))) # [num_sentences, max_sentence_length, emb]
          text_outputs = highway_gates * text_outputs + (1 - highway_gates) * current_inputs
        current_inputs = text_outputs

    # Return current_inputs (== text_outputs after any iteration) so a
    # zero-layer configuration falls back to the raw embeddings instead of
    # raising NameError on an unbound text_outputs.
    return self.flatten_emb_by_sentence(current_inputs, text_len_mask)
Code example #5
Score: 0
    def encode_sentences_unilstm(self, text_conv):
        """Encode sentences with a single forward (unidirectional) LSTM.

        Args:
            text_conv: [num_sentences, max_sentence_length, emb] features
                (e.g. convolution outputs).

        Returns:
            [num_sentences, max_sentence_length, emb] LSTM outputs.
        """
        num_sentences = tf.shape(text_conv)[0]

        # Transpose to time-major before and after for efficiency.
        inputs = tf.transpose(
            text_conv, [1, 0, 2])  # [max_sentence_length, num_sentences, emb]

        with tf.variable_scope("fw_cell_uni"):
            cell_fw = util.CustomLSTMCell(self.config["lstm_size"],
                                          num_sentences, self.dropout)
            preprocessed_inputs_fw = cell_fw.preprocess_input(inputs)
        # Tile the cell's learned initial state across the sentence batch.
        state_fw = tf.contrib.rnn.LSTMStateTuple(
            tf.tile(cell_fw.initial_state.c, [num_sentences, 1]),
            tf.tile(cell_fw.initial_state.h, [num_sentences, 1]))
        with tf.variable_scope("lstm_uni"):
            with tf.variable_scope("fw_lstm_uni"):
                # NOTE(review): unlike the bidirectional variants, no
                # sequence_length is passed, so outputs at padded positions
                # are not zeroed -- confirm callers mask them downstream.
                fw_outputs, fw_states = tf.nn.dynamic_rnn(
                    cell=cell_fw,
                    inputs=preprocessed_inputs_fw,
                    initial_state=state_fw,
                    time_major=True)

        text_outputs = tf.transpose(
            fw_outputs, [1, 0, 2])  # [num_sentences, max_sentence_length, emb]
        return text_outputs