Example #1
File: layers.py Project: kiminh/debias
 def apply(self, is_train, x, mask=None):
     if is_train:
         shape = get_shape_tuple(x)
         # A noise_shape of [batch, 1, dim] shares one dropout mask across the
         # time dimension, so the same features are dropped at every timestep
         return tf.nn.dropout(x,
                              rate=self.dropout_rate,
                              noise_shape=[shape[0], 1, shape[2]])
     else:
         return x
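A note on the helper used throughout these examples: `get_shape_tuple` (from the project's `ops` module) is not shown on this page. Below is a minimal sketch of what such a TF 1.x helper typically looks like, mixing static and dynamic dimensions; this is an illustration, not the repository's actual code:

import tensorflow as tf

def get_shape_tuple(x, dim=None):
    # Prefer static dimensions; fall back to dynamic tf.shape() entries
    # wherever the static size is unknown (None)
    static = x.shape.as_list()
    dynamic = tf.shape(x)
    shape = [d if d is not None else dynamic[i] for i, d in enumerate(static)]
    return shape if dim is None else shape[dim]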
Example #2
def words_to_char_ids(words, word_length, cpu=True):
    flat_words = tf.reshape(words, [-1])
    # Surprisingly, using a py_func here is much faster than the pure tensorflow option,
    # presumably because the tensorflow version has to use .map, which is very slow
    flat_char_ids = tf.py_func(lambda x: words_to_char_ids_py(x, word_length),
                               [flat_words], [tf.int32], False)
    return tf.reshape(flat_char_ids,
                      ops.get_shape_tuple(words) + [word_length])
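The numpy-side helper `words_to_char_ids_py` is not shown. A plausible sketch follows, assuming a simple byte-level encoding padded with zeros (the name comes from the snippet, but the encoding is an assumption):

import numpy as np

def words_to_char_ids_py(words, word_length):
    # `words` arrives from tf.py_func as an array of byte strings; map each
    # word to a fixed-length row of byte ids, truncating or zero-padding
    out = np.zeros((len(words), word_length), dtype=np.int32)
    for i, w in enumerate(words):
        ids = list(w[:word_length])
        out[i, :len(ids)] = ids
    return out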
Example #3
File: layers.py Project: kiminh/debias
    def apply(self, is_train, x, mask=None):
        x_shape = get_shape_tuple(x)
        dim = x_shape[-1]
        time = x_shape[-2]

        if mask is not None:
            x *= tf.expand_dims(tf.sequence_mask(mask, x_shape[1], tf.float32),
                                2)

        if self.leftpad:
            if self.same:
                raise ValueError("Cannot combine left-padding with same-padding")
            x = tf.pad(x, [[0, 0], [self.filter_size - 1, 0], [0, 0]])

        n_filters = self.num_filters
        if self.activation == "glu":
            n_filters *= 2

        if len(x_shape) != 3:
            x = tf.reshape(x, [-1, time, dim])

        filter_ = tf.get_variable("conv1d/filters",
                                  shape=[self.filter_size, dim, n_filters],
                                  dtype='float')
        out = tf.nn.conv1d(x, filter_, 1, "SAME" if self.same else "VALID")

        if self.activation is not None:
            bias = tf.get_variable("conv1d/bias",
                                   shape=[self.num_filters],
                                   dtype='float',
                                   initializer=tf.zeros_initializer())

            if self.activation == "glu":
                gates, lin = tf.split(out, 2, -1)
                out = tf.nn.sigmoid(gates + bias) * lin
            else:
                out = activation_fn(out + bias, self.activation)

        if len(x_shape) != 3:
            out = tf.reshape(out, x_shape[:-2] + get_shape_tuple(out)[-2:])
        return out
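The "glu" branch is a gated linear unit: the convolution produces twice the requested filters, and a sigmoid over one half gates the other half. The non-GLU branch dispatches on an activation name via `activation_fn`, which is not shown; a minimal sketch of such a dispatcher (an assumption, not the repository's implementation):

def activation_fn(x, name):
    # Hypothetical string-to-activation dispatcher for the non-GLU case above
    if name == "relu":
        return tf.nn.relu(x)
    if name == "tanh":
        return tf.tanh(x)
    if name == "sigmoid":
        return tf.nn.sigmoid(x)
    raise ValueError("Unknown activation: %r" % name)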
Example #4
def eval_triviaqa_op(logits, tokens, actual_answers, bound):
    """Tensorflow op to compute em/f1 scores using TriviaQA metrics"""
    answer_spans = ops.get_best_span(logits, bound)

    # Unlike SQuAD, for TriviaQA we don't bother properly untokenizing the
    # span; we just join the tokens with space separators, since that is
    # almost always good enough for TriviaQA
    answer_text = tf.map_fn(
        lambda i: tf.reduce_join(
            tokens[i, answer_spans[i][0]:answer_spans[i][1] + 1],
            0, separator=" "),
        tf.range(ops.get_shape_tuple(logits, 0)),
        dtype=tf.string,
        back_prop=False)
    scores = tf.py_func(_eval_triviaqa_decode, [answer_text, actual_answers],
                        tf.float32, False)
    scores.set_shape([logits.shape.as_list()[0], 2])
    return scores
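A hedged usage sketch, continuing from the tensors above (per the set_shape call, the op presumably returns one EM and one F1 score per example; the bound value is arbitrary here):

em_f1 = eval_triviaqa_op(logits, tokens, actual_answers, bound=8)  # [batch, 2]
mean_scores = tf.reduce_mean(em_f1, axis=0)  # mean EM and mean F1 over the batch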
Example #5
 def compute_clf_loss(self, hidden, logits, bias, labels, mask=None):
   loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels)
   label_one_hot = tf.one_hot(labels, ops.get_shape_tuple(logits, 1))
   weights = 1 - tf.reduce_sum(tf.exp(bias) * label_one_hot, 1)
   return tf.reduce_sum(weights*loss) / tf.reduce_sum(weights)
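A toy numpy check of the re-weighting, assuming (as the tf.exp suggests) that `bias` holds the bias-only model's log-probabilities: each example is weighted by 1 minus the probability the bias model assigns to the correct label, so examples the bias model already solves contribute little to the loss.

import numpy as np

bias = np.log([[0.90, 0.05, 0.05],   # bias model confident and correct
               [0.20, 0.60, 0.20]])  # bias model unsure / wrong
labels = np.array([0, 0])
p_bias_correct = np.exp(bias)[np.arange(2), labels]  # [0.9, 0.2]
weights = 1 - p_bias_correct                         # [0.1, 0.8]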
Example #6
    def apply(self, is_train, x, mask=None, initial_states=None):
        x_t = tf.transpose(x, [1, 0, 2])
        batch = ops.get_shape_tuple(x_t, 1)
        bidir = self.direction == "bi"

        if initial_states is not None:
            # Use the caller-provided states, adding the leading layer dimension
            initial_states = [tf.expand_dims(s, 0) for s in initial_states]
        elif self.learn_initial_states:
            if bidir:
                names = ["fw_h", "fw_c", "bw_h", "bw_c"]
            else:
                names = ["fw_h", "fw_c"]

            initial_states = []
            for n in names:
                v = tf.get_variable(n, (1, self.n_out), tf.float32,
                                    tf.zeros_initializer())
                initial_states.append(
                    tf.tile(tf.expand_dims(v, 1), [1, batch, 1]))
        else:
            initial_states = [
                tf.zeros((1, batch, self.n_out)) for _ in range(2 + 2 * bidir)
            ]

        if self.direction == 'bi':
            with tf.variable_scope("forward"):
                fw = self._apply_transposed(is_train,
                                            x_t,
                                            initial_states=initial_states[:2])
            with tf.variable_scope("backward"):
                x_bw = x_t[::-1] if mask is None else tf.reverse_sequence(
                    x_t, mask, 0, 1)
                bw = self._apply_transposed(is_train,
                                            x_bw,
                                            initial_states=initial_states[2:])
                bw = bw[::-1] if mask is None else tf.reverse_sequence(
                    bw, mask, 0, 1)
            out = tf.concat([fw, bw], axis=2)
        elif self.direction == "fw":
            out = self._apply_transposed(is_train,
                                         x_t,
                                         initial_states=initial_states)
        elif self.direction == "bw":
            x_bw = x_t[::-1] if mask is None else tf.reverse_sequence(
                x_t, mask, 0, 1)
            bw = self._apply_transposed(is_train,
                                        x_bw,
                                        initial_states=initial_states)
            out = bw[::-1] if mask is None else tf.reverse_sequence(
                bw, mask, 0, 1)
        else:
            raise ValueError("Unknown direction: %r" % self.direction)

        out = tf.transpose(out, [1, 0, 2])

        if mask is not None:
            out *= tf.expand_dims(
                tf.cast(tf.sequence_mask(mask,
                                         tf.shape(out)[1]), tf.float32), 2)
        return out
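The backward direction reverses each sequence only up to its true length (so padding stays at the end), runs the RNN, then reverses the output back. A standalone illustration of that tf.reverse_sequence round trip on time-major data (toy values):

import tensorflow as tf

x_t = tf.reshape(tf.range(6, dtype=tf.float32), [3, 2, 1])  # [time=3, batch=2, dim=1]
lengths = tf.constant([2, 3])                                # valid timesteps per example
x_bw = tf.reverse_sequence(x_t, lengths, 0, 1)               # seq_axis=0, batch_axis=1
restored = tf.reverse_sequence(x_bw, lengths, 0, 1)          # round-trips back to x_t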
Example #7
 def get_logit_masked_scores(self, is_train, tensor_1, tensor_2, mask1,
                             mask2):
     atten = self.get_scores(is_train, tensor_1, tensor_2)
     dim1, dim2 = ops.get_shape_tuple(atten)[1:]
     mask = compute_attention_mask(mask1, mask2, dim1, dim2)
     return ops.mask_logits(atten, mask)
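Neither `compute_attention_mask` nor `ops.mask_logits` is shown here. A minimal sketch of what such helpers typically do, assuming `mask1` and `mask2` are per-example sequence lengths (an assumption, not the project's code):

def compute_attention_mask(mask1, mask2, dim1, dim2):
    # [batch, dim1, dim2] mask that is True where both positions are in-bounds
    m1 = tf.sequence_mask(mask1, dim1)
    m2 = tf.sequence_mask(mask2, dim2)
    return tf.logical_and(tf.expand_dims(m1, 2), tf.expand_dims(m2, 1))

def mask_logits(logits, mask):
    # Push masked-out positions to a large negative value so softmax ignores them
    return logits + (1.0 - tf.cast(mask, tf.float32)) * -1e30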
Example #8
  def _embed_words_from_ids(self, is_train, tensors, additional_wids=None):
    if self._input_vocab is None:
      raise NotImplementedError()
    wids = [x[0] for x in tensors]

    shapes = [ops.get_shape_tuple(x) for x in wids]
    unique_wids = [ops.flatten(x) for x in wids]
    sizes = [ops.get_shape_tuple(x, 0) for x in unique_wids]

    if additional_wids is not None:
      unique_wids.append(additional_wids)

    unique_wids = tf.concat(unique_wids, 0)
    wixs, w_mapping = tf.unique(ops.flatten(unique_wids), tf.int32)

    if additional_wids is not None:
      w_mapping = w_mapping[:-ops.get_shape_tuple(additional_wids)[0]]

    if self.use_word_vecs:
      if self.embed_cpu:
        with tf.device("/cpu:0"):
          embedding_var = ops.as_initialized_variable(self._embeddings, "embeddings")
          w_embed = [tf.nn.embedding_lookup(embedding_var, wixs)]
      else:
        embedding_var = ops.as_initialized_variable(self._embeddings, "embeddings")
        w_embed = [tf.nn.embedding_lookup(embedding_var, wixs)]
    else:
      w_embed = []

    if self.use_chars:
      with tf.device("/cpu:0"):
        cids = tf.gather(self._cached_char_ids, wixs)
        # dim = self._cached_char_ids.shape[1]
        # cids = tf.matmul(self._cached_char_ids,
        #                  tf.one_hot(wixs, dim, dtype=tf.int32))

      char_emb = char_encoder.embed_ids(cids, self.char_embed_dim)

      with tf.variable_scope("char-map"):
        char_emb = self.character_mapper.apply(is_train, char_emb)

      with tf.variable_scope("char-pool"):
        char_emb = self.character_pooler.apply(is_train, char_emb)

      w_embed.append(char_emb)

    w_embed = tf.concat(w_embed, 1)
    unique_word_embeddings = w_embed

    if self.word_mapper is not None:
      with tf.variable_scope("word-map"):
        w_embed = self.word_mapper.apply(is_train, w_embed)

    dim = w_embed.shape.as_list()[-1]

    # Undo tf.unique
    w_embeds = tf.gather(w_embed, w_mapping)

    # Undo tf.concat
    w_embeds = tf.split(w_embeds, sizes, 0)
    w_mapping = tf.split(w_mapping, sizes, 0)

    # Undo ops.flatten
    w_embeds = [tf.reshape(t, s + [dim]) for t, s in zip(w_embeds, shapes)]
    w_mapping = [tf.reshape(t, s) for t, s in zip(w_mapping, shapes)]
    return w_embeds, w_mapping, unique_word_embeddings
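A standalone toy illustration of the unique -> embed -> gather trick this method is built around: each distinct word id is embedded once, and the rows are then scattered back to their original positions (names and values here are illustrative only):

import tensorflow as tf

wids = tf.constant([3, 7, 3, 1, 7])
unique_ids, mapping = tf.unique(wids, tf.int32)
emb_table = tf.get_variable("toy_embeddings", [10, 4], tf.float32)
unique_emb = tf.nn.embedding_lookup(emb_table, unique_ids)  # one row per distinct id
full_emb = tf.gather(unique_emb, mapping)                   # back in original order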