Code example #1
0
File: recurrent.py — Project: juliakreutzer/xnmt
    def transduce(
        self, es: 'expression_seqs.ExpressionSequence'
    ) -> 'expression_seqs.ExpressionSequence':
        """Run the stacked bidirectional layers over ``es``.

        Returns an ExpressionSequence whose i-th element is the concatenation
        of the top forward output at position i and the top backward output
        at the same position; also records per-layer final states.
        """
        mask = es.mask

        # Layer 0 reads the raw input; the backward layer sees it reversed,
        # so its output stays in reversed time order throughout.
        fwd_seq = self.forward_layers[0].transduce(es)
        bwd_seq_rev = self.backward_layers[0].transduce(
            expression_seqs.ReversedExpressionSequence(es))

        # Every deeper layer consumes both directions of the layer below.
        for depth in range(1, len(self.forward_layers)):
            next_fwd = self.forward_layers[depth].transduce([
                fwd_seq,
                expression_seqs.ReversedExpressionSequence(bwd_seq_rev),
            ])
            next_bwd_rev = self.backward_layers[depth].transduce([
                expression_seqs.ReversedExpressionSequence(fwd_seq),
                bwd_seq_rev,
            ])
            # Re-wrap so the (reversed) backward sequence carries the mask.
            bwd_seq_rev = expression_seqs.ExpressionSequence(
                next_bwd_rev.as_list(), mask=mask)
            fwd_seq = next_fwd

        # One final state per layer: fwd/bwd main and cell exprs concatenated.
        self._final_states = []
        for depth in range(len(self.forward_layers)):
            f_state = self.forward_layers[depth].get_final_states()[0]
            b_state = self.backward_layers[depth].get_final_states()[0]
            self._final_states.append(transducers.FinalTransducerState(
                dy.concatenate([f_state.main_expr(), b_state.main_expr()]),
                dy.concatenate([f_state.cell_expr(), b_state.cell_expr()])))

        # Pair position i of the forward outputs with position -i-1 of the
        # reversed backward outputs (i.e. the same time step).
        combined = [dy.concatenate([fwd_seq[pos], bwd_seq_rev[-pos - 1]])
                    for pos in range(len(fwd_seq))]
        return expression_seqs.ExpressionSequence(expr_list=combined, mask=mask)
Code example #2
0
File: pyramidal.py — Project: yzhen-li/xnmt
  def transduce(self, es: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:
    """
    returns the list of output Expressions obtained by adding the given inputs
    to the current state, one by one, to both the forward and backward RNNs,
    and concatenating.

    Between layers, the sequence is downsampled by ``reduce_factor`` using
    either "skip" (keep every k-th step) or "concat" (feed k parallel
    subsampled streams to the next layer).

    Args:
      es: an ExpressionSequence
    """
    # es_list holds one or more parallel input streams for the current layer;
    # layer 0 always receives a single stream.
    es_list = [es]

    for layer_i, (fb, bb) in enumerate(self.builder_layers):
      reduce_factor = self._reduce_factor_for_layer(layer_i)

      # Subsample the mask to match the reduced output length of this layer.
      if es_list[0].mask is None: mask_out = None
      else: mask_out = es_list[0].mask.lin_subsampled(reduce_factor)

      # "concat" splits the sequence into reduce_factor interleaved streams,
      # so the length must divide evenly.
      if self.downsampling_method=="concat" and es_list[0].sent_len() % reduce_factor != 0:
        raise ValueError(f"For 'concat' subsampling, sequence lengths must be multiples of the total reduce factor, "
                         f"but got sequence length={es_list[0].sent_len()} for reduce_factor={reduce_factor}. "
                         f"Set Batcher's pad_src_to_multiple argument accordingly.")
      fs = fb.transduce(es_list)
      # Backward builder sees every stream reversed; bs stays in reversed order.
      bs = bb.transduce([expression_seqs.ReversedExpressionSequence(es_item) for es_item in es_list])
      if layer_i < len(self.builder_layers) - 1:
        if self.downsampling_method=="skip":
          # Keep every reduce_factor-th output; re-reverse bs to forward order.
          es_list = [expression_seqs.ExpressionSequence(expr_list=fs[::reduce_factor], mask=mask_out),
                     expression_seqs.ExpressionSequence(expr_list=bs[::reduce_factor][::-1], mask=mask_out)]
        elif self.downsampling_method=="concat":
          es_len = es_list[0].sent_len()
          es_list_fwd = []
          es_list_bwd = []
          # Bucket j collects positions i+j (fwd) resp. the mirrored index in
          # the reversed bwd outputs, producing reduce_factor parallel streams
          # per direction for the next layer.
          for i in range(0, es_len, reduce_factor):
            for j in range(reduce_factor):
              if i==0:
                # Lazily create one bucket per offset on the first block.
                es_list_fwd.append([])
                es_list_bwd.append([])
              es_list_fwd[j].append(fs[i+j])
              es_list_bwd[j].append(bs[es_list[0].sent_len()-reduce_factor+j-i])
          es_list = [expression_seqs.ExpressionSequence(expr_list=es_list_fwd[j], mask=mask_out) for j in range(reduce_factor)] + \
                    [expression_seqs.ExpressionSequence(expr_list=es_list_bwd[j], mask=mask_out) for j in range(reduce_factor)]
        else:
          raise RuntimeError(f"unknown downsampling_method {self.downsampling_method}")
      else:
        # Top layer: concatenate forward and (re-reversed) backward outputs.
        ret_es = expression_seqs.ExpressionSequence(
          expr_list=[tt.concatenate([f, b]) for f, b in zip(fs, expression_seqs.ReversedExpressionSequence(bs))], mask=mask_out)

    # One final state per layer pair: fwd/bwd main and cell exprs concatenated.
    self._final_states = [transducers.FinalTransducerState(tt.concatenate([fb.get_final_states()[0].main_expr(),
                                                                           bb.get_final_states()[0].main_expr()]),
                                                           tt.concatenate([fb.get_final_states()[0].cell_expr(),
                                                                           bb.get_final_states()[0].cell_expr()])) \
                          for (fb, bb) in self.builder_layers]
    return ret_es
Code example #3
0
    def transduce(self, es):
        """Run the stacked bidirectional layers over ``es`` and return the
        per-position concatenation of the top forward and backward outputs.

        Unlike a plain bidirectional stack, the mask is linearly subsampled
        whenever a layer changes the sequence length.
        """
        mask = es.mask
        # first layer
        forward_es = self.forward_layers[0].transduce(es)
        # Backward layer reads the input reversed; its output stays reversed.
        rev_backward_es = self.backward_layers[0].transduce(
            expression_seqs.ReversedExpressionSequence(es))

        # TODO: concat input of each layer to its output; or, maybe just add standard residual connections
        for layer_i in range(1, len(self.forward_layers)):
            new_forward_es = self.forward_layers[layer_i].transduce([
                forward_es,
                expression_seqs.ReversedExpressionSequence(rev_backward_es)
            ])
            # If this layer shortened the sequence, resample the mask to match.
            mask_out = mask
            if mask_out is not None and new_forward_es.mask.np_arr.shape != mask_out.np_arr.shape:
                mask_out = mask_out.lin_subsampled(trg_len=len(new_forward_es))
            rev_backward_es = expression_seqs.ExpressionSequence(
                self.backward_layers[layer_i].transduce([
                    expression_seqs.ReversedExpressionSequence(forward_es),
                    rev_backward_es
                ]).as_list(),
                mask=mask_out)
            forward_es = new_forward_es

        # One final state per layer: fwd/bwd main and cell exprs concatenated.
        self._final_states = [
          transducers.FinalTransducerState(dy.concatenate([self.forward_layers[layer_i].get_final_states()[0].main_expr(),
                                                           self.backward_layers[layer_i].get_final_states()[
                                                             0].main_expr()]),
                                           dy.concatenate([self.forward_layers[layer_i].get_final_states()[0].cell_expr(),
                                                           self.backward_layers[layer_i].get_final_states()[
                                                             0].cell_expr()])) \
          for layer_i in range(len(self.forward_layers))]
        # Same mask-resampling guard for the final output length.
        mask_out = mask
        if mask_out is not None and forward_es.mask.np_arr.shape != mask_out.np_arr.shape:
            mask_out = mask_out.lin_subsampled(trg_len=len(forward_es))
        # Pair forward position i with reversed-backward position -i-1
        # (the same time step).
        return expression_seqs.ExpressionSequence(expr_list=[
            dy.concatenate([forward_es[i], rev_backward_es[-i - 1]])
            for i in range(len(forward_es))
        ],
                                                  mask=mask_out)
Code example #4
0
File: nin_lstm.py — Project: seeledu/xnmt-devel
  def transduce(self, es: expression_seqs.ExpressionSequence) -> expression_seqs.ExpressionSequence:
    """Stacked bidirectional LSTM with NiN projections.

    Each layer runs forward and backward LSTMs, interleaves their outputs
    step-by-step, and projects the interleaved sequence through the matching
    network-in-network layer; the projection also performs the striding.
    """
    for depth, (fwd_builder, bwd_builder) in enumerate(self.lstm_layers):
      fwd_out = fwd_builder.transduce(es)
      bwd_out = bwd_builder.transduce(expression_seqs.ReversedExpressionSequence(es))

      # Upsample the mask (factor 0.5) because interleaving fwd/bwd doubles
      # the number of expressions per position.
      mask = None if es.mask is None else es.mask.lin_subsampled(0.5)

      # Interleave as fwd[0], bwd[-1], fwd[1], bwd[-2], ... so paired entries
      # refer to the same time step.
      woven = []
      for step in range(len(fwd_out)):
        woven.extend((fwd_out[step], bwd_out[-step - 1]))

      projected = self.nin_layers[depth].transduce(
        expression_seqs.ExpressionSequence(expr_list=woven, mask=mask))
      assert math.ceil(len(es) / float(self.stride))==len(projected), \
        f"mismatched len(es)=={len(es)}, stride=={self.stride}, len(projected)=={len(projected)}"
      es = projected

    # Only the last projected expression is kept as the final state.
    self._final_states = [transducers.FinalTransducerState(projected[-1])]
    return projected
Code example #5
0
File: lattice.py — Project: juliakreutzer/xnmt
    def transduce(
        self, expr_sequence: expression_seqs.ExpressionSequence
    ) -> expression_seqs.ExpressionSequence:
        """Run the stacked bidirectional layers over ``expr_sequence``.

        Deeper layers receive, at every position, the concatenation of the
        previous layer's forward and backward outputs (aligned in time).
        Returns the per-position concatenation of the top-layer outputs.
        """
        # Layer 0: backward layer reads the reversed input, so its output
        # remains in reversed time order throughout.
        fwd_seq = self.forward_layers[0].transduce(expr_sequence)
        bwd_seq_rev = self.backward_layers[0].transduce(
            expression_seqs.ReversedExpressionSequence(expr_sequence))

        for depth in range(1, len(self.forward_layers)):
            fwd_list = fwd_seq.as_list()
            bwd_list_rev = bwd_seq_rev.as_list()
            # Forward-layer input: fwd output paired with the time-aligned
            # (un-reversed) bwd output at each position.
            fwd_input = expression_seqs.ExpressionSequence(expr_list=[
                dy.concatenate([f_expr, b_expr])
                for f_expr, b_expr in zip(fwd_list, reversed(bwd_list_rev))])
            # Backward-layer input: the same pairs in reversed time order.
            bwd_input = expression_seqs.ExpressionSequence(expr_list=[
                dy.concatenate([f_expr, b_expr])
                for f_expr, b_expr in zip(reversed(fwd_list), bwd_list_rev)])
            # Both inputs are built before either direction advances.
            fwd_seq, bwd_seq_rev = (
                self.forward_layers[depth].transduce(fwd_input),
                self.backward_layers[depth].transduce(bwd_input))

        # One final state per layer: fwd/bwd main and cell exprs concatenated.
        self._final_states = []
        for depth in range(len(self.forward_layers)):
            f_state = self.forward_layers[depth].get_final_states()[0]
            b_state = self.backward_layers[depth].get_final_states()[0]
            self._final_states.append(transducers.FinalTransducerState(
                dy.concatenate([f_state.main_expr(), b_state.main_expr()]),
                dy.concatenate([f_state.cell_expr(), b_state.cell_expr()])))

        # Pair forward position i with reversed-backward position -i-1
        # (the same time step); note no mask is attached here.
        return expression_seqs.ExpressionSequence(expr_list=[
            dy.concatenate([fwd_seq[pos], bwd_seq_rev[-pos - 1]])
            for pos in range(len(fwd_seq))])