Code example #1
    def sharded_compute_loss(self, batch, output, shard_size, normalization):
        """Compute the forward loss and backpropagate.  Computation is done
        with shards and optionally truncation for memory efficiency.

        Also supports truncated BPTT for long sequences by taking a
        range in the decoder output sequence to back propagate in.
        Range is from `(cur_trunc, cur_trunc + trunc_size)`.

        Note sharding is an exact efficiency trick to relieve memory
        required for the generation buffers. Truncation is an
        approximate efficiency trick to relieve the memory required
        in the RNN buffers.

        Args:
          batch (batch) : batch of labeled examples
          output (:obj:`FloatTensor`) :
              output of decoder model `[tgt_len x batch x hidden]`
          shard_size (int) : maximum number of examples in a shard
          normalization (int) : Loss is divided by this number

        Returns:
            :obj:`onmt.utils.Statistics`: loss statistics for this batch

        """
        batch_stats = Statistics()
        shard_state = self._make_shard_state(batch, output)
        for shard in shards(shard_state, shard_size):
            loss, stats = self._compute_loss(batch, **shard)
            loss.div(float(normalization)).backward()
            batch_stats.update(stats)

        return batch_stats
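
The memory saving comes from the `shards` helper: it splits every tensor in `shard_state` along the first (time) dimension so the generator and softmax buffers are only materialized for `shard_size` rows at a time, and the backward pass runs once per shard. The snippet below is a minimal sketch of that splitting idea, assuming a plain dict of tensors; it is not the library's `shards` implementation (the real helper also routes each shard's gradients back into the full tensors), and `shards_sketch` and `demo_state` are hypothetical names used only for illustration.

import torch

def shards_sketch(state, shard_size):
    # Hypothetical illustration: split every tensor in `state` along dim 0
    # into chunks of at most `shard_size` rows and yield one dict per chunk.
    split = {k: torch.split(v, shard_size, dim=0) if torch.is_tensor(v) else v
             for k, v in state.items()}
    n_chunks = max(len(v) for v in split.values() if isinstance(v, tuple))
    for i in range(n_chunks):
        yield {k: (v[i] if isinstance(v, tuple) else v)
               for k, v in split.items()}

demo_state = {'output': torch.randn(10, 4, 8),
              'target': torch.randint(0, 100, (10, 4))}
for chunk in shards_sketch(demo_state, shard_size=3):
    print(chunk['output'].shape)  # three chunks of (3, 4, 8), then one of (1, 4, 8)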
Code example #2
    def sharded_compute_loss(self,
                             batch,
                             output,
                             shard_size,
                             normalization,
                             copy_params=None):
        """Compute the forward loss and backpropagate.  Computation is done
        with shards and optionally truncation for memory efficiency.

        Also supports truncated BPTT for long sequences by taking a
        range in the decoder output sequence to back propagate in.
        Range is from `(cur_trunc, cur_trunc + trunc_size)`.

        Note sharding is an exact efficiency trick to relieve memory
        required for the generation buffers. Truncation is an
        approximate efficiency trick to relieve the memory required
        in the RNN buffers.

        Args:
          batch (batch) : batch of labeled examples
          output (:obj:`FloatTensor`) :
              output of decoder model `[tgt_len x batch x hidden]`
          shard_size (int) : maximum number of examples in a shard
          normalization (int) : Loss is divided by this number
          copy_params (tuple, optional) : extra tensors for the copy /
              auxiliary loss path, unpacked positionally inside the shard
              loop as `copy_params[0]` (ext_dist), `copy_params[1]` (g)
              and, if present, `copy_params[2]` (ext_loss)

        Returns:
            :obj:`onmt.utils.Statistics`: loss statistics for this batch

        """
        batch_stats = Statistics()
        # print("batch = ")
        # print(batch)
        shard_state = self._make_shard_state(batch, output, copy_params)
        # print("keys")
        # print(shard_state.keys())
        for shard in shards(shard_state, shard_size):
            # print("shard")
            # print(shard)
            output = shard['output']
            target = shard['target']
            if copy_params is not None:
                g = shard['copy_params[1]']
                ext_dist = shard['copy_params[0]']
                if len(shard) > 2:
                    ext_loss = shard['copy_params[2]']
                # print("ext_loss", ext_loss.size())
                # else:
                #     ext_loss = None
                # exit()
                # copy_params = (shard['copy_params[0]'], shard['copy_params[1]'])
                if len(copy_params) > 2:
                    loss, stats = self._compute_loss(batch, output, target, g,
                                                     ext_dist, ext_loss)
                else:
                    loss, stats = self._compute_loss(batch, output, target, g,
                                                     ext_dist)
            else:
                loss, stats = self._compute_loss(batch, output, target)
            # print("copy_params: ")
            # print(copy_params[0].size())
            # print(copy_params[0])
            # print(copy_params[1].size())
            # print(copy_params[1])
            # print("111111111111")
            # loss.div(float(normalization)).backward(retain_graph=True)
            # print("normalization = ", normalization)
            # print('copy2 = ', ext_loss.size())
            (loss.div(float(normalization)) + ext_loss.mean() * 2).backward()
            # print("loss1 ", loss.div(float(normalization)))
            # print("loss2 ", ext_loss.mean())
            # exit()
            batch_stats.update(stats)

        return batch_stats
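
For reference, the combined objective in this modified version is the shard's normalized loss plus twice the mean of the auxiliary `ext_loss` term. Below is a toy, self-contained illustration of that arithmetic; the tensors are stand-ins, not the model's actual outputs.

import torch

normalization = 4.0
loss = torch.tensor(8.0, requires_grad=True)          # stand-in for a shard's summed loss
ext_loss = torch.full((4,), 0.5, requires_grad=True)  # stand-in for a per-example auxiliary loss

# Same combination as in the loop above: normalized main loss + 2 * mean(ext_loss).
total = loss.div(normalization) + ext_loss.mean() * 2
total.backward()

print(total.item())  # 8.0 / 4.0 + 2 * 0.5 = 3.0
print(loss.grad)     # d(total)/d(loss) = 1 / normalization = 0.25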