Example #1
 def init(self, encoder_outputs):
     """Tile the encoder outputs and source mask K times, once per beam."""
     encoder_outputs = TransformerEncoderOutput(
         repeat_batch(encoder_outputs.output, self.K),
         repeat_batch(encoder_outputs.src_mask, self.K)
     )
     return encoder_outputs
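
Every example on this page leans on the same helper. For reference, here is a minimal sketch of what repeat_batch presumably does, tiling each entry of a tensor K times along a given dimension; the actual library implementation may differ in detail:

 import torch

 def repeat_batch(t, K, dim=0):
     """Tile each entry of `t` K times along `dim`, e.g. [a, b] -> [a, a, b, b]."""
     shape = t.shape
     # Add a new axis after `dim`, broadcast it to size K, then fold it back in.
     tiled = t.unsqueeze(dim + 1).expand(*shape[:dim + 1], K, *shape[dim + 1:])
     return tiled.reshape(*shape[:dim], shape[dim] * K, *shape[dim + 1:])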
Example #2
 def forward(self, encoder_output, hsz, beam_width=1):
     h_i = self.get_state(encoder_output)
     context = encoder_output.output
     if beam_width > 1:
         # Tile the context and hidden state so each beam gets its own copy.
         with torch.no_grad():
             context = repeat_batch(context, beam_width)
             if isinstance(h_i, tuple):
                 # LSTM state: tile hidden and cell states along the batch dim (dim=1).
                 h_i = (repeat_batch(h_i[0], beam_width, dim=1),
                        repeat_batch(h_i[1], beam_width, dim=1))
             else:
                 h_i = repeat_batch(h_i, beam_width, dim=1)
     batch_size = context.shape[0]
     h_size = (batch_size, hsz)
     with torch.no_grad():
         # Zero tensor for the initial decoder output, matching the
         # context's device and dtype.
         init_zeros = context.data.new(*h_size).zero_()
     return h_i, init_zeros, context
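
To see what the tiling above does to the shapes, here is a small check reusing the repeat_batch sketch from Example #1; the batch-first context and layer-first hidden state layouts are assumptions inferred from the dim arguments:

 encoder_out = torch.randn(2, 7, 512)   # (batch, seq_len, hsz), batch-first
 h = torch.randn(1, 2, 512)             # (layers, batch, hsz), batch in dim 1
 repeat_batch(encoder_out, 4).shape     # torch.Size([8, 7, 512])
 repeat_batch(h, 4, dim=1).shape        # torch.Size([1, 8, 512])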
Example #3
 def step(self, paths, _):
     # B and V (batch size and vocabulary size) are defined elsewhere in
     # the original source; the snippet assumes they are in scope.
     probs = np.array(self.path_dist[self.i], dtype=np.float32)
     self.i += 1
     probs = probs.reshape((B, V))
     # Log-probabilities for this step, tiled K times so each beam sees them.
     single = torch.from_numpy(np.log(probs))
     return repeat_batch(single, self.K), None
Example #4
 def init(self, encoder_outputs):
     """Tile the encoder inputs and related decoder state across the beams."""
     src_mask = repeat_batch(encoder_outputs.src_mask, self.K)
     h_i, dec_out, context = self.parent.arc_policy(
         encoder_outputs, self.parent.hsz, self.K)
     return h_i, dec_out, context, src_mask