def unwrap_sentence(self, sentences, transitions):
    # Build Tokens
    x = sentences

    # Build Transitions
    t = transitions

    example = Example()
    # volatile=not self.training skips autograd bookkeeping at eval time
    # (pre-0.4 PyTorch Variable API).
    example.tokens = to_gpu(Variable(torch.from_numpy(x), volatile=not self.training))
    example.transitions = t

    return example
def build_example(self, sentences, transitions):
    batch_size = sentences.shape[0]

    # Build Tokens
    x = sentences

    # Build Transitions
    t = transitions

    example = Example()
    example.tokens = to_gpu(Variable(torch.from_numpy(x), volatile=not self.training))
    example.transitions = t

    return example
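A minimal, hypothetical sketch of the single-sentence input these builders expect: token ids arrive as an integer numpy array of shape (batch, seq_len), so torch.from_numpy yields a LongTensor suitable for an embedding lookup, while the transitions stay as a plain numpy array. The values and shapes below are toy choices for illustration; Example and to_gpu are repo helpers and are not exercised here.

import numpy as np
import torch

sentences = np.array([[4, 17, 9, 0], [12, 3, 0, 0]], dtype=np.int64)   # (batch=2, seq_len=4)
transitions = np.array([[0, 0, 1, 1], [0, 0, 1, 1]], dtype=np.int64)

tokens = torch.from_numpy(sentences)   # LongTensor sharing memory with `sentences`
assert tuple(tokens.size()) == (2, 4)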
def unwrap_sentence_pair(self, sentences, transitions):
    # Build Tokens
    x_prem = sentences[:, :, 0]
    x_hyp = sentences[:, :, 1]
    x = np.concatenate([x_prem, x_hyp], axis=0)

    # Build Transitions
    t_prem = transitions[:, :, 0]
    t_hyp = transitions[:, :, 1]
    t = np.concatenate([t_prem, t_hyp], axis=0)

    example = Example()
    example.tokens = to_gpu(Variable(torch.from_numpy(x), volatile=not self.training))
    example.transitions = t

    return example
def build_example(self, sentences, transitions):
    batch_size = sentences.shape[0]

    # sentences: (batch_size, seq_len, 2) -- premise at index 0, hypothesis at
    # index 1 on the last axis.
    # Build Tokens
    x_prem = sentences[:, :, 0]
    x_hyp = sentences[:, :, 1]
    x = np.concatenate([x_prem, x_hyp], axis=0)

    # Build Transitions
    t_prem = transitions[:, :, 0]
    t_hyp = transitions[:, :, 1]
    t = np.concatenate([t_prem, t_hyp], axis=0)

    example = Example()
    example.tokens = to_gpu(Variable(torch.from_numpy(x), volatile=not self.training))
    example.transitions = t

    return example
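A small numpy-only sketch of the pair layout assumed above: slicing the last axis separates premises from hypotheses, and concatenating along axis 0 stacks all premises on top of all hypotheses, doubling the effective batch. The shapes and random values are illustrative only.

import numpy as np

batch_size, seq_len = 2, 5
sentences = np.random.randint(0, 100, size=(batch_size, seq_len, 2))

x_prem = sentences[:, :, 0]                   # (2, 5) premises
x_hyp = sentences[:, :, 1]                    # (2, 5) hypotheses
x = np.concatenate([x_prem, x_hyp], axis=0)   # (4, 5): premises first, then hypotheses

assert x.shape == (2 * batch_size, seq_len)
assert (x[:batch_size] == x_prem).all() and (x[batch_size:] == x_hyp).all()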