Example #1
from typing import Dict, List

import torch
from allennlp.nn import util as nn_util


def batch_tensors(
        self,
        tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    # Batch the nested "text" dicts with the library helper, and stack the
    # per-instance "linking" tensors along a new batch dimension.
    batched_text = nn_util.batch_tensor_dicts(
        tensor["text"] for tensor in tensor_list)  # type: ignore
    batched_linking = torch.stack(
        [tensor["linking"] for tensor in tensor_list])
    return {"text": batched_text, "linking": batched_linking}
Example #2
def batch_tensors(
        self,
        tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    # pylint: disable=no-self-use
    batched_text = nn_util.batch_tensor_dicts(
        tensor['text'] for tensor in tensor_list)  # type: ignore
    batched_linking = torch.stack(
        [tensor['linking'] for tensor in tensor_list])
    return {'text': batched_text, 'linking': batched_linking}
Example #3
from typing import Dict, List

import torch
from allennlp.nn import util


def batch_tensors(
        self,
        tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    result: Dict[str, torch.Tensor] = util.batch_tensor_dicts(tensor_list)
    # Transformer models need LongTensors for indices, just in case we have more than 2 billion
    # different tokens. To save space, we make the switch as late as possible, i.e., here.
    result = {
        name: t.to(torch.int64) if t.dtype == torch.int32 else t
        for name, t in result.items()
    }
    return result
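The dtype switch is plain PyTorch and can be checked in isolation; a minimal sketch with made-up key names:

    import torch

    result = {"token_ids": torch.tensor([1, 2, 3], dtype=torch.int32),
              "mask": torch.tensor([True, True, False])}
    result = {name: t.to(torch.int64) if t.dtype == torch.int32 else t
              for name, t in result.items()}
    # result["token_ids"].dtype -> torch.int64; the bool mask passes through unchanged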
Example #4
from collections import defaultdict
from typing import Dict, List

import torch
from allennlp.data.fields.text_field import TextFieldTensors
from allennlp.nn import util


def batch_tensors(self, tensor_list: List[TextFieldTensors]) -> TextFieldTensors:
    # This is creating a dict of {token_indexer_name: {token_indexer_outputs: batched_tensor}}
    # for each token indexer used to index this field.
    indexer_lists: Dict[str, List[Dict[str, torch.Tensor]]] = defaultdict(list)
    for tensor_dict in tensor_list:
        for indexer_name, indexer_output in tensor_dict.items():
            indexer_lists[indexer_name].append(indexer_output)
    batched_tensors = {
        # NOTE(mattg): if an indexer has its own nested structure, rather than one tensor per
        # argument, then this will break.  If that ever happens, we should move this to an
        # `indexer.batch_tensors` method, with this logic as the default implementation in the
        # base class.
        indexer_name: util.batch_tensor_dicts(indexer_outputs)
        for indexer_name, indexer_outputs in indexer_lists.items()
    }
    return batched_tensors
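A sketch of the nested structure this method handles, with a hypothetical `field` instance and a single indexer named "tokens" (all names and shapes are illustrative):

    tensor_list = [
        {"tokens": {"token_ids": torch.tensor([1, 2]), "mask": torch.tensor([True, True])}},
        {"tokens": {"token_ids": torch.tensor([3, 4]), "mask": torch.tensor([True, False])}},
    ]
    batched = field.batch_tensors(tensor_list)
    # batched["tokens"]["token_ids"].shape -> (2, 2): one batched tensor per indexer output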
Example #5
from typing import Dict, List
import torch
from allennlp.nn import util

def batch_tensors(self, tensor_list: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
    # pylint: disable=no-self-use
    # This is creating a dict of {token_indexer_key: batch_tensor} for each token indexer used
    # to index this field.
    return util.batch_tensor_dicts(tensor_list)
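Since this variant simply delegates, `allennlp.nn.util.batch_tensor_dicts` can be exercised directly; a minimal sketch with illustrative key names:

    import torch
    from allennlp.nn import util

    batched = util.batch_tensor_dicts([
        {"token_ids": torch.tensor([1, 2, 3])},
        {"token_ids": torch.tensor([4, 5, 6])},
    ])
    # batched["token_ids"].shape -> (2, 3): tensors are stacked per shared key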