Example #1
    def __init__(self,
                 pretrained_model_name: Optional[str] = None,
                 cache_dir: Optional[str] = None,
                 hparams=None):
        super().__init__(hparams=hparams)

        self.load_pretrained_config(pretrained_model_name, cache_dir)

        # Word embedding
        self.word_embedder = WordEmbedder(vocab_size=self._hparams.vocab_size,
                                          hparams=self._hparams.embed)

        # Segment embedding for each token type
        self.segment_embedder = None
        if self._hparams.get('type_vocab_size', 0) > 0:
            self.segment_embedder = WordEmbedder(
                vocab_size=self._hparams.type_vocab_size,
                hparams=self._hparams.segment_embed)

        # Position embedding
        self.position_embedder = PositionEmbedder(
            position_size=self._hparams.position_size,
            hparams=self._hparams.position_embed)

        # The BERT encoder (a TransformerEncoder)
        self.encoder = TransformerEncoder(hparams=self._hparams.encoder)

        self.pooler = nn.Sequential(
            nn.Linear(self._hparams.hidden_size, self._hparams.hidden_size),
            nn.Tanh())

        self.init_pretrained_weights()
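The constructor above only builds the sub-modules. Below is a minimal sketch of how they are typically combined in a BERT-style forward pass; it is not the library's actual implementation, and the call signatures of PositionEmbedder and TransformerEncoder used here (position ids as input, a `sequence_length` keyword) are assumptions.

import torch

# Hypothetical helper, not part of the library: combine word, position and
# (optional) segment embeddings, run the Transformer encoder, and pool the
# first token, as a BERT-style model would.
def bert_style_forward(model, input_ids, segment_ids, sequence_length):
    word_embeds = model.word_embedder(input_ids)          # (batch, time, hidden)
    batch_size, max_time = input_ids.size()
    positions = torch.arange(max_time, device=input_ids.device)
    positions = positions.expand(batch_size, max_time)    # (batch, time)
    embeds = word_embeds + model.position_embedder(positions)
    if model.segment_embedder is not None:
        embeds = embeds + model.segment_embedder(segment_ids)
    outputs = model.encoder(embeds, sequence_length=sequence_length)
    pooled = model.pooler(outputs[:, 0])                  # first-token summary
    return outputs, pooled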
Example #2
    def test_word_embedder_trainable(self):
        """Tests freezing the embedding parameters.
        """
        init_value = np.expand_dims(np.arange(5), 1)

        embedder = WordEmbedder(init_value=init_value,
                                hparams={"trainable": False})
        self.assertEqual(len(embedder.trainable_variables), 0)

        embedder = WordEmbedder(init_value=init_value)
        self.assertEqual(len(embedder.trainable_variables), 1)
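A hedged usage sketch of what the `trainable` flag buys in practice: a frozen embedder contributes no parameters to `trainable_variables`, so only downstream layers end up in the optimizer. The optimizer wiring below is an illustration, not taken from the library.

import numpy as np
import torch
from texar.torch.modules import WordEmbedder

# Freeze the embedding table; only the downstream head will be updated.
init_value = np.expand_dims(np.arange(5), 1)
frozen = WordEmbedder(init_value=init_value, hparams={"trainable": False})
head = torch.nn.Linear(1, 2)   # embedding dim is 1 for this init_value

# `frozen.trainable_variables` is empty, so the optimizer only sees the head.
optimizer = torch.optim.SGD(
    list(frozen.trainable_variables) + list(head.parameters()), lr=0.1)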
Example #3
    def _test_word_embedder(self, hparams):
        """Tests :class:`texar.torch.modules.WordEmbedder`.
        """
        embedder = WordEmbedder(vocab_size=100, hparams=hparams)

        inputs = torch.randint(embedder.vocab_size, (64, 16), dtype=torch.long)
        outputs = embedder(inputs)

        inputs_soft = torch.randn((64, 16, embedder.vocab_size),
                                  dtype=torch.float32)
        outputs_soft = embedder(soft_ids=inputs_soft)

        if isinstance(embedder.dim, (list, tuple)):
            emb_dim = tuple(embedder.dim)
        else:
            emb_dim = (embedder.dim, )

        if isinstance(hparams["dim"], (list, tuple)):
            hparams_dim = tuple(hparams["dim"])
        else:
            hparams_dim = (hparams["dim"], )

        self.assertEqual(outputs.size(), (64, 16) + emb_dim)
        self.assertEqual(outputs.size(-1), embedder.output_size)
        self.assertEqual(outputs_soft.size(), (64, 16) + emb_dim)
        self.assertEqual(outputs_soft.size(-1), embedder.output_size)
        self.assertEqual(emb_dim, hparams_dim)
        self.assertEqual(embedder.vocab_size, 100)
Example #4
    def setUp(self):
        self._vocab_size = 4
        self._max_time = 8
        self._batch_size = 16
        self._emb_dim = 20
        self._inputs = torch.randint(
            self._vocab_size, size=(self._batch_size, self._max_time))
        embedding = torch.rand(
            self._vocab_size, self._emb_dim, dtype=torch.float)
        self._embedder = WordEmbedder(init_value=embedding)
        self._hparams = HParams(None, BasicRNNDecoder.default_hparams())
Example #5
    def test_word_embedder_soft_ids(self):
        """Tests the correctness of using soft ids.
        """
        init_value = np.expand_dims(np.arange(5), 1)
        embedder = WordEmbedder(init_value=init_value)

        ids = torch.tensor([3])
        soft_ids = torch.tensor([0, 0, 0, 1, 0], dtype=torch.float)

        outputs = embedder(ids=ids)
        soft_outputs = embedder(soft_ids=soft_ids)
        self.assertEqual(outputs, soft_outputs)
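Soft ids are weights over the rows of the embedding table, so a one-hot weight vector reproduces an ordinary lookup. A stand-alone sketch of that equivalence using plain tensor arithmetic (no WordEmbedder involved):

import torch

# The same 5 x 1 table as `init_value` above.
table = torch.arange(5, dtype=torch.float).unsqueeze(1)
hard = table[3]                                    # lookup of id 3 -> tensor([3.])
soft = torch.tensor([0., 0., 0., 1., 0.]) @ table  # weighted sum   -> tensor([3.])
assert torch.equal(hard, soft)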
Example #6
    def setUp(self):
        self._vocab_size = 10
        self._max_time = 16
        self._batch_size = 8
        self._emb_dim = 20
        self._attention_dim = 256
        self._inputs = torch.randint(
            self._vocab_size, size=(self._batch_size, self._max_time))
        embedding = torch.rand(
            self._vocab_size, self._emb_dim, dtype=torch.float)
        self._embedder = WordEmbedder(init_value=embedding)
        self._encoder_output = torch.rand(
            self._batch_size, self._max_time, 64)

        self._test_hparams = {}  # (cell_type, is_multi) -> hparams
        for cell_type in ["RNNCell", "LSTMCell", "GRUCell"]:
            hparams = {
                "rnn_cell": {
                    'type': cell_type,
                    'kwargs': {
                        'num_units': 256,
                    },
                },
                "attention": {
                    "kwargs": {
                        "num_units": self._attention_dim
                    },
                }
            }
            self._test_hparams[(cell_type, False)] = HParams(
                hparams, AttentionRNNDecoder.default_hparams())

        hparams = {
            "rnn_cell": {
                'type': 'LSTMCell',
                'kwargs': {
                    'num_units': 256,
                },
                'num_layers': 3,
            },
            "attention": {
                "kwargs": {
                    "num_units": self._attention_dim
                },
            }
        }
        self._test_hparams[("LSTMCell", True)] = HParams(
            hparams, AttentionRNNDecoder.default_hparams())
Example #7
    def test_embedder_multi_calls(self):
        """Tests embedders called by multiple times.
        """
        hparams = {"dim": 26, "dropout_rate": 0.3, "dropout_strategy": "item"}
        embedder = WordEmbedder(vocab_size=100, hparams=hparams)
        inputs = torch.randint(embedder.vocab_size, (64, 16), dtype=torch.long)
        outputs = embedder(inputs)

        if isinstance(embedder.dim, (list, tuple)):
            emb_dim = tuple(embedder.dim)
        else:
            emb_dim = (embedder.dim, )
        self.assertEqual(outputs.size(), (64, 16) + emb_dim)

        # Call with inputs in a different shape
        inputs = torch.randint(embedder.vocab_size, (64, 10, 20),
                               dtype=torch.long)
        outputs = embedder(inputs)

        self.assertEqual(outputs.size(), (64, 10, 20) + emb_dim)
Example #8
    def __init__(self,
                 pretrained_model_name: Optional[str] = None,
                 cache_dir: Optional[str] = None,
                 hparams=None):
        super().__init__(hparams=hparams)

        self.load_pretrained_config(pretrained_model_name, cache_dir)

        # Word embedding
        self.word_embedder = WordEmbedder(vocab_size=self._hparams.vocab_size,
                                          hparams=self._hparams.embed)

        # The encoder (a TransformerEncoder)
        self.encoder = T5Encoder(hparams=self._hparams.encoder)

        # The decoder (a TransformerDecoder)
        self.decoder = T5Decoder(token_embedder=self._embedding_fn,
                                 output_layer=Identity(),
                                 hparams=self._hparams.decoder)

        self.init_pretrained_weights()
Example #9
    def __init__(self,
                 pretrained_model_name: Optional[str] = None,
                 cache_dir: Optional[str] = None,
                 hparams=None):
        super().__init__(hparams=hparams)

        self.load_pretrained_config(pretrained_model_name, cache_dir)

        # Word embedding
        self.word_embedder = WordEmbedder(vocab_size=self._hparams.vocab_size,
                                          hparams=self._hparams.embed)

        # Position embedding
        self.position_embedder = PositionEmbedder(
            position_size=self._hparams.position_size,
            hparams=self._hparams.position_embed)

        # The GPT2 encoder (a TransformerEncoder)
        self.encoder = TransformerEncoder(hparams=self._hparams.encoder)

        self.init_pretrained_weights(load_output_layer=False)
Example #10
    def __init__(self, hparams=None):
        super().__init__(hparams=hparams)

        # Segment embedding for each token type
        self.segment_embedder = None
        if self._hparams.get('type_vocab_size', 0) > 0:
            self.segment_embedder = WordEmbedder(
                vocab_size=self._hparams.type_vocab_size,
                hparams=self._hparams.segment_embed)

        # Position embedding
        self.position_embedder = PositionEmbedder(
            position_size=self._hparams.position_size,
            hparams=self._hparams.position_embed)

        # The BERT encoder (a TransformerEncoder)
        self.encoder = TransformerEncoder(hparams=self._hparams.encoder)

        self.pooler = nn.Sequential(
            nn.Linear(self._hparams.encoder.dim, self._hparams.hidden_size),
            nn.Tanh())
Example #11
    def __init__(self,
                 pretrained_model_name: Optional[str] = None,
                 cache_dir: Optional[str] = None,
                 hparams=None):
        self.load_pretrained_config(pretrained_model_name, cache_dir, hparams)

        # Word embedding
        word_embedder = WordEmbedder(vocab_size=self._hparams.vocab_size,
                                     hparams=self._hparams.embed)

        # Position embedding
        position_embedder = PositionEmbedder(
            position_size=self._hparams.position_size,
            hparams=self._hparams.position_embed)

        # The GPT2 encoder (a TransformerEncoder)
        super().__init__(hparams=None)

        # Register modules only after `super().__init__()` has run: PyTorch's
        # `nn.Module` refuses submodule assignments made before its own
        # `__init__` is called.
        self.word_embedder = word_embedder
        self.position_embedder = position_embedder

        self.init_pretrained_weights(load_output_layer=False)