Example #1
    def test_read_embedding_file_inside_archive(self):
        token2vec = {
                "think": torch.Tensor([0.143, 0.189, 0.555, 0.361, 0.472]),
                "make": torch.Tensor([0.878, 0.651, 0.044, 0.264, 0.872]),
                "difference": torch.Tensor([0.053, 0.162, 0.671, 0.110, 0.259]),
                "àèìòù": torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0])
                }
        vocab = Vocabulary()
        for token in token2vec:
            vocab.add_token_to_namespace(token)

        params = Params({
                'pretrained_file': str(self.FIXTURES_ROOT / 'embeddings/multi-file-archive.zip'),
                'embedding_dim': 5
                })
        with pytest.raises(ValueError,
                           match="The archive .*/embeddings/multi-file-archive.zip contains multiple files, "
                                 "so you must select one of the files inside "
                                 "providing a uri of the type: "
                                 "\\(path_or_url_to_archive\\)#path_inside_archive\\."):
            Embedding.from_params(vocab, params)

        for ext in ['.zip', '.tar.gz']:
            archive_path = str(self.FIXTURES_ROOT / 'embeddings/multi-file-archive') + ext
            file_uri = format_embeddings_file_uri(archive_path, 'folder/fake_embeddings.5d.txt')
            params = Params({
                    'pretrained_file': file_uri,
                    'embedding_dim': 5
                    })
            embeddings = Embedding.from_params(vocab, params).weight.data
            for tok, vec in token2vec.items():
                i = vocab.get_token_index(tok)
                assert torch.equal(embeddings[i], vec), 'Problem with format ' + archive_path
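        # Note (added for clarity): per the error message matched above, URIs addressing a
        # file inside an archive take the form "(path_or_url_to_archive)#path_inside_archive",
        # e.g. "(.../multi-file-archive.zip)#folder/fake_embeddings.5d.txt" as built by
        # format_embeddings_file_uri().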
Example #2
    def __init__(self, vocab, use_postags_only=True, embed_dim=100, hidden_size=200, recurrent_dropout_probability=0.3,
                 use_highway=False,
                 maxpool=True):
        super(BLSTMModel, self).__init__()

        self.embeds = Embedding.from_params(
            vocab,
            Params({'vocab_namespace': 'pos' if use_postags_only else 'tokens',
                    'embedding_dim': embed_dim,
                    'trainable': True,
                    'padding_index': 0,
                    'pretrained_file': None if use_postags_only else 'https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.100d.txt.gz',
                    }))
        self.binary_feature_embedding = Embedding(2, embed_dim)

        self.fwd_lstm = PytorchSeq2SeqWrapper(AugmentedLstm(
            input_size=embed_dim * 2, hidden_size=hidden_size, go_forward=True,
            recurrent_dropout_probability=recurrent_dropout_probability,
            use_input_projection_bias=False, use_highway=use_highway), stateful=False)

        self.bwd_lstm = PytorchSeq2SeqWrapper(AugmentedLstm(
            input_size=embed_dim * 2, hidden_size=hidden_size, go_forward=False,
            recurrent_dropout_probability=recurrent_dropout_probability,
            use_input_projection_bias=False, use_highway=use_highway), stateful=False)

        self.maxpool = maxpool
        self.fc = nn.Linear(hidden_size * 2, 1, bias=False)
Example #3
    def test_read_embedding_file_inside_archive(self):
        token2vec = {
                "think": torch.Tensor([0.143, 0.189, 0.555, 0.361, 0.472]),
                "make": torch.Tensor([0.878, 0.651, 0.044, 0.264, 0.872]),
                "difference": torch.Tensor([0.053, 0.162, 0.671, 0.110, 0.259]),
                "àèìòù": torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0])
                }
        vocab = Vocabulary()
        for token in token2vec:
            vocab.add_token_to_namespace(token)

        params = Params({
                'pretrained_file': str(self.FIXTURES_ROOT / 'embeddings/multi-file-archive.zip'),
                'embedding_dim': 5
                })
        with pytest.raises(ValueError, message="No ValueError when pretrained_file is a multi-file archive"):
            Embedding.from_params(vocab, params)

        for ext in ['.zip', '.tar.gz']:
            archive_path = str(self.FIXTURES_ROOT / 'embeddings/multi-file-archive') + ext
            file_uri = format_embeddings_file_uri(archive_path, 'folder/fake_embeddings.5d.txt')
            params = Params({
                    'pretrained_file': file_uri,
                    'embedding_dim': 5
                    })
            embeddings = Embedding.from_params(vocab, params).weight.data
            for tok, vec in token2vec.items():
                i = vocab.get_token_index(tok)
                assert torch.equal(embeddings[i], vec), 'Problem with format ' + archive_path
Example #4
    def test_read_embedding_file_inside_archive(self):
        token2vec = {
                u"think": torch.Tensor([0.143, 0.189, 0.555, 0.361, 0.472]),
                u"make": torch.Tensor([0.878, 0.651, 0.044, 0.264, 0.872]),
                u"difference": torch.Tensor([0.053, 0.162, 0.671, 0.110, 0.259]),
                u"àèìòù": torch.Tensor([1.0, 2.0, 3.0, 4.0, 5.0])
                }
        vocab = Vocabulary()
        for token in token2vec:
            vocab.add_token_to_namespace(token)

        params = Params({
                u'pretrained_file': unicode(self.FIXTURES_ROOT / u'embeddings/multi-file-archive.zip'),
                u'embedding_dim': 5
                })
        with pytest.raises(ValueError, message=u"No ValueError when pretrained_file is a multi-file archive"):
            Embedding.from_params(vocab, params)

        for ext in [u'.zip', u'.tar.gz']:
            archive_path = unicode(self.FIXTURES_ROOT / u'embeddings/multi-file-archive') + ext
            file_uri = format_embeddings_file_uri(archive_path, u'folder/fake_embeddings.5d.txt')
            params = Params({
                    u'pretrained_file': file_uri,
                    u'embedding_dim': 5
                    })
            embeddings = Embedding.from_params(vocab, params).weight.data
            for tok, vec in list(token2vec.items()):
                i = vocab.get_token_index(tok)
                assert torch.equal(embeddings[i], vec), u'Problem with format ' + archive_path
Example #5
    def __init__(self,
                 input_dim: int,
                 forward_combination: str = "y-x",
                 backward_combination: str = "y-x",
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False,
                 use_sentinels: bool = True) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._forward_combination = forward_combination
        self._backward_combination = backward_combination
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths

        if self._input_dim % 2 != 0:
            raise ConfigurationError("The input dimension is not divisible by 2, but the "
                                     "BidirectionalEndpointSpanExtractor assumes the embedded representation "
                                     "is bidirectional (and hence divisible by 2).")
        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings, span_width_embedding_dim)
        elif not all([num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError("To use a span width embedding representation, you must"
                                     "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None

        self._use_sentinels = use_sentinels
        if use_sentinels:
            self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
            self._end_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
Example #6
    def test_embedding_vocab_extension_is_no_op_when_extension_should_not_happen(
            self):
        # Case 1: When the vocab is already in sync with the embeddings, this should be a no-op.
        vocab = Vocabulary({"tokens": {"word1": 1, "word2": 1}})
        embedding_params = Params({
            "vocab_namespace": "tokens",
            "embedding_dim": 10
        })
        embedder = Embedding.from_params(embedding_params, vocab=vocab)
        original_weight = embedder.weight
        embedder.extend_vocab(vocab, "tokens")
        assert torch.all(embedder.weight == original_weight)

        # Case 2: It shouldn't wrongly assume the "tokens" namespace for extension if no
        # information on vocab_namespace is available. Rather, log a warning and be a no-op.
        vocab = Vocabulary()
        vocab.add_token_to_namespace("word1", "tokens")
        vocab.add_token_to_namespace("word2", "tokens")
        embedding_params = Params({
            "vocab_namespace": "tokens",
            "embedding_dim": 10
        })
        embedder = Embedding.from_params(embedding_params, vocab=vocab)
        # Previous models won't have _vocab_namespace attribute. Force it to be None
        embedder._vocab_namespace = None
        embedder.weight = torch.nn.Parameter(embedder.weight[:1, :])
        assert embedder.weight.shape[0] == 1
        embedder.extend_vocab(vocab)  # Don't specify namespace
        assert embedder.weight.shape[0] == 1
Example #7
    def __init__(self,
                 input_dim: int,
                 combination: str = "max",
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False,
                 use_exclusive_start_indices: bool = False) -> None:
        super().__init__()

        self._input_dim = input_dim
        self._combination = combination
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths
        if bucket_widths:
            raise ConfigurationError("not support")

        self._use_exclusive_start_indices = use_exclusive_start_indices
        if use_exclusive_start_indices:
            raise ConfigurationError("not support")

        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings,
                                                   span_width_embedding_dim)
        elif not all(
            [num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError(
                "To use a span width embedding representation, you must"
                "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None

        # the allennlp SelfAttentiveSpanExtractor doesn't include span width embedding.
        self._self_attentive = SelfAttentiveSpanExtractor(self._input_dim)
Example #8
    def __init__(
        self,
        input_dim: int,
        combination: str = "x,y",
        num_width_embeddings: int = None,
        span_width_embedding_dim: int = None,
        bucket_widths: bool = False,
        use_exclusive_start_indices: bool = False,
    ) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._combination = combination
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths

        self._use_exclusive_start_indices = use_exclusive_start_indices
        if use_exclusive_start_indices:
            self._start_sentinel = Parameter(
                torch.randn([1, 1, int(input_dim)]))

        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings,
                                                   span_width_embedding_dim)
        elif not all(
            [num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError(
                "To use a span width embedding representation, you must"
                "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None
Example #9
    def from_params(cls, params: Params, vocab: Vocabulary) -> 'Embedding':  # type: ignore

        cuda_device = params.pop("cuda_device",-1)
        use_glove_embedding = params.pop("use_glove_embedding", False)
        #glove_dimension_size = params.pop("glove_dimension_size",300)
        use_elmo_embedding = params.pop("use_elmo_embedding", False)
        use_verb_index_embedding = params.pop("use_verb_index_embedding",False)
        verb_index_embedding_dimension = params.pop("verb_index_embedding_dimension",50)
        use_visual_score_embedding = params.pop("use_visual_score_embedding",False)

        num_embeddings = vocab.get_vocab_size()  # 0 = padding, 1 = unknown, the rest is the vocabulary
        embedding_dim = 0
        
        # check whether to use the ELMo embedding
        if use_elmo_embedding:
            elmo_token_embedder = Elmo.from_params(params.pop("elmo"))
            embedding_dim = embedding_dim + elmo_token_embedder.get_output_dim() # current dimension for elmo embedding - 512*2 = 1024 
        else:
            elmo_token_embedder = None

        if use_glove_embedding:
            # glove_embedder: an Embedding with dimension 300
            #glove_embedder = get_glove_embedder(num_embeddings,glove_dimension_size,vocab)
            glove_embedder = Embedding.from_params(vocab, params.pop("glove_embedder"))
            embedding_dim = embedding_dim + glove_embedder.get_output_dim()
        else:
            glove_embedder = None
			
        if use_verb_index_embedding:
            # suffix_embeddings: need two elements, for 0 (non-metaphor) and 1 (is metaphor)
            verb_index_embedder = Embedding(2, verb_index_embedding_dimension)
            embedding_dim = embedding_dim + verb_index_embedder.get_output_dim() # for suffix embedding
        else:
            verb_index_embedder = None
			
        if use_visual_score_embedding:
            # use pretrained weight matrix
            visual_score_embedder = Embedding.from_params(vocab, params.pop("visual_embedder"))
            embedding_dim = embedding_dim + visual_score_embedder.get_output_dim()
        else:
            visual_score_embedder = None
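        # Illustrative arithmetic (not in the original code): with the ELMo embedder
        # (1024 dims, per the comment above), the 300-dim GloVe embedder and the default
        # 50-dim verb-index embedding all enabled, embedding_dim accumulates to
        # 1024 + 300 + 50 = 1374.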
			
        if cuda_device == -1:
            is_gpu = False
        else:
            is_gpu = True
	
        return cls(num_embeddings=num_embeddings,embedding_dim=embedding_dim, glove_embedder=glove_embedder, 
                    elmo_embedder=elmo_token_embedder, verb_index_embedder=verb_index_embedder, 
                    visual_score_embedder=visual_score_embedder,is_gpu=is_gpu)
Example #10
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'TreeAttention':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        premise_encoder_params = params.pop("premise_encoder", None)
        premise_encoder = Seq2SeqEncoder.from_params(premise_encoder_params)

        attention_similarity = SimilarityFunction.from_params(params.pop('attention_similarity'))
        phrase_probability = FeedForward.from_params(params.pop('phrase_probability'))
        edge_probability = FeedForward.from_params(params.pop('edge_probability'))

        edge_embedding = Embedding.from_params(vocab, params.pop('edge_embedding'))
        use_encoding_for_node = params.pop('use_encoding_for_node')
        ignore_edges = params.pop('ignore_edges', False)

        init_params = params.pop('initializer', None)
        initializer = (InitializerApplicator.from_params(init_params)
                       if init_params is not None
                       else InitializerApplicator())

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   phrase_probability=phrase_probability,
                   edge_probability=edge_probability,
                   premise_encoder=premise_encoder,
                   edge_embedding=edge_embedding,
                   use_encoding_for_node=use_encoding_for_node,
                   attention_similarity=attention_similarity,
                   ignore_edges=ignore_edges,
                   initializer=initializer)
Example #11
    def test_embedding_constructed_directly_with_pretrained_file(self):

        vocab = Vocabulary()
        vocab.add_token_to_namespace("word")
        vocab.add_token_to_namespace("word2")
        unicode_space = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
        vocab.add_token_to_namespace(unicode_space)
        embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
        with gzip.open(embeddings_filename, "wb") as embeddings_file:
            embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
            embeddings_file.write(
                f"{unicode_space} 3.4 3.3 5.0\n".encode("utf-8"))

        num_embeddings = vocab.get_vocab_size()
        embedding_layer = Embedding(
            embedding_dim=3,
            num_embeddings=num_embeddings,
            pretrained_file=embeddings_filename,
            vocab=vocab,
        )
        word_vector = embedding_layer.weight.data[vocab.get_token_index(
            "word")]
        assert numpy.allclose(word_vector.numpy(),
                              numpy.array([1.0, 2.3, -1.0]))
        word_vector = embedding_layer.weight.data[vocab.get_token_index(
            unicode_space)]
        assert numpy.allclose(word_vector.numpy(), numpy.array([3.4, 3.3,
                                                                5.0]))
        word_vector = embedding_layer.weight.data[vocab.get_token_index(
            "word2")]
        assert not numpy.allclose(word_vector.numpy(),
                                  numpy.array([1.0, 2.3, -1.0]))
Example #12
    def test_embedding_vocab_extension_without_stored_namespace(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("word1", "tokens_a")
        vocab.add_token_to_namespace("word2", "tokens_a")
        embedding_params = Params({
            "vocab_namespace": "tokens_a",
            "embedding_dim": 10
        })
        embedder = Embedding.from_vocab_or_file(
            vocab, **embedding_params.as_dict(quiet=True))

        # Previous models won't have _vocab_namespace attribute. Force it to be None
        embedder._vocab_namespace = None
        original_weight = embedder.weight

        assert original_weight.shape[0] == 4

        extension_counter = {"tokens_a": {"word3": 1}}
        vocab._extend(extension_counter)

        embedder.extend_vocab(vocab, "tokens_a")  # specified namespace

        extended_weight = embedder.weight
        assert extended_weight.shape[0] == 5
        assert torch.all(extended_weight[:4, :] == original_weight[:4, :])
Example #13
    def test_embedding_vocab_extension_works_with_pretrained_embedding_file(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace('word1')
        vocab.add_token_to_namespace('word2')

        embeddings_filename = str(self.TEST_DIR / "embeddings2.gz")
        with gzip.open(embeddings_filename, 'wb') as embeddings_file:
            embeddings_file.write("word3 0.5 0.3 -6.0\n".encode('utf-8'))
            embeddings_file.write("word4 1.0 2.3 -1.0\n".encode('utf-8'))
            embeddings_file.write("word2 0.1 0.4 -4.0\n".encode('utf-8'))
            embeddings_file.write("word1 1.0 2.3 -1.0\n".encode('utf-8'))

        embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 3,
                                   "pretrained_file": embeddings_filename})
        embedder = Embedding.from_params(vocab, embedding_params)

        # Change weight to simulate embedding training
        embedder.weight.data += 1
        assert torch.all(embedder.weight[2:, :] == torch.Tensor([[2.0, 3.3, 0.0], [1.1, 1.4, -3.0]]))
        original_weight = embedder.weight

        assert tuple(original_weight.size()) == (4, 3)  # 4 because of padding and OOV

        vocab.add_token_to_namespace('word3')
        embedder.extend_vocab(vocab, extension_pretrained_file=embeddings_filename) # default namespace
        extended_weight = embedder.weight

        # Make sure extension happened for the extra token in the extended vocab
        assert tuple(extended_weight.size()) == (5, 3)

        # Make sure extension doesn't change original trained weights.
        assert torch.all(original_weight[:4, :] == extended_weight[:4, :])

        # Make sure extended weight is taken from the embedding file.
        assert torch.all(extended_weight[4, :] == torch.Tensor([0.5, 0.3, -6.0]))
Example #14
 def test_embedding_layer_actually_initializes_word_vectors_correctly(self):
     vocab = Vocabulary()
     vocab.add_token_to_namespace("word")
     vocab.add_token_to_namespace("word2")
     unicode_space = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
     vocab.add_token_to_namespace(unicode_space)
     embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
     with gzip.open(embeddings_filename, "wb") as embeddings_file:
         embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
         embeddings_file.write(
             f"{unicode_space} 3.4 3.3 5.0\n".encode("utf-8"))
     params = Params({
         "pretrained_file": embeddings_filename,
         "embedding_dim": 3
     })
     embedding_layer = Embedding.from_params(params, vocab=vocab)
     word_vector = embedding_layer.weight.data[vocab.get_token_index(
         "word")]
     assert numpy.allclose(word_vector.numpy(),
                           numpy.array([1.0, 2.3, -1.0]))
     word_vector = embedding_layer.weight.data[vocab.get_token_index(
         unicode_space)]
     assert numpy.allclose(word_vector.numpy(), numpy.array([3.4, 3.3,
                                                             5.0]))
     word_vector = embedding_layer.weight.data[vocab.get_token_index(
         "word2")]
     assert not numpy.allclose(word_vector.numpy(),
                               numpy.array([1.0, 2.3, -1.0]))
Example #15
    def __init__(self,
                 input_dim: int,
                 forward_combination: str = "y-x",
                 backward_combination: str = "y-x",
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False,
                 use_sentinels: bool = True) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._forward_combination = forward_combination
        self._backward_combination = backward_combination
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths

        if self._input_dim % 2 != 0:
            raise ConfigurationError("The input dimension is not divisible by 2, but the "
                                     "BidirectionalEndpointSpanExtractor assumes the embedded representation "
                                     "is bidirectional (and hence divisible by 2).")
        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings, span_width_embedding_dim)
        elif not all([num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError("To use a span width embedding representation, you must"
                                     "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None

        self._use_sentinels = use_sentinels
        if use_sentinels:
            self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
            self._end_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
Example #16
    def __init__(self):
        # CopyNet model initialization parameters
        self.vocabulary = Vocabulary()
        self.vocabulary = self.vocabulary.from_files(
            "C:/Users/Selma/PycharmProjects/ROS2SemanticParser/"
            "CN_model_weights/no_embedds/model.tar.gz")
        self.source_embedder = BasicTextFieldEmbedder(
            token_embedders={
                'tokens':
                Embedding(num_embeddings=self.vocabulary.get_vocab_size(
                    'source_tokens'),
                          embedding_dim=310)
            })
        self.dataset_reader = CopyNetDatasetReader(
            target_namespace="target_tokens")
        self.encoder = PytorchSeq2SeqWrapper(
            torch.nn.LSTM(input_size=310,
                          hidden_size=128,
                          num_layers=1,
                          batch_first=True))
        self.attention = BilinearAttention(vector_dim=128, matrix_dim=128)
        self.beam_size = 5
        self.max_decoding_steps = 200
        self.target_embedding_dim = 150

        self.semantic_parser = CopyNetSeq2Seq(
            vocab=self.vocabulary,
            source_embedder=self.source_embedder,
            encoder=self.encoder,
            attention=self.attention,
            beam_size=self.beam_size,
            max_decoding_steps=self.max_decoding_steps,
            target_embedding_dim=self.target_embedding_dim)
Example #17
 def test_embedding_vocab_extension_raises_error_for_incorrect_vocab(self):
     # When vocab namespace of extension vocab is smaller than embeddings
     # it should raise configuration error.
     vocab = Vocabulary({"tokens": {"word1": 1, "word2": 1}})
     embedding_params = Params({"vocab_namespace": "tokens", "embedding_dim": 10})
     embedder = Embedding.from_params(vocab, embedding_params)
     with pytest.raises(ConfigurationError):
         embedder.extend_vocab(Vocabulary(), "tokens")
Example #18
    def __init__(self,
                 input_dim: int,
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths

        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings, span_width_embedding_dim)
        elif not all([num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError("To use a span width embedding representation, you must"
                                     "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None
Example #19
    def __init__(self, 
                 vocab: Vocabulary,
                 bert_embedder: Optional[PretrainedBertEmbedder] = None,
                 encoder: Optional[Seq2SeqEncoder] = None,
                 dropout: Optional[float] = None,
                 use_crf: bool = True) -> None:
        super().__init__(vocab)

        if bert_embedder:
            self.use_bert = True
            self.bert_embedder = bert_embedder
        else:
            self.use_bert = False
            self.basic_embedder = BasicTextFieldEmbedder({
                "tokens": Embedding(vocab.get_vocab_size(namespace="tokens"), 1024)
            })
            self.rnn = Seq2SeqEncoder.from_params(Params({     
                "type": "lstm",
                "input_size": 1024,
                "hidden_size": 512,
                "bidirectional": True,
                "batch_first": True
            }))

        self.encoder = encoder

        if encoder:
            hidden2tag_in_dim = encoder.get_output_dim()
        else:
            hidden2tag_in_dim = bert_embedder.get_output_dim()
        self.hidden2tag = TimeDistributed(torch.nn.Linear(
            in_features=hidden2tag_in_dim,
            out_features=vocab.get_vocab_size("labels")))
        
        if dropout:
            self.dropout = torch.nn.Dropout(dropout)
        else:
            self.dropout = None
        
        self.use_crf = use_crf
        if use_crf:
            crf_constraints = allowed_transitions(
                constraint_type="BIO",
                labels=vocab.get_index_to_token_vocabulary("labels")
            )
            self.crf = ConditionalRandomField(
                num_tags=vocab.get_vocab_size("labels"),
                constraints=crf_constraints,
                include_start_end_transitions=True
            )
        
        self.f1 = SpanBasedF1Measure(vocab, 
                                     tag_namespace="labels",
                                     ignore_classes=["news/type","negation",
                                                     "demonstrative_reference",
                                                     "timer/noun","timer/attributes"],
                                     label_encoding="BIO")
Example #20
    def __init__(self, vocab, embed_dim=100, window_sizes=(2, 3, 4, 5), num_filters=128):
        super(CNNModel, self).__init__()

        self.embeds = Embedding.from_params(
            vocab,
            Params({'vocab_namespace': 'tokens',
                    'embedding_dim': embed_dim,
                    'trainable': True,
                    'padding_index': 0,
                    'pretrained_file':
                        'https://s3-us-west-2.amazonaws.com/allennlp/datasets/glove/glove.6B.100d.txt.gz'
                    }))
        self.binary_feature_embedding = Embedding(2, embed_dim)

        self.convs = nn.ModuleList([
            nn.Conv1d(embed_dim * 2, num_filters, kernel_size=window_size, padding=window_size - 1) for window_size in
            window_sizes
        ])
        self.fc = nn.Linear(num_filters * len(window_sizes), 1, bias=False)
Example #21
 def from_params(cls, vocab: Vocabulary,
                 params: Params) -> 'AfixEmbedding':  # type: ignore
     # pylint: disable=arguments-differ
     embedding_params: Params = params.pop("embedding")
     # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
     # that to be "token_characters" by default.
     embedding_params.setdefault("vocab_namespace", "afixes")
     embedding = Embedding.from_params(vocab, embedding_params)
     dropout = params.pop_float("dropout", 0.0)
     params.assert_empty(cls.__name__)
     return cls(embedding, dropout)
Example #22
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'TokenCharactersEncoder':
     embedding_params: Params = params.pop("embedding")
     # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
     # that to be "tokens" by default.
     embedding_params.setdefault("vocab_namespace", "token_bpe")
     embedding = Embedding.from_params(vocab, embedding_params)
     encoder_params: Params = params.pop("encoder")
     encoder = Seq2VecEncoder.from_params(encoder_params)
     dropout = params.pop("dropout", 0.0)
     params.assert_empty(cls.__name__)
     return cls(embedding, encoder, dropout)
Example #23
 def from_params(cls, vocab, params):  # type: ignore
     # pylint: disable=arguments-differ
     embedding_params = params.pop(u"embedding")
     # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
     # that to be "token_characters" by default.
     embedding_params.setdefault(u"vocab_namespace", u"token_characters")
     embedding = Embedding.from_params(vocab, embedding_params)
     encoder_params = params.pop(u"encoder")
     encoder = Seq2VecEncoder.from_params(encoder_params)
     dropout = params.pop_float(u"dropout", 0.0)
     params.assert_empty(cls.__name__)
     return cls(embedding, encoder, dropout)
Example #24
def _build_model(config,
                 vocab,
                 lemmatize_helper,
                 morpho_vectorizer,
                 bert_max_length=None):
    embedder = _load_embedder(config, vocab, bert_max_length)

    input_dim = embedder.get_output_dim()
    if config.embedder.use_pymorphy:
        input_dim += morpho_vectorizer.morpho_vector_dim

    pos_tag_embedding = None
    if config.task.task_type == 'single' and config.task.params['use_pos_tag']:
        pos_tag_embedding = Embedding(
            num_embeddings=vocab.get_vocab_size('grammar_value_tags'),
            embedding_dim=config.task.params['pos_embedding_dim'])
        input_dim += config.task.params['pos_embedding_dim']

    encoder = None
    if config.encoder.encoder_type != 'lstm':
        encoder = PassThroughEncoder(input_dim=input_dim)
    elif config.encoder.use_weight_drop:
        encoder = LstmWeightDropSeq2SeqEncoder(
            input_dim,
            config.encoder.hidden_dim,
            num_layers=config.encoder.num_layers,
            bidirectional=True,
            dropout=config.encoder.dropout,
            variational_dropout=config.encoder.variational_dropout)
    else:
        encoder = PytorchSeq2SeqWrapper(
            torch.nn.LSTM(input_dim,
                          config.encoder.hidden_dim,
                          num_layers=config.encoder.num_layers,
                          dropout=config.encoder.dropout,
                          bidirectional=True,
                          batch_first=True))

    return DependencyParser(
        vocab=vocab,
        text_field_embedder=embedder,
        encoder=encoder,
        lemmatize_helper=lemmatize_helper,
        task_config=config.task,
        pos_tag_embedding=pos_tag_embedding,
        morpho_vector_dim=morpho_vectorizer.morpho_vector_dim
        if config.embedder.use_pymorphy else 0,
        tag_representation_dim=config.parser.tag_representation_dim,
        arc_representation_dim=config.parser.arc_representation_dim,
        dropout=config.parser.dropout,
        input_dropout=config.embedder.dropout,
        gram_val_representation_dim=config.parser.gram_val_representation_dim,
        lemma_representation_dim=config.parser.lemma_representation_dim)
Example #25
 def from_params(cls, vocab: Vocabulary, params: Params) -> 'TokenCharactersEncoder':  # type: ignore
     # pylint: disable=arguments-differ
     embedding_params: Params = params.pop("embedding")
     # Embedding.from_params() uses "tokens" as the default namespace, but we need to change
     # that to be "token_characters" by default.
     embedding_params.setdefault("vocab_namespace", "token_characters")
     embedding = Embedding.from_params(vocab, embedding_params)
     encoder_params: Params = params.pop("encoder")
     encoder = Seq2VecEncoder.from_params(encoder_params)
     dropout = params.pop_float("dropout", 0.0)
     params.assert_empty(cls.__name__)
     return cls(embedding, encoder, dropout)
Example #26
    def __init__(self,
                 input_dim: int,
                 combination: str = "max",
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False,
                 use_exclusive_start_indices: bool = False) -> None:
        super().__init__()

        self._input_dim = input_dim
        self._filter_size = int(combination.split(',')[0])
        if self._filter_size % 2 != 1:
            raise ConfigurationError("The filter size must be an odd.")
        self._combination = combination.split(',')[1]
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths
        if bucket_widths:
            raise ConfigurationError("not support")

        self._use_exclusive_start_indices = use_exclusive_start_indices
        if use_exclusive_start_indices:
            raise ConfigurationError("not support")

        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings,
                                                   span_width_embedding_dim)
        elif not all(
            [num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError(
                "To use a span width embedding representation, you must"
                "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None

        self._conv = Conv1d(self._input_dim,
                            self._input_dim,
                            kernel_size=self._filter_size,
                            padding=int(floor(self._filter_size / 2)))
        xavier_normal_(self._conv.weight)
Example #27
 def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(self):
     vocab = Vocabulary()
     vocab.add_token_to_namespace(u"word")
     vocab.add_token_to_namespace(u"word2")
     embeddings_filename = unicode(self.TEST_DIR / u"embeddings.gz")
     with gzip.open(embeddings_filename, u'wb') as embeddings_file:
         embeddings_file.write(u"word 1.0 2.3 -1.0\n".encode(u'utf-8'))
     params = Params({
             u'pretrained_file': embeddings_filename,
             u'embedding_dim': 3,
             })
     embedding_layer = Embedding.from_params(vocab, params)
     word_vector = embedding_layer.weight.data[vocab.get_token_index(u"word2")]
     assert not numpy.allclose(word_vector.numpy(), numpy.array([0.0, 0.0, 0.0]))
Example #28
 def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(self):
     vocab = Vocabulary()
     vocab.add_token_to_namespace("word")
     vocab.add_token_to_namespace("word2")
     embeddings_filename = self.TEST_DIR + "embeddings.gz"
     with gzip.open(embeddings_filename, 'wb') as embeddings_file:
         embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
     params = Params({
             'pretrained_file': embeddings_filename,
             'embedding_dim': 3,
             })
     embedding_layer = Embedding.from_params(vocab, params)
     word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
     assert not numpy.allclose(word_vector.numpy(), numpy.array([0.0, 0.0, 0.0]))
Example #29
def main():
    args = parse_args()
    vocabulary = os.path.join('.', 'vocabulary')
    vocab = Vocabulary.from_files(vocabulary)
    embedding = Embedding(num_embeddings=26729,
                          embedding_dim=200,
                          padding_index=0,
                          trainable=False)
    token_embedders = {'tokens': embedding}
    basic_text_field_embedder = BasicTextFieldEmbedder(token_embedders)
    transformer = Transformer(attention_dropout_prob=0.1,
                              attention_type="dot_product",
                              dropout_prob=0.1,
                              input_size=200,
                              intermediate_act_fn="gelu",
                              intermediate_size=3072,
                              key_depth=1024,
                              max_position_embeddings=256,
                              memory_size=200,
                              num_heads=16,
                              num_hidden_layers=6,
                              type_vocab_size=2,
                              use_fp16=False,
                              value_depth=1024,
                              use_token_type=True,
                              use_position_embeddings=True)
    model = JointIntentSlotDepsModel(
        text_field_embedder=basic_text_field_embedder,
        transformer=transformer,
        vocab=vocab,
        label_encoding="BIO",
        constrain_crf_decoding=True,
        calculate_span_f1=True,
        include_start_end_transitions=True,
        use_fp16=False)
    dummy_input = torch.ones(1, 14, 200, dtype=torch.float)
    dummy_mask = torch.ones(1, 14, dtype=torch.float)
    segment_ids = torch.ones(1, 14, dtype=torch.float)
    output = model._transformer(dummy_input, dummy_mask, segment_ids)
    model_state = torch.load('/tmp/model.th')
    model.load_state_dict(model_state)

    torch.onnx.export(model=model._transformer,
                      args=(dummy_input, dummy_mask, segment_ids),
                      f=args.output_pt,
                      verbose=True,
                      export_params=True)
    return 0
Example #30
    def test_read_hdf5_raises_on_invalid_shape(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace(u"word")
        embeddings_filename = unicode(self.TEST_DIR / u"embeddings.hdf5")
        embeddings = numpy.random.rand(vocab.get_vocab_size(), 10)
        with h5py.File(embeddings_filename, u'w') as fout:
            _ = fout.create_dataset(
                    u'embedding', embeddings.shape, dtype=u'float32', data=embeddings
            )

        params = Params({
                u'pretrained_file': embeddings_filename,
                u'embedding_dim': 5,
                })
        with pytest.raises(ConfigurationError):
            _ = Embedding.from_params(vocab, params)
Example #31
 def test_embedding_layer_actually_initializes_word_vectors_correctly(self):
     vocab = Vocabulary()
     vocab.add_token_to_namespace("word")
     vocab.add_token_to_namespace("word2")
     embeddings_filename = self.TEST_DIR + "embeddings.gz"
     with gzip.open(embeddings_filename, 'wb') as embeddings_file:
         embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
     params = Params({
             'pretrained_file': embeddings_filename,
             'embedding_dim': 3,
             })
     embedding_layer = Embedding.from_params(vocab, params)
     word_vector = embedding_layer.weight.data[vocab.get_token_index("word")]
     assert numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
     word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
     assert not numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
Example #32
    def test_read_hdf5_raises_on_invalid_shape(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("word")
        embeddings_filename = self.TEST_DIR + "embeddings.hdf5"
        embeddings = numpy.random.rand(vocab.get_vocab_size(), 10)
        with h5py.File(embeddings_filename, 'w') as fout:
            _ = fout.create_dataset(
                    'embedding', embeddings.shape, dtype='float32', data=embeddings
            )

        params = Params({
                'pretrained_file': embeddings_filename,
                'embedding_dim': 5,
                })
        with pytest.raises(ConfigurationError):
            _ = Embedding.from_params(vocab, params)
Example #33
 def test_min_pretrained_embeddings(self):
     vocab = Vocabulary()
     vocab.add_token_to_namespace('the')
     vocab.add_token_to_namespace('a')
     params = Params({
         'pretrained_file':
         str(self.FIXTURES_ROOT / 'embeddings/glove.6B.100d.sample.txt.gz'),
         'embedding_dim':
         100,
         'min_pretrained_embeddings':
         50
     })
     # This will now update vocab
     _ = Embedding.from_params(vocab, params)
     assert vocab.get_vocab_size() >= 50
     assert vocab.get_token_index("his") > 1  # not @@UNKNOWN@@
Example #34
def test_concat_position_embeddings():
    # Test the normal case
    batch_size = 2
    number_targets = 2
    text_seq_length = 3
    encoded_dim = 4

    encoded_text_tensor = [[[0.5, 0.3, 0.2, 0.6], [0.2, 0.3, 0.4, 0.7],
                            [0.5, 0.4, 0.6, 0.2]],
                           [[0.4, 0.5, 0.3, 0.7], [0.3, 0.1, 0.2, 0.0],
                            [0.0, 0.0, 0.0, 0.0]],
                           [[0.5, 0.3, 0.2, 0.3], [0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0]],
                           [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0],
                            [0.0, 0.0, 0.0, 0.0]]]
    encoded_text_tensor = torch.Tensor(encoded_text_tensor)
    assert (batch_size * number_targets, text_seq_length,
            encoded_dim) == encoded_text_tensor.shape

    position_indexes = torch.Tensor([[[2, 1, 2], [1, 2, 0]],
                                     [[1, 0, 0], [0, 0, 0]]])
    position_indexes = position_indexes.type(torch.long)
    assert (batch_size, number_targets,
            text_seq_length) == position_indexes.shape
    position_indexes = {'position_tokens': {'tokens': position_indexes}}

    embedding = Embedding(num_embeddings=3, embedding_dim=5, trainable=False)
    target_position_embedding = BasicTextFieldEmbedder(
        {'position_tokens': embedding})
    assert (batch_size, number_targets, text_seq_length,
            5) == target_position_embedding(position_indexes).shape

    test_encoded_text_tensor = util.concat_position_embeddings(
        encoded_text_tensor, position_indexes, target_position_embedding)
    assert (batch_size * number_targets, text_seq_length,
            encoded_dim + 5) == test_encoded_text_tensor.shape

    # Test the case where it should return just the original encoded_text_tensor
    test_encoded_text_tensor = util.concat_position_embeddings(
        encoded_text_tensor, None, None)
    assert torch.all(torch.eq(test_encoded_text_tensor, encoded_text_tensor))

    # Test ValueError when the `target_position_embedding` is not None but
    # position_indexes is None
    with pytest.raises(ValueError):
        util.concat_position_embeddings(encoded_text_tensor, None,
                                        target_position_embedding)
Example #35
 def test_get_embedding_layer_initializes_unseen_words_randomly_not_zero(
         self):
     vocab = Vocabulary()
     vocab.add_token_to_namespace("word")
     vocab.add_token_to_namespace("word2")
     embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
     with gzip.open(embeddings_filename, "wb") as embeddings_file:
         embeddings_file.write("word 1.0 2.3 -1.0\n".encode("utf-8"))
     params = Params({
         "pretrained_file": embeddings_filename,
         "embedding_dim": 3
     })
     embedding_layer = Embedding.from_params(params, vocab=vocab)
     word_vector = embedding_layer.weight.data[vocab.get_token_index(
         "word2")]
     assert not numpy.allclose(word_vector.numpy(),
                               numpy.array([0.0, 0.0, 0.0]))
Example #36
    def test_read_hdf5_format_file(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace("word")
        vocab.add_token_to_namespace("word2")
        embeddings_filename = self.TEST_DIR + "embeddings.hdf5"
        embeddings = numpy.random.rand(vocab.get_vocab_size(), 5)
        with h5py.File(embeddings_filename, 'w') as fout:
            _ = fout.create_dataset(
                    'embedding', embeddings.shape, dtype='float32', data=embeddings
            )

        params = Params({
                'pretrained_file': embeddings_filename,
                'embedding_dim': 5,
                })
        embedding_layer = Embedding.from_params(vocab, params)
        assert numpy.allclose(embedding_layer.weight.data.numpy(), embeddings)
Example #37
    def test_forward_works_with_projection_layer(self):
        vocab = Vocabulary()
        vocab.add_token_to_namespace('the')
        vocab.add_token_to_namespace('a')
        params = Params({
                'pretrained_file': 'tests/fixtures/glove.6B.300d.sample.txt.gz',
                'embedding_dim': 300,
                'projection_dim': 20
                })
        embedding_layer = Embedding.from_params(vocab, params)
        input_tensor = Variable(torch.LongTensor([[3, 2, 1, 0]]))
        embedded = embedding_layer(input_tensor).data.numpy()
        assert embedded.shape == (1, 4, 20)

        input_tensor = Variable(torch.LongTensor([[[3, 2, 1, 0]]]))
        embedded = embedding_layer(input_tensor).data.numpy()
        assert embedded.shape == (1, 1, 4, 20)
Example #38
 def test_embedding_layer_actually_initializes_word_vectors_correctly(self):
     vocab = Vocabulary()
     vocab.add_token_to_namespace("word")
     vocab.add_token_to_namespace("word2")
     unicode_space = "\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0"
     vocab.add_token_to_namespace(unicode_space)
     embeddings_filename = str(self.TEST_DIR / "embeddings.gz")
     with gzip.open(embeddings_filename, 'wb') as embeddings_file:
         embeddings_file.write("word 1.0 2.3 -1.0\n".encode('utf-8'))
         embeddings_file.write(f"{unicode_space} 3.4 3.3 5.0\n".encode('utf-8'))
     params = Params({
             'pretrained_file': embeddings_filename,
             'embedding_dim': 3,
             })
     embedding_layer = Embedding.from_params(vocab, params)
     word_vector = embedding_layer.weight.data[vocab.get_token_index("word")]
     assert numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
     word_vector = embedding_layer.weight.data[vocab.get_token_index(unicode_space)]
     assert numpy.allclose(word_vector.numpy(), numpy.array([3.4, 3.3, 5.0]))
     word_vector = embedding_layer.weight.data[vocab.get_token_index("word2")]
     assert not numpy.allclose(word_vector.numpy(), numpy.array([1.0, 2.3, -1.0]))
Example #39
    def __init__(self,
                 input_dim: int,
                 combination: str = "x,y",
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False,
                 use_exclusive_start_indices: bool = False) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._combination = combination
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths

        self._use_exclusive_start_indices = use_exclusive_start_indices
        if use_exclusive_start_indices:
            self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim)]))

        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings, span_width_embedding_dim)
        elif not all([num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError("To use a span width embedding representation, you must"
                                     "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None
Example #40
class BidirectionalEndpointSpanExtractor(SpanExtractor):
    """
    Represents spans from a bidirectional encoder as a concatenation of two different
    representations of the span endpoints, one for the forward direction of the encoder
    and one from the backward direction. This type of representation encodes some subtlety,
    because when you consider the forward and backward directions separately, the end index
    of the span for the backward direction's representation is actually the start index.

    By default, this ``SpanExtractor`` represents spans as
    ``sequence_tensor[inclusive_span_end] - sequence_tensor[exclusive_span_start]``
    meaning that the representation is the difference between the last word in the span
    and the word `before` the span started. Note that the start and end indices are with
    respect to the direction that the RNN is going in, so for the backward direction, the
    start/end indices are reversed.

    Additionally, the width of the spans can be embedded and concatenated on to the
    final combination.

    The following other types of representation are supported for both the forward and backward
    directions, assuming that ``x = span_start_embeddings`` and ``y = span_end_embeddings``.

    ``x``, ``y``, ``x*y``, ``x+y``, ``x-y``, ``x/y``, where each of those binary operations
    is performed elementwise.  You can list as many combinations as you want, comma separated.
    For example, you might give ``x,y,x*y`` as the ``combination`` parameter to this class.
    The computed similarity function would then be ``[x; y; x*y]``, which can then be optionally
    concatenated with an embedded representation of the width of the span.

    Parameters
    ----------
    input_dim : ``int``, required.
        The final dimension of the ``sequence_tensor``.
    forward_combination : str, optional (default = "y-x").
        The method used to combine the ``forward_start_embeddings`` and ``forward_end_embeddings``
        for the forward direction of the bidirectional representation.
        See above for a full description.
    backward_combination : str, optional (default = "y-x").
        The method used to combine the ``backward_start_embeddings`` and ``backward_end_embeddings``
        for the backward direction of the bidirectional representation.
        See above for a full description.
    num_width_embeddings : ``int``, optional (default = None).
        Specifies the number of buckets to use when representing
        span width features.
    span_width_embedding_dim : ``int``, optional (default = None).
        The embedding size for the span_width features.
    bucket_widths : ``bool``, optional (default = False).
        Whether to bucket the span widths into log-space buckets. If ``False``,
        the raw span widths are used.
    use_sentinels : ``bool``, optional (default = ``True``).
        If ``True``, sentinels are used to represent exclusive span indices for the elements
        in the first and last positions in the sequence (as the exclusive indices for these
    elements are outside of the sequence boundary). This is not strictly necessary,
        as you may know that your exclusive start and end indices are always within your sequence
        representation, such as if you have appended/prepended <START> and <END> tokens to your
        sequence.
    """
    def __init__(self,
                 input_dim: int,
                 forward_combination: str = "y-x",
                 backward_combination: str = "y-x",
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False,
                 use_sentinels: bool = True) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._forward_combination = forward_combination
        self._backward_combination = backward_combination
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths

        if self._input_dim % 2 != 0:
            raise ConfigurationError("The input dimension is not divisible by 2, but the "
                                     "BidirectionalEndpointSpanExtractor assumes the embedded representation "
                                     "is bidirectional (and hence divisible by 2).")
        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings, span_width_embedding_dim)
        elif not all([num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError("To use a span width embedding representation, you must"
                                     "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None

        self._use_sentinels = use_sentinels
        if use_sentinels:
            self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))
            self._end_sentinel = Parameter(torch.randn([1, 1, int(input_dim / 2)]))

    def get_input_dim(self) -> int:
        return self._input_dim

    def get_output_dim(self) -> int:
        unidirectional_dim = int(self._input_dim / 2)
        forward_combined_dim = util.get_combined_dim(self._forward_combination,
                                                     [unidirectional_dim, unidirectional_dim])
        backward_combined_dim = util.get_combined_dim(self._backward_combination,
                                                      [unidirectional_dim, unidirectional_dim])
        if self._span_width_embedding is not None:
            return forward_combined_dim + backward_combined_dim + \
                   self._span_width_embedding.get_output_dim()
        return forward_combined_dim + backward_combined_dim

    @overrides
    def forward(self,
                sequence_tensor: torch.FloatTensor,
                span_indices: torch.LongTensor,
                sequence_mask: torch.LongTensor = None,
                span_indices_mask: torch.LongTensor = None) -> torch.FloatTensor:

        # Both of shape (batch_size, sequence_length, embedding_size / 2)
        forward_sequence, backward_sequence = sequence_tensor.split(int(self._input_dim / 2), dim=-1)
        forward_sequence = forward_sequence.contiguous()
        backward_sequence = backward_sequence.contiguous()

        # shape (batch_size, num_spans)
        span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]

        if span_indices_mask is not None:
            span_starts = span_starts * span_indices_mask
            span_ends = span_ends * span_indices_mask
        # We want `exclusive` span starts, so we remove 1 from the forward span starts
        # as the AllenNLP ``SpanField`` is inclusive.
        # shape (batch_size, num_spans)
        exclusive_span_starts = span_starts - 1
        # shape (batch_size, num_spans, 1)
        start_sentinel_mask = (exclusive_span_starts == -1).long().unsqueeze(-1)

        # We want `exclusive` span ends for the backward direction
        # (so that the `start` of the span in that direction is exclusive), so
        # we add 1 to the span ends as the AllenNLP ``SpanField`` is inclusive.
        exclusive_span_ends = span_ends + 1

        if sequence_mask is not None:
            # shape (batch_size)
            sequence_lengths = util.get_lengths_from_binary_sequence_mask(sequence_mask)
        else:
            # shape (batch_size), filled with the sequence length of the sequence_tensor.
            sequence_lengths = util.ones_like(sequence_tensor[:, 0, 0]).long() * sequence_tensor.size(1)

        # shape (batch_size, num_spans, 1)
        end_sentinel_mask = (exclusive_span_ends == sequence_lengths.unsqueeze(-1)).long().unsqueeze(-1)

        # Adding 1 to the span_ends to make them exclusive may have pushed indices equal to the
        # sequence_length out of bounds, so we multiply by the inverse of the end_sentinel_mask
        # to zero out these indices (we will replace them anyway in the block below).
        # The same argument applies to the exclusive span start indices.
        exclusive_span_ends = exclusive_span_ends * (1 - end_sentinel_mask.squeeze(-1))
        exclusive_span_starts = exclusive_span_starts * (1 - start_sentinel_mask.squeeze(-1))

        # We'll check the indices here at runtime, because it's difficult to debug
        # if this goes wrong and it's tricky to get right.
        if (exclusive_span_starts < 0).any() or (exclusive_span_ends > sequence_lengths.unsqueeze(-1)).any():
            raise ValueError(f"Adjusted span indices must lie inside the length of the sequence tensor, "
                             f"but found: exclusive_span_starts: {exclusive_span_starts}, "
                             f"exclusive_span_ends: {exclusive_span_ends} for a sequence tensor with lengths "
                             f"{sequence_lengths}.")

        # Forward Direction: start indices are exclusive. Shape (batch_size, num_spans, input_size / 2)
        forward_start_embeddings = util.batched_index_select(forward_sequence, exclusive_span_starts)
        # Forward Direction: end indices are inclusive, so we can just use span_ends.
        # Shape (batch_size, num_spans, input_size / 2)
        forward_end_embeddings = util.batched_index_select(forward_sequence, span_ends)

        # Backward Direction: The backward start embeddings use the `forward` end
        # indices, because we are going backwards.
        # Shape (batch_size, num_spans, input_size / 2)
        backward_start_embeddings = util.batched_index_select(backward_sequence, exclusive_span_ends)
        # Backward Direction: The backward end embeddings use the `forward` start
        # indices, because we are going backwards.
        # Shape (batch_size, num_spans, input_size / 2)
        backward_end_embeddings = util.batched_index_select(backward_sequence, span_starts)

        if self._use_sentinels:
            # If we're using sentinels, we need to replace all the elements which were
            # outside the dimensions of the sequence_tensor with either the start sentinel,
            # or the end sentinel.
            float_end_sentinel_mask = end_sentinel_mask.float()
            float_start_sentinel_mask = start_sentinel_mask.float()
            forward_start_embeddings = forward_start_embeddings * (1 - float_start_sentinel_mask) \
                                        + float_start_sentinel_mask * self._start_sentinel
            backward_start_embeddings = backward_start_embeddings * (1 - float_end_sentinel_mask) \
                                        + float_end_sentinel_mask * self._end_sentinel

        # Now we combine the forward and backward spans in the manner specified by the
        # respective combinations and concatenate these representations.
        # Shape (batch_size, num_spans, forward_combination_dim)
        forward_spans = util.combine_tensors(self._forward_combination,
                                             [forward_start_embeddings, forward_end_embeddings])
        # Shape (batch_size, num_spans, backward_combination_dim)
        backward_spans = util.combine_tensors(self._backward_combination,
                                              [backward_start_embeddings, backward_end_embeddings])
        # Shape (batch_size, num_spans, forward_combination_dim + backward_combination_dim)
        span_embeddings = torch.cat([forward_spans, backward_spans], -1)

        if self._span_width_embedding is not None:
            # Embed the span widths and concatenate to the rest of the representations.
            if self._bucket_widths:
                span_widths = util.bucket_values(span_ends - span_starts,
                                                 num_total_buckets=self._num_width_embeddings)
            else:
                span_widths = span_ends - span_starts

            span_width_embeddings = self._span_width_embedding(span_widths)
            return torch.cat([span_embeddings, span_width_embeddings], -1)

        if span_indices_mask is not None:
            return span_embeddings * span_indices_mask.float().unsqueeze(-1)
        return span_embeddings

    @classmethod
    def from_params(cls, params: Params) -> "BidirectionalEndpointSpanExtractor":
        input_dim = params.pop_int("input_dim")
        forward_combination = params.pop("forward_combination", "y-x")
        backward_combination = params.pop("backward_combination", "x-y")
        num_width_embeddings = params.pop_int("num_width_embeddings", None)
        span_width_embedding_dim = params.pop_int("span_width_embedding_dim", None)
        bucket_widths = params.pop_bool("bucket_widths", False)
        use_sentinels = params.pop_bool("use_sentinels", True)
        return BidirectionalEndpointSpanExtractor(input_dim=input_dim,
                                                  forward_combination=forward_combination,
                                                  backward_combination=backward_combination,
                                                  num_width_embeddings=num_width_embeddings,
                                                  span_width_embedding_dim=span_width_embedding_dim,
                                                  bucket_widths=bucket_widths,
                                                  use_sentinels=use_sentinels)
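

# --- Usage sketch (added for illustration, not part of the original module) ---
# A minimal example of how BidirectionalEndpointSpanExtractor might be used, assuming
# torch and the class above are importable. The sequence representation is assumed to
# be the concatenation of forward and backward encoder states, so ``input_dim`` is even.
def _bidirectional_endpoint_span_extractor_sketch():
    # One sequence of length 4 with a 6-dimensional bidirectional representation
    # (3 dimensions per direction).
    sequence_tensor = torch.randn(1, 4, 6)
    # Two inclusive spans: (0, 1) touches the start of the sequence and (2, 3)
    # touches the end, so both the start and end sentinels are exercised.
    span_indices = torch.LongTensor([[[0, 1], [2, 3]]])

    extractor = BidirectionalEndpointSpanExtractor(input_dim=6)
    span_embeddings = extractor(sequence_tensor, span_indices)

    # With the default "y-x" forward and "x-y" backward combinations, each direction
    # contributes 3 dimensions, so the output is (batch_size, num_spans, 6).
    assert span_embeddings.size() == (1, 2, extractor.get_output_dim())

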
class EndpointSpanExtractor(SpanExtractor):
    """
    Represents spans as a combination of the embeddings of their endpoints. Additionally,
    the width of the spans can be embedded and concatenated on to the final combination.

    The following types of representation are supported, assuming that
    ``x = span_start_embeddings`` and ``y = span_end_embeddings``.

    ``x``, ``y``, ``x*y``, ``x+y``, ``x-y``, ``x/y``, where each of those binary operations
    is performed elementwise.  You can list as many combinations as you want, comma separated.
    For example, you might give ``x,y,x*y`` as the ``combination`` parameter to this class.
    The computed similarity function would then be ``[x; y; x*y]``, which can then be optionally
    concatenated with an embedded representation of the width of the span.

    Parameters
    ----------
    input_dim : ``int``, required.
        The final dimension of the ``sequence_tensor``.
    combination : ``str``, optional (default = ``"x,y"``).
        The method used to combine the ``start_embedding`` and ``end_embedding``
        representations. See above for a full description.
    num_width_embeddings : ``int``, optional (default = None).
        Specifies the number of buckets to use when representing
        span width features.
    span_width_embedding_dim : ``int``, optional (default = None).
        The embedding size for the span_width features.
    bucket_widths : ``bool``, optional (default = False).
        Whether to bucket the span widths into log-space buckets. If ``False``,
        the raw span widths are used.
    use_exclusive_start_indices : ``bool``, optional (default = ``False``).
        If ``True``, the start indices extracted are converted to exclusive indices. Sentinels
        are used to represent exclusive span indices for the elements in the first
        position in the sequence (as the exclusive indices for these elements are outside
        of the sequence boundary) so that start indices can be exclusive.
        NOTE: This option can be helpful to avoid the pathological case in which you
        want span differences for length 1 spans: with inclusive indices, such spans
        reduce to an ``x - x`` operation, which yields a zero vector regardless of the span.
    """
    def __init__(self,
                 input_dim: int,
                 combination: str = "x,y",
                 num_width_embeddings: int = None,
                 span_width_embedding_dim: int = None,
                 bucket_widths: bool = False,
                 use_exclusive_start_indices: bool = False) -> None:
        super().__init__()
        self._input_dim = input_dim
        self._combination = combination
        self._num_width_embeddings = num_width_embeddings
        self._bucket_widths = bucket_widths

        self._use_exclusive_start_indices = use_exclusive_start_indices
        if use_exclusive_start_indices:
            self._start_sentinel = Parameter(torch.randn([1, 1, int(input_dim)]))

        if num_width_embeddings is not None and span_width_embedding_dim is not None:
            self._span_width_embedding = Embedding(num_width_embeddings, span_width_embedding_dim)
        elif not all([num_width_embeddings is None, span_width_embedding_dim is None]):
            raise ConfigurationError("To use a span width embedding representation, you must"
                                     "specify both num_width_buckets and span_width_embedding_dim.")
        else:
            self._span_width_embedding = None

    def get_input_dim(self) -> int:
        return self._input_dim

    def get_output_dim(self) -> int:
        combined_dim = util.get_combined_dim(self._combination, [self._input_dim, self._input_dim])
        if self._span_width_embedding is not None:
            return combined_dim + self._span_width_embedding.get_output_dim()
        return combined_dim

    @overrides
    def forward(self,
                sequence_tensor: torch.FloatTensor,
                span_indices: torch.LongTensor,
                sequence_mask: torch.LongTensor = None,
                span_indices_mask: torch.LongTensor = None) -> torch.FloatTensor:
        # shape (batch_size, num_spans)
        span_starts, span_ends = [index.squeeze(-1) for index in span_indices.split(1, dim=-1)]

        if span_indices_mask is not None:
            # It's not strictly necessary to multiply the span indices by the mask here,
            # but it's possible that the span representation was padded with something other
            # than 0 (such as -1, which would be an invalid index), so we do so anyway to
            # be safe.
            span_starts = span_starts * span_indices_mask
            span_ends = span_ends * span_indices_mask

        if not self._use_exclusive_start_indices:
            start_embeddings = util.batched_index_select(sequence_tensor, span_starts)
            end_embeddings = util.batched_index_select(sequence_tensor, span_ends)

        else:
            # We want `exclusive` span starts, so we remove 1 from the forward span starts
            # as the AllenNLP ``SpanField`` is inclusive.
            # shape (batch_size, num_spans)
            exclusive_span_starts = span_starts - 1
            # shape (batch_size, num_spans, 1)
            start_sentinel_mask = (exclusive_span_starts == -1).long().unsqueeze(-1)
            exclusive_span_starts = exclusive_span_starts * (1 - start_sentinel_mask.squeeze(-1))

            # We'll check the indices here at runtime, because it's difficult to debug
            # if this goes wrong and it's tricky to get right.
            if (exclusive_span_starts < 0).any():
                raise ValueError(f"Adjusted span indices must lie inside the the sequence tensor, "
                                 f"but found: exclusive_span_starts: {exclusive_span_starts}.")

            start_embeddings = util.batched_index_select(sequence_tensor, exclusive_span_starts)
            end_embeddings = util.batched_index_select(sequence_tensor, span_ends)

            # We're using sentinels, so we need to replace all the elements which were
            # outside the dimensions of the sequence_tensor with the start sentinel.
            float_start_sentinel_mask = start_sentinel_mask.float()
            start_embeddings = start_embeddings * (1 - float_start_sentinel_mask) \
                                        + float_start_sentinel_mask * self._start_sentinel

        combined_tensors = util.combine_tensors(self._combination, [start_embeddings, end_embeddings])
        if self._span_width_embedding is not None:
            # Embed the span widths and concatenate to the rest of the representations.
            if self._bucket_widths:
                span_widths = util.bucket_values(span_ends - span_starts,
                                                 num_total_buckets=self._num_width_embeddings)
            else:
                span_widths = span_ends - span_starts

            span_width_embeddings = self._span_width_embedding(span_widths)
            return torch.cat([combined_tensors, span_width_embeddings], -1)

        if span_indices_mask is not None:
            return combined_tensors * span_indices_mask.unsqueeze(-1).float()
        return combined_tensors
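

# --- Usage sketch (added for illustration, not part of the original module) ---
# A minimal example of how EndpointSpanExtractor might be used, assuming torch and
# the class above are importable. It shows the default "x,y" combination together
# with an optional bucketed span width embedding.
def _endpoint_span_extractor_sketch():
    # Two sequences of length 5 with an 8-dimensional representation.
    sequence_tensor = torch.randn(2, 5, 8)
    # Inclusive (start, end) span indices for each sequence in the batch.
    span_indices = torch.LongTensor([[[1, 3], [0, 4]],
                                     [[2, 4], [1, 2]]])

    extractor = EndpointSpanExtractor(input_dim=8,
                                      combination="x,y",
                                      num_width_embeddings=10,
                                      span_width_embedding_dim=4,
                                      bucket_widths=True)
    span_embeddings = extractor(sequence_tensor, span_indices)

    # "x,y" concatenates the start and end embeddings (8 + 8), and the width
    # embedding adds another 4 dimensions.
    assert extractor.get_output_dim() == 8 + 8 + 4
    assert span_embeddings.size() == (2, 2, extractor.get_output_dim())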