Code example #1
    def test_init_checks_dropout_consistency(self):
        with pytest.raises(ConfigurationError):
            Maxout(input_dim=2,
                   num_layers=3,
                   output_dims=5,
                   pool_sizes=4,
                   dropout=[0.2, 0.3])
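
For contrast, a minimal sketch of a construction that passes this consistency check: every list-valued argument must supply one entry per layer. The dimensions here are illustrative assumptions, not taken from the test.

from allennlp.modules import Maxout

# Consistent: num_layers=3 and each per-layer list has exactly three entries.
maxout = Maxout(input_dim=2,
                num_layers=3,
                output_dims=[5, 5, 5],
                pool_sizes=[4, 4, 4],
                dropout=[0.2, 0.3, 0.0])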
Code example #2
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)
        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   pre_encode_feedforward=pre_encode_feedforward,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   initializer=initializer,
                   regularizer=regularizer)
Code example #3
    def test_forward_gives_correct_output(self):
        params = Params({
            'input_dim': 2,
            'output_dims': 3,
            'pool_sizes': 4,
            'dropout': 0.0,
            'num_layers': 2
        })
        maxout = Maxout.from_params(params)

        constant_init = Initializer.from_params(
            Params({
                "type": "constant",
                "val": 1.
            }))
        initializer = InitializerApplicator([(".*", constant_init)])
        initializer(maxout)

        input_tensor = torch.FloatTensor([[-3, 1]])
        output = maxout(input_tensor).data.numpy()
        assert output.shape == (1, 3)
        # This output was checked by hand
        # The output of the first maxout layer is [-1, -1, -1], since the
        # matrix multiply gives us [-2]*12. Reshaping and maxing
        # produces [-2, -2, -2] and the bias increments these values.
        # The second layer output is [-2, -2, -2], since the matrix
        # multiply gives us [-3]*12. Reshaping and maxing
        # produces [-3, -3, -3] and the bias increments these values.
        assert_almost_equal(output, [[-2, -2, -2]])
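
The hand-checked numbers in the comments can be reproduced without allennlp. A minimal numpy sketch, assuming the layer structure implied by the params above (each layer is a linear map to output_dims * pool_sizes = 12 units, reshaped to (3, 4) and maxed over each pool; weights and biases are all ones after the constant initializer):

import numpy as np

x = np.array([[-3.0, 1.0]])  # input, shape (1, 2)

# Layer 1: the linear map gives [-2]*12, the bias adds 1, and maxing
# over pools of 4 yields [[-1., -1., -1.]].
h = (x @ np.ones((2, 12)) + 1.0).reshape(1, 3, 4).max(-1)

# Layer 2: the linear map gives [-3]*12, the bias adds 1, and maxing
# yields [[-2., -2., -2.]], matching the assertion above.
y = (h @ np.ones((3, 12)) + 1.0).reshape(1, 3, 4).max(-1)
print(y)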
Code example #4
    def test_init_checks_pool_sizes_consistency(self):
        with pytest.raises(ConfigurationError):
            Maxout(input_dim=2,
                   num_layers=2,
                   output_dims=5,
                   pool_sizes=[4, 5, 2],
                   dropout=0.0)
Code example #5
    def __init__(self,
                 vocab: Vocabulary,
                 embedder: TextFieldEmbedder,
                 question_encoder: Seq2SeqEncoder,
                 passage_encoder: Seq2SeqEncoder,
                 feed_forward: FeedForward,
                 dropout: float = 0.1,
                 num_decoding_steps: int = 40,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None) -> None:
        super(AnswerSynthesis, self).__init__(vocab, regularizer)
        self._vocab = vocab
        self._vocab_size = vocab.get_vocab_size()  # default: tokens
        self._num_decoding_steps = num_decoding_steps
        self._start_token_index = self._vocab.get_token_index(START_SYMBOL)
        self._end_token_index = self._vocab.get_token_index(END_SYMBOL)

        self._embedder = embedder
        self._question_encoder = question_encoder
        self._passage_encoder = passage_encoder

        encoding_dim = question_encoder.get_output_dim()
        embedding_dim = embedder.get_output_dim()

        self._span_start_embedding = nn.Embedding(2, 50)
        self._span_end_embedding = nn.Embedding(2, 50)
        self._gru_decoder = nn.GRUCell(encoding_dim + embedding_dim,
                                       encoding_dim)
        self._feed_forward = feed_forward

        self._attention = Attention(NonlinearSimilarity(encoding_dim))

        self._W_r = nn.Linear(embedding_dim, encoding_dim, bias=False)
        self._U_r = nn.Linear(encoding_dim, encoding_dim, bias=False)
        self._V_r = nn.Linear(encoding_dim, encoding_dim, bias=False)

        self._max_out = Maxout(encoding_dim,
                               num_layers=1,
                               output_dims=int(encoding_dim / 2),
                               pool_sizes=2)
        self._W_o = nn.Linear(int(encoding_dim / 2),
                              self._vocab_size,
                              bias=False)

        self._squad_metrics = SquadEmAndF1()
        #self._predict_acc = CategoricalAccuracy()

        if dropout > 0:
            self._dropout = torch.nn.Dropout(p=dropout)
        else:
            self._dropout = lambda x: x

        initializer(self)
        self._num_iter = 0
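
As a usage note: the Maxout here serves as the decoder's output head, halving the state dimension (pool_sizes=2 with output_dims = encoding_dim / 2) before the vocabulary projection _W_o. A standalone sketch with assumed sizes (encoding_dim and vocab_size below are illustrative, not from the source):

import torch
from allennlp.modules import Maxout

encoding_dim, vocab_size = 100, 20000  # assumed sizes for illustration
max_out = Maxout(encoding_dim,
                 num_layers=1,
                 output_dims=encoding_dim // 2,
                 pool_sizes=2)
w_o = torch.nn.Linear(encoding_dim // 2, vocab_size, bias=False)

state = torch.randn(4, encoding_dim)  # a batch of decoder hidden states
logits = w_o(max_out(state))          # shape: (4, 20000)
print(logits.shape)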
Code example #6
    def from_params(  # type: ignore
            cls, vocab: Vocabulary,
            params: Params) -> "BiattentiveClassificationNetwork":

        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(
            params.pop("pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool(
            "use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(
            params.pop("initializer", []))
        regularizer = RegularizerApplicator.from_params(
            params.pop("regularizer", []))
        params.assert_empty(cls.__name__)

        return cls(
            vocab=vocab,
            text_field_embedder=text_field_embedder,
            embedding_dropout=embedding_dropout,
            pre_encode_feedforward=pre_encode_feedforward,
            encoder=encoder,
            integrator=integrator,
            integrator_dropout=integrator_dropout,
            output_layer=output_layer,
            elmo=elmo,
            use_input_elmo=use_input_elmo,
            use_integrator_output_elmo=use_integrator_output_elmo,
            initializer=initializer,
            regularizer=regularizer,
        )
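
The output_layer dispatch above keys on the presence of an "activations" entry: a block with activations is built as a FeedForward, otherwise it is interpreted as Maxout parameters. A minimal sketch of that dispatch (the dimension values are illustrative assumptions):

from allennlp.common import Params
from allennlp.modules import FeedForward, Maxout

# No "activations" key, so this block is routed to Maxout.from_params.
output_layer_params = Params({"input_dim": 2400,
                              "num_layers": 1,
                              "output_dims": 5,
                              "pool_sizes": 4,
                              "dropout": 0.0})
if "activations" in output_layer_params:
    output_layer = FeedForward.from_params(output_layer_params)
else:
    output_layer = Maxout.from_params(output_layer_params)
print(type(output_layer).__name__)  # Maxout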
Code example #7
    def from_params(
            cls, vocab: Vocabulary,
            params: Params) -> 'SemEvalClassifierAttention':  # type: ignore
        # pylint: disable=arguments-differ
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(
            vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool(
            "use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(
            params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(
            params.pop('regularizer', []))
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   initializer=initializer,
                   regularizer=regularizer)
Code example #8
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'BiattentiveClassificationNetwork':  # type: ignore
        # pylint: disable=arguments-differ
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop("embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(params.pop("pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop("encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop("integrator"))
        integrator_dropout = params.pop("integrator_dropout")

        output_layer_params = params.pop("output_layer")
        if "activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop("elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool("use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool("use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(params.pop('initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop('regularizer', []))
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   pre_encode_feedforward=pre_encode_feedforward,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   elmo=elmo,
                   use_input_elmo=use_input_elmo,
                   use_integrator_output_elmo=use_integrator_output_elmo,
                   initializer=initializer,
                   regularizer=regularizer)
Code example #9
    def from_params(cls, vocab, params):  # type: ignore
        # pylint: disable=arguments-differ
        embedder_params = params.pop(u"text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab=vocab, params=embedder_params)
        embedding_dropout = params.pop(u"embedding_dropout")
        pre_encode_feedforward = FeedForward.from_params(params.pop(u"pre_encode_feedforward"))
        encoder = Seq2SeqEncoder.from_params(params.pop(u"encoder"))
        integrator = Seq2SeqEncoder.from_params(params.pop(u"integrator"))
        integrator_dropout = params.pop(u"integrator_dropout")

        output_layer_params = params.pop(u"output_layer")
        if u"activations" in output_layer_params:
            output_layer = FeedForward.from_params(output_layer_params)
        else:
            output_layer = Maxout.from_params(output_layer_params)

        elmo = params.pop(u"elmo", None)
        if elmo is not None:
            elmo = Elmo.from_params(elmo)
        use_input_elmo = params.pop_bool(u"use_input_elmo", False)
        use_integrator_output_elmo = params.pop_bool(u"use_integrator_output_elmo", False)

        initializer = InitializerApplicator.from_params(params.pop(u'initializer', []))
        regularizer = RegularizerApplicator.from_params(params.pop(u'regularizer', []))
        params.assert_empty(cls.__name__)

        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   embedding_dropout=embedding_dropout,
                   pre_encode_feedforward=pre_encode_feedforward,
                   encoder=encoder,
                   integrator=integrator,
                   integrator_dropout=integrator_dropout,
                   output_layer=output_layer,
                   elmo=elmo,
                   use_input_elmo=use_input_elmo,
                   use_integrator_output_elmo=use_integrator_output_elmo,
                   initializer=initializer,
                   regularizer=regularizer)
Code example #10
    def __init__(self, embedder: TextFieldEmbedder,
                 embedding_dropout_prob: float, embedding_dim: int,
                 use_input_elmo: bool, pre_encode_dim: Union[int, List[int]],
                 pre_encode_layer_dropout_prob: Union[float, List[float]],
                 encoder_dim: int, integrator_dim: int,
                 integrator_dropout: float, use_integrator_output_elmo: bool,
                 output_dim: Union[int, List[int]], output_pool_size: int,
                 output_dropout_prob: Union[float, List[float]], comp: int,
                 elmo: Elmo):
        super(LABiattentiveClassificationNetwork, self).__init__()
        # pre_encode_feedforward

        self.text_field_embedder = embedder
        self.embedding_dropout = nn.Dropout(embedding_dropout_prob)
        self.use_input_elmo = use_input_elmo
        embedding_dim += ELMO_DIM if self.use_input_elmo else 0
        if isinstance(pre_encode_dim, int):
            pre_encode_layer_num = 1
            pre_encode_dim = [pre_encode_dim]
            pre_encode_layer_dropout_prob = [pre_encode_layer_dropout_prob]
        else:
            pre_encode_layer_num = len(pre_encode_dim)

        self.pre_encode_feedforward = FeedForward(
            input_dim=embedding_dim,
            num_layers=pre_encode_layer_num,
            hidden_dims=pre_encode_dim,
            activations=[nn.ReLU()] * pre_encode_layer_num,
            dropout=pre_encode_layer_dropout_prob)
        pytorch_encoder = nn.LSTM(input_size=pre_encode_dim[-1],
                                  hidden_size=encoder_dim,
                                  num_layers=1,
                                  bidirectional=True,
                                  batch_first=True)
        self.encoder = PytorchSeq2SeqWrapper(pytorch_encoder)
        pytorch_integrator = nn.LSTM(input_size=6 * encoder_dim,
                                     hidden_size=integrator_dim,
                                     num_layers=1,
                                     bidirectional=True,
                                     batch_first=True)
        self.integrator = PytorchSeq2SeqWrapper(pytorch_integrator)
        self.integrator_dropout = nn.Dropout(p=integrator_dropout)
        self.use_integrator_output_elmo = use_integrator_output_elmo
        self.elmo = elmo  # must be set before it is queried just below

        if self.use_integrator_output_elmo:
            self.combined_integrator_output_dim = (
                self.integrator.get_output_dim() + self.elmo.get_output_dim())
        else:
            self.combined_integrator_output_dim = self.integrator.get_output_dim()

        self.self_attentive_pooling_projection = nn.Linear(
            self.combined_integrator_output_dim, 1)

        if isinstance(output_dim, int):
            output_layer_num = 1
            la_output_dim = [output_dim]
            output_dropout_prob = [output_dropout_prob]
        else:
            la_output_dim = copy(output_dim)
            output_layer_num = len(output_dim)
        la_output_dim[-1] = la_output_dim[-1] * comp
        self.la_output_layer = Maxout(input_dim=integrator_dim * 8,
                                      num_layers=output_layer_num,
                                      output_dims=la_output_dim,
                                      pool_sizes=output_pool_size,
                                      dropout=output_dropout_prob)

        initializer = InitializerApplicator()
        initializer(self)
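
Finally, a small sketch of the mixed scalar/list arguments accepted by Maxout, as used above: a scalar pool_sizes is broadcast to every layer, while output_dims and dropout are given per layer. All values below are illustrative assumptions.

from allennlp.modules import Maxout

integrator_dim, comp = 300, 2  # assumed values for illustration
la_output_layer = Maxout(input_dim=integrator_dim * 8,
                         num_layers=2,
                         output_dims=[1200, 5 * comp],  # last layer scaled by comp
                         pool_sizes=4,                  # scalar, shared by both layers
                         dropout=[0.2, 0.0])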