Example #1
    @classmethod
    def from_params(cls, vocab: Vocabulary, params: Params) -> 'DeIsTe':
        # Build the text-field embedder from its sub-config (needs the vocabulary).
        embedder_params = params.pop("text_field_embedder")
        text_field_embedder = TextFieldEmbedder.from_params(vocab, embedder_params)

        # Inter-attention module and the encoder for the parameter-dynamics path.
        inter_attention = MatrixAttention.from_params(params.pop("inter_attention"))
        param_dyn_encoder = Seq2VecEncoder.from_params(params.pop("param_dyn_encoder"))

        # Position embedder (vocabulary-independent) and the encoder for the
        # position-attention path.
        pos_embedder = TokenEmbedder.from_params(vocab=None, params=params.pop("pos_embedder"))
        pos_attn_encoder = Seq2VecEncoder.from_params(params.pop("pos_attn_encoder"))

        # Optional feed-forward layer on top of the encoded representations.
        output_feedforward_params = params.pop("output_feedforward", None)
        output_feedforward = (FeedForward.from_params(output_feedforward_params)
                              if output_feedforward_params else None)

        initializer = InitializerApplicator.from_params(params.pop("initializer", []))
        regularizer = RegularizerApplicator.from_params(params.pop("regularizer", []))

        # Fail loudly if the config contains keys this model does not recognize.
        params.assert_empty(cls.__name__)
        return cls(vocab=vocab,
                   text_field_embedder=text_field_embedder,
                   inter_attention=inter_attention,
                   param_dyn_encoder=param_dyn_encoder,
                   pos_embedder=pos_embedder,
                   pos_attn_encoder=pos_attn_encoder,
                   output_feedforward=output_feedforward,
                   initializer=initializer,
                   regularizer=regularizer)
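
For reference, here is a minimal sketch of the kind of `Params` config this method consumes. Only the top-level keys are taken from the pops above; every nested type and dimension is an illustrative assumption, not a value from the original DeIsTe experiment.

from allennlp.common import Params

# Hypothetical config: top-level keys mirror DeIsTe.from_params; nested values are assumptions.
config = Params({
    "text_field_embedder": {"tokens": {"type": "embedding", "embedding_dim": 100}},
    "inter_attention": {"type": "dot_product"},
    "param_dyn_encoder": {"type": "gru", "input_size": 100, "hidden_size": 100},
    "pos_embedder": {"type": "embedding", "num_embeddings": 50, "embedding_dim": 20},
    "pos_attn_encoder": {"type": "gru", "input_size": 120, "hidden_size": 100},
})
# model = DeIsTe.from_params(vocab=vocab, params=config)  # given a built Vocabulary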
    def test_can_init_linear(self):
        linear_attention = MatrixAttention.from_params(
            Params({
                "type": "linear",
                "tensor_1_dim": 3,
                "tensor_2_dim": 3
            }))
        assert isinstance(linear_attention, LinearMatrixAttention)
    def test_can_build_from_params(self):
        params = Params({
            "type": "legacy",
            "similarity_function": {
                "type": "cosine"
            }
        })
        attention = MatrixAttention.from_params(params)

        assert attention._similarity_function.__class__.__name__ == "CosineSimilarity"
    def test_can_init_scaled_dot_product(self):
        attention = MatrixAttention.from_params(Params({"type": "scaled_dot_product"}))
        assert isinstance(attention, ScaledDotProductMatrixAttention)
    def test_can_init_cosine(self):
        attention = MatrixAttention.from_params(Params({"type": "cosine"}))
        assert isinstance(attention, CosineMatrixAttention)
    def test_can_init_dot(self):
        attention = MatrixAttention.from_params(Params({"type": "dot_product"}))
        assert isinstance(attention, DotProductMatrixAttention)
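
As a small usage sketch (my own, not part of the tests above): a `MatrixAttention` built via `from_params` maps two batched matrices to a matrix of pairwise row similarities.

import torch
from allennlp.common import Params
from allennlp.modules.matrix_attention import MatrixAttention

# Illustrative only: "dot_product" is one of the registered types exercised above.
attention = MatrixAttention.from_params(Params({"type": "dot_product"}))
matrix_1 = torch.rand(2, 3, 4)   # (batch_size, num_rows_1, embedding_dim)
matrix_2 = torch.rand(2, 5, 4)   # (batch_size, num_rows_2, embedding_dim)
similarities = attention(matrix_1, matrix_2)
assert similarities.shape == (2, 3, 5)  # one similarity score per pair of rows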