# NOTE: imports assumed for an AllenNLP-style test; which attention classes exist depends on the AllenNLP version.
from allennlp.common import Params
from allennlp.modules.attention import (
    Attention, CosineAttention, DotProductAttention, LegacyAttention,
    LinearAttention, ScaledDotProductAttention)


def test_can_init_scaled_dot(self):
    legacy_attention = Attention.from_params(
        Params({
            "type": "scaled_dot_product",
            "scaling_factor": 9
        }))
    assert isinstance(legacy_attention, ScaledDotProductAttention)
Example #2
def test_can_init_linear(self):
    legacy_attention = Attention.from_params(
        Params({
            "type": "linear",
            "tensor_1_dim": 3,
            "tensor_2_dim": 3
        }))
    assert isinstance(legacy_attention, LinearAttention)


def test_can_init_cosine(self):
    legacy_attention = Attention.from_params(Params({"type": "cosine"}))
    assert isinstance(legacy_attention, CosineAttention)


def test_can_init_legacy(self):
    legacy_attention = Attention.from_params(Params({"type": "legacy"}))
    assert isinstance(legacy_attention, LegacyAttention)
Example #5
def test_can_init_dot(self):
    legacy_attention = Attention.from_params(
        Params({"type": "dot_product"}))
    assert isinstance(legacy_attention, DotProductAttention)
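
For context, here is a minimal usage sketch of the modules these tests construct, assuming the standard AllenNLP Attention contract: forward takes a query vector of shape (batch, dim) and a matrix of shape (batch, num_rows, dim), and returns normalized weights of shape (batch, num_rows).

# Usage sketch (assumed API; shapes follow the AllenNLP Attention contract).
import torch

attention = Attention.from_params(Params({"type": "dot_product"}))
vector = torch.rand(2, 3)       # query vector: (batch_size=2, embedding_dim=3)
matrix = torch.rand(2, 5, 3)    # rows to attend over: (batch_size=2, num_rows=5, embedding_dim=3)
weights = attention(vector, matrix)  # (2, 5); each row sums to 1 under the default softmax normalization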