# Shared imports for the snippets below (AllenNLP 0.x-era API).
import numpy
import torch
from numpy.testing import assert_almost_equal
from torch.nn import Parameter

from allennlp.common import Params
from allennlp.modules.similarity_functions import BilinearSimilarity


def test_forward_does_a_bilinear_product(self):
    # pylint: disable=protected-access
    bilinear = BilinearSimilarity(2, 2)
    bilinear._weight_matrix = Parameter(torch.FloatTensor([[-.3, .5], [2.0, -1.0]]))
    bilinear._bias = Parameter(torch.FloatTensor([.1]))
    a_vectors = torch.FloatTensor([[1, 1], [-1, -1]])
    b_vectors = torch.FloatTensor([[1, 0], [0, 1]])
    result = bilinear(a_vectors, b_vectors).detach().numpy()
    assert result.shape == (2,)
    assert_almost_equal(result, [1.8, .6])
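The expected values can be checked by hand: the module computes the bilinear form x^T W y + bias row by row. A standalone NumPy check of the same arithmetic, independent of the module under test:

    import numpy

    W = numpy.array([[-0.3, 0.5], [2.0, -1.0]])
    a = numpy.array([[1.0, 1.0], [-1.0, -1.0]])
    b = numpy.array([[1.0, 0.0], [0.0, 1.0]])
    # result[i] = a[i] @ W @ b[i] + bias
    print(numpy.einsum("ij,jk,ik->i", a, W, b) + 0.1)  # [1.8 0.6]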
Example #2
def test_forward_works_with_higher_order_tensors(self):
    # pylint: disable=protected-access
    bilinear = BilinearSimilarity(4, 7)
    weights = numpy.random.rand(4, 7)
    bilinear._weight_matrix = Parameter(torch.from_numpy(weights).float())
    bilinear._bias = Parameter(torch.from_numpy(numpy.asarray([0])).float())
    a_vectors = numpy.random.rand(5, 4, 3, 6, 4)
    b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
    a_variables = torch.from_numpy(a_vectors).float()
    b_variables = torch.from_numpy(b_vectors).float()
    result = bilinear(a_variables, b_variables).detach().numpy()
    assert result.shape == (5, 4, 3, 6)
    # Check one position against a plain NumPy bilinear product.
    expected_result = numpy.dot(numpy.dot(a_vectors[3, 2, 1, 3], weights),
                                b_vectors[3, 2, 1, 3])
    assert_almost_equal(result[3, 2, 1, 3], expected_result, decimal=5)
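The test above checks a single position; continuing with the arrays from that test, the whole (5, 4, 3, 6) result can be cross-checked in one einsum call, since the bilinear product broadcasts over all leading dimensions (the bias is zero here, so no offset term is needed):

    expected = numpy.einsum("...i,ij,...j->...", a_vectors, weights, b_vectors)
    assert_almost_equal(result, expected, decimal=5)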
def test_can_construct_from_params(self):
    params = Params({'tensor_1_dim': 3, 'tensor_2_dim': 4})
    bilinear = BilinearSimilarity.from_params(params)
    assert list(bilinear._weight_matrix.size()) == [3, 4]  # pylint: disable=protected-access
Example #6
 def __init__(self,
              vocab: Vocabulary,
              text_field_embedder: TextFieldEmbedder,
              pos_tag_embedding: Embedding = None,
              users_embedding: Embedding = None,
              dropout: float = 0.1,
              label_namespace: str = "labels",
              initializer: InitializerApplicator = InitializerApplicator(),
              regularizer: RegularizerApplicator = None) -> None:
     super().__init__(vocab, regularizer)
     self._label_namespace = label_namespace
     self._dropout = Dropout(dropout)
     self._text_field_embedder = text_field_embedder
      self._pos_tag_embedding = pos_tag_embedding
     representation_dim = self._text_field_embedder.get_output_dim()
     if pos_tag_embedding is not None:
         representation_dim += self._pos_tag_embedding.get_output_dim()
     self._report_cnn = CnnEncoder(representation_dim, 25)
     self._comment_cnn = CnnEncoder(representation_dim, 25)
     lstm_input_dim = self._comment_cnn.get_output_dim()
      self._user_embedding = users_embedding
     if users_embedding is not None:
         lstm_input_dim += self._user_embedding.get_output_dim()
     rnn = nn.LSTM(input_size=lstm_input_dim,
                   hidden_size=150,
                   batch_first=True,
                   bidirectional=True)
     self._encoder = PytorchSeq2SeqWrapper(rnn)
     self._seq2vec = CnnEncoder(self._encoder.get_output_dim(), 25)
     self._num_class = self.vocab.get_vocab_size(self._label_namespace)
     self._bilinear_sim = BilinearSimilarity(self._encoder.get_output_dim(),
                                             self._encoder.get_output_dim())
     self._projector = FeedForward(self._seq2vec.get_output_dim(), 2,
                                   [50, self._num_class],
                                   Activation.by_name("sigmoid")(), dropout)
     self._golden_instances = None
     self._golden_instances_labels = None
     self._golden_instances_id = None
      self._metrics = {
          "accuracy": CategoricalAccuracy(),
          "f-measure": F1Measure(positive_label=vocab.get_token_index("feature", "labels")),
      }
     self._loss = torch.nn.CrossEntropyLoss()
     self._contrastive_loss = ContrastiveLoss()
     self._mse_loss = torch.nn.MSELoss()
     initializer(self)
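The constructor above references a ContrastiveLoss that is not part of AllenNLP and whose definition is not shown in this snippet. As a rough guide only, here is a minimal sketch of the standard margin-based contrastive loss (Hadsell et al., 2006) that such a class usually implements; the margin default and the mean reduction are assumptions, not the source's definition:

    import torch

    class ContrastiveLoss(torch.nn.Module):
        """Hypothetical sketch: pulls similar pairs together and pushes
        dissimilar pairs at least `margin` apart (Hadsell et al., 2006)."""

        def __init__(self, margin: float = 1.0) -> None:
            super().__init__()
            self._margin = margin

        def forward(self, distance: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
            # label == 1 for similar pairs, 0 for dissimilar pairs.
            positive = label * distance.pow(2)
            negative = (1 - label) * torch.clamp(self._margin - distance, min=0).pow(2)
            return 0.5 * (positive + negative).mean()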
Example #7
    lstm = PytorchSeq2SeqWrapper(
        torch.nn.LSTM(EMBEDDING_DIM, HIDDEN_DIM, batch_first=True))
    # NOTE: `inference`, the inference encoder used below, is defined in a
    # part of the source that is truncated here.
    # esim = PytorchSeq2SeqWrapper(torch.nn.ESIM(EMBEDDING_DIM, HIDDEN_DIM, batch_first=True))

    encoder_dim = word_embeddings.get_output_dim()

    projection_feedforward = FeedForward(encoder_dim * 4, 1,
                                         inference.get_input_dim(),
                                         Activation.by_name("elu")())

    # (batch_size, model_dim * 2 * 4)
    output_feedforward = FeedForward(lstm.get_output_dim() * 4, 1, 2,
                                     Activation.by_name("elu")())

    output_logit = torch.nn.Linear(in_features=2, out_features=2)

    simfunc = BilinearSimilarity(encoder_dim, encoder_dim)

    model = ESIM(vocab=vocab,
                 text_field_embedder=word_embeddings,
                 encoder=lstm,
                 inference_encoder=inference,
                 similarity_function=simfunc,
                 projection_feedforward=projection_feedforward,
                 output_feedforward=output_feedforward,
                 output_logit=output_logit)

    if torch.cuda.is_available():
        cuda_device = 0
        model = model.cuda(cuda_device)
    else:
        # Truncated in the source; -1 is the standard AllenNLP convention for CPU.
        cuda_device = -1
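To make the fragment usable end to end, here is a hedged sketch of how `cuda_device` would typically feed into training, assuming the AllenNLP 0.x Trainer/BucketIterator API, a hypothetical `train_dataset` loaded elsewhere, and an SNLI-style "premise" field for batching (all assumptions; the original code is truncated at this point):

    from allennlp.data.iterators import BucketIterator
    from allennlp.training.trainer import Trainer

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    iterator = BucketIterator(batch_size=32,
                              sorting_keys=[("premise", "num_tokens")])  # assumed field name
    iterator.index_with(vocab)
    trainer = Trainer(model=model,
                      optimizer=optimizer,
                      iterator=iterator,
                      train_dataset=train_dataset,  # hypothetical: loaded elsewhere
                      cuda_device=cuda_device,
                      num_epochs=10)
    trainer.train()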
Example #9
def test_weights_are_correct_sizes(self):
    # pylint: disable=protected-access
    bilinear = BilinearSimilarity(tensor_1_dim=5, tensor_2_dim=2)
    assert list(bilinear._weight_matrix.size()) == [5, 2]
    assert list(bilinear._bias.size()) == [1]
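Taken together, the tests pin down the forward computation: a bilinear form applied along the last axis, broadcast over any leading dimensions. A minimal functional sketch of that computation (AllenNLP's actual class additionally supports a configurable activation, assumed here to be the identity):

    import torch

    def bilinear_similarity(tensor_1: torch.Tensor,
                            tensor_2: torch.Tensor,
                            weight: torch.Tensor,
                            bias: torch.Tensor) -> torch.Tensor:
        # (..., dim_1) @ (dim_1, dim_2) -> (..., dim_2)
        intermediate = torch.matmul(tensor_1, weight)
        # Elementwise product with tensor_2, summed over the last axis: x^T W y + b
        return (intermediate * tensor_2).sum(dim=-1) + bias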