Example no. 1
0
 def test_forward_works_with_subtract_combinations(self):
     """combination='x-y' should score each row pair as w . (a - b) + bias."""
     linear = LinearSimilarity(2, 2, combination='x-y')
     # Pin weights and bias so the expected scores can be computed by hand.
     linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5]))
     linear._bias = Parameter(torch.FloatTensor([0]))
     # Plain tensors: the deprecated `Variable` wrapper is a no-op in
     # torch >= 0.4 and the sibling subtract test in this file omits it.
     a_vectors = torch.FloatTensor([[1, 1], [-1, -1]])
     b_vectors = torch.FloatTensor([[1, 0], [0, 1]])
     result = linear(a_vectors, b_vectors).data.numpy()
     # One scalar score per row pair.
     assert result.shape == (2,)
     # (a-b) rows are [0, 1] and [-1, -2]; dotted with [-.3, .5] -> .5, -.7
     assert_almost_equal(result, [.5, -.7])
Example no. 2
0
 def test_forward_does_a_weighted_product(self):
     """combination='x,y' concatenates the inputs before the weighted sum."""
     linear = LinearSimilarity(3, 1, combination='x,y')
     # Weight vector covers the concatenated (3 + 1)-dim input.
     linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5, 2.0, -1.0]))
     linear._bias = Parameter(torch.FloatTensor([.1]))
     a_vectors = torch.FloatTensor([[[1, 1, 1], [-1, -1, 0]]])
     b_vectors = torch.FloatTensor([[[0], [1]]])
     # Deprecated `Variable` wrapper removed; plain tensors work directly
     # and match the duplicate of this test later in the file.
     result = linear(a_vectors, b_vectors).data.numpy()
     assert result.shape == (1, 2,)
     # [1,1,1,0] . w + .1 = 2.3 ; [-1,-1,0,1] . w + .1 = -1.1
     assert_almost_equal(result, [[2.3, -1.1]])
Example no. 3
0
 def test_forward_works_with_divide_combinations(self):
     """With combination='x/y' the score is w . (a / b) plus the bias."""
     similarity = LinearSimilarity(2, 2, combination='x/y')
     similarity._weight_vector = Parameter(torch.FloatTensor([-.3, .5]))
     similarity._bias = Parameter(torch.FloatTensor([0]))
     left = torch.FloatTensor([[1, 1], [-1, -1]])
     right = torch.FloatTensor([[1, 2], [2, 1]])
     scores = similarity(left, right).data.numpy()
     # One scalar score per row pair.
     assert scores.shape == (2,)
     assert_almost_equal(scores, [-.05, -.35])
 def test_forward_works_with_subtract_combinations(self):
     """With combination='x-y' the score is w . (a - b) plus the bias."""
     similarity = LinearSimilarity(2, 2, combination='x-y')
     similarity._weight_vector = Parameter(torch.FloatTensor([-.3, .5]))
     similarity._bias = Parameter(torch.FloatTensor([0]))
     left = torch.FloatTensor([[1, 1], [-1, -1]])
     right = torch.FloatTensor([[1, 0], [0, 1]])
     scores = similarity(left, right).data.numpy()
     # One scalar score per row pair.
     assert scores.shape == (2, )
     assert_almost_equal(scores, [.5, -.7])
Example no. 5
0
 def test_forward_works_with_divide_combinations(self):
     """combination='x/y' should score each row pair as w . (a / b) + bias."""
     linear = LinearSimilarity(2, 2, combination='x/y')
     linear._weight_vector = Parameter(torch.FloatTensor([-.3, .5]))
     linear._bias = Parameter(torch.FloatTensor([0]))
     # Deprecated `Variable` wrapper removed for consistency with the
     # identical divide test earlier in this file that passes plain tensors.
     a_vectors = torch.FloatTensor([[1, 1], [-1, -1]])
     b_vectors = torch.FloatTensor([[1, 2], [2, 1]])
     result = linear(a_vectors, b_vectors).data.numpy()
     assert result.shape == (2, )
     # a/b rows are [1, .5] and [-.5, -1]; dotted with [-.3, .5] -> -.05, -.35
     assert_almost_equal(result, [-.05, -.35])
Example no. 6
0
 def test_forward_does_a_weighted_product(self):
     """'x,y' combination: concatenate the inputs, then weighted sum + bias."""
     similarity = LinearSimilarity(3, 1, combination="x,y")
     weight = torch.FloatTensor([-0.3, 0.5, 2.0, -1.0])
     similarity._weight_vector = Parameter(weight)
     similarity._bias = Parameter(torch.FloatTensor([0.1]))
     left = torch.FloatTensor([[[1, 1, 1], [-1, -1, 0]]])
     right = torch.FloatTensor([[[0], [1]]])
     scores = similarity(left, right).data.numpy()
     # Batch of 1, two pairs, each reduced to a scalar score.
     assert scores.shape == (1, 2)
     assert_almost_equal(scores, [[2.3, -1.1]])
Example no. 7
0
 def test_forward_works_with_higher_order_tensors(self):
     """combination='x,y' should broadcast over arbitrary leading dimensions."""
     linear = LinearSimilarity(7, 7, combination='x,y')
     weights = numpy.random.rand(14)
     linear._weight_vector = Parameter(torch.from_numpy(weights).float())
     linear._bias = Parameter(torch.FloatTensor([0.]))
     a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
     b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
     # Plain tensors suffice; the deprecated `Variable` wrapper is dropped
     # for consistency with the other tests in this file.
     result = linear(torch.from_numpy(a_vectors).float(),
                     torch.from_numpy(b_vectors).float())
     result = result.data.numpy()
     # Only the last (feature) dimension is consumed by the similarity.
     assert result.shape == (5, 4, 3, 6)
     # Spot-check one position against a hand-computed dot product.
     combined_vectors = numpy.concatenate([a_vectors[3, 2, 1, 3, :], b_vectors[3, 2, 1, 3, :]])
     expected_result = numpy.dot(combined_vectors, weights)
     assert_almost_equal(result[3, 2, 1, 3], expected_result, decimal=6)
Example no. 8
0
 def test_forward_works_with_higher_order_tensors(self):
     """combination='x,y' should broadcast over arbitrary leading dimensions."""
     linear = LinearSimilarity(7, 7, combination='x,y')
     weights = numpy.random.rand(14)
     linear._weight_vector = Parameter(torch.from_numpy(weights).float())
     linear._bias = Parameter(torch.FloatTensor([0.]))
     a_vectors = numpy.random.rand(5, 4, 3, 6, 7)
     b_vectors = numpy.random.rand(5, 4, 3, 6, 7)
     # Plain tensors suffice; the deprecated `Variable` wrapper is dropped
     # for consistency with the other tests in this file.
     result = linear(torch.from_numpy(a_vectors).float(),
                     torch.from_numpy(b_vectors).float())
     result = result.data.numpy()
     # Only the last (feature) dimension is consumed by the similarity.
     assert result.shape == (5, 4, 3, 6)
     # Spot-check one position against a hand-computed dot product.
     combined_vectors = numpy.concatenate(
         [a_vectors[3, 2, 1, 3, :], b_vectors[3, 2, 1, 3, :]])
     expected_result = numpy.dot(combined_vectors, weights)
     assert_almost_equal(result[3, 2, 1, 3], expected_result, decimal=6)