class NormalizedSVD2DTest(unittest.TestCase):
    def setUp(self):
        self.tensor = DictTensor(2)
        self.tensor.update(
            nested_list_to_dict(numpy.random.random_sample((10, 12))))
        self.normalized_tensor = self.tensor.normalized()
        self.svd = self.normalized_tensor.svd(k=3)
        self.u, self.svals, self.v = self.svd.u, self.svd.svals, self.svd.v

    def test_decomposition(self):
        self.assertEqual(self.u.shape[0], self.tensor.shape[0])
        self.assertEqual(len(self.svals), self.u.shape[1])
        self.assertEqual(len(self.svals), self.v.shape[1])
        self.assertEqual(self.v.shape[0], self.tensor.shape[1])
        # Assert that the singular values are decreasing.
        for i in range(1, len(self.svals)):
            self.assert_(self.svals[i] < self.svals[i - 1])

    def test_reconstructed(self):
        pass  # TODO

    def test_orthonormality(self):
        assertTensorEqual(self.u.T * self.u, numpy.eye(self.u.shape[1]))
        assertTensorEqual(self.v.T * self.v, numpy.eye(self.v.shape[1]))

    def test_variance(self):
        return  # TODO
        # Assert that the SVD explained some of the variance.
        diff_k3 = self.tensor - self.svd.reconstructed
        tensor_mag = self.tensor.magnitude()
        diff_k3_mag = diff_k3.magnitude()
        self.assert_(tensor_mag > diff_k3_mag)
        # Check that a smaller SVD explains less of the variance, but still some.
        svd_k1 = self.tensor.svd(k=1)
        diff_k1 = self.tensor - svd_k1.reconstructed
        diff_k1_mag = diff_k1.magnitude()
        self.assert_(tensor_mag > diff_k1_mag > diff_k3_mag)
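
# A plain-numpy cross-check of the invariants NormalizedSVD2DTest asserts
# (non-increasing singular values, orthonormal columns in U and V). This is
# an illustrative sketch only, not part of the divisi API: the helper name
# is hypothetical, the function is not run automatically, and it assumes the
# module-level numpy import the tests above already rely on.
def _numpy_svd_invariants_sketch():
    dense = numpy.random.random_sample((10, 12))
    u, svals, vt = numpy.linalg.svd(dense, full_matrices=False)
    u3, svals3, v3 = u[:, :3], svals[:3], vt[:3, :].T
    # numpy returns singular values in non-increasing order.
    assert all(svals3[i] >= svals3[i + 1] for i in range(len(svals3) - 1))
    # Columns of the truncated factors are orthonormal.
    assert numpy.allclose(numpy.dot(u3.T, u3), numpy.eye(3))
    assert numpy.allclose(numpy.dot(v3.T, v3), numpy.eye(3))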

class SVD2DTest(unittest.TestCase):
    def setUp(self):
        self.tensor = DictTensor(2)
        # Note: this command actually puts 20 values in tensor!
        self.tensor.update(nested_list_to_dict(svd_2d_test_matrix))
        self.svd = self.tensor.svd(k=3)
        self.incremental = self.tensor.incremental_svd(k=3, niter=200)
        self.u, self.svals, self.v = self.svd.u, self.svd.svals, self.svd.v

    def test_incremental(self):
        self.assertEqual(self.incremental.u.shape[0], self.tensor.shape[0])
        self.assertEqual(len(self.incremental.svals), self.incremental.u.shape[1])
        self.assertEqual(len(self.incremental.svals), self.incremental.v.shape[1])
        self.assertEqual(self.incremental.v.shape[0], self.tensor.shape[1])
        assertTensorEqual(self.incremental.u,
                          [[0, 0, 1],
                           [0, 1, 0],
                           [0, 0, 0],
                           [1, 0, 0]])
        assertTensorEqual(self.incremental.v,
                          [[0, 0, sqrt(.2)],
                           [1, 0, 0],
                           [0, 1, 0],
                           [0, 0, 0],
                           [0, 0, sqrt(.8)]])
        assertTensorEqual(self.incremental.svals, [4, 3, sqrt(5)])

    def test_decomposition(self):
        self.assertEqual(self.u.shape[0], self.tensor.shape[0])
        self.assertEqual(len(self.svals), self.u.shape[1])
        self.assertEqual(len(self.svals), self.v.shape[1])
        self.assertEqual(self.v.shape[0], self.tensor.shape[1])
        assertTensorEqual(self.u,
                          [[0, 0, 1],
                           [0, -1, 0],
                           [0, 0, 0],
                           [-1, 0, 0]], abs=True)
        assertTensorEqual(self.v,
                          [[0, 0, sqrt(.2)],
                           [-1, 0, 0],
                           [0, -1, 0],
                           [0, 0, 0],
                           [0, 0, sqrt(.8)]], abs=True)
        assertTensorEqual(self.svals, [4, 3, sqrt(5)])

    def test_reconstructed(self):
        assertTensorEqual(self.svd.reconstructed,
                          [[1, 0, 0, 0, 2],
                           [0, 0, 3, 0, 0],
                           [0, 0, 0, 0, 0],
                           [0, 4, 0, 0, 0]])
        assertTensorEqual(self.svd.reconstructed[1, :], [0, 0, 3, 0, 0])
        assertTensorEqual(self.svd.reconstructed[:, 2], [0, 3, 0, 0])

    def test_orthonormality(self):
        identity = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
        assertTensorEqual(self.u.T * self.u, identity)
        assertTensorEqual(self.v.T * self.v, identity)

    def test_variance(self):
        # Assert that the SVD explained some of the variance.
        diff_k3 = self.tensor - self.svd.reconstructed
        tensor_mag = self.tensor.magnitude()
        diff_k3_mag = diff_k3.magnitude()
        self.assert_(tensor_mag > diff_k3_mag)
        # Check that a smaller SVD explains less of the variance, but still some.
        svd_k1 = self.tensor.svd(k=1)
        diff_k1 = self.tensor - svd_k1.reconstructed
        diff_k1_mag = diff_k1.magnitude()
        self.assert_(tensor_mag > diff_k1_mag > diff_k3_mag)
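
# The expected values in SVD2DTest are consistent with svd_2d_test_matrix
# (defined elsewhere in this file) being the 4x5 matrix written out below;
# that is an assumption here, not something this section shows. This sketch
# checks with plain numpy that such a matrix has leading singular values
# [4, 3, sqrt(5)] and is reproduced exactly by its rank-3 truncation. The
# helper name is hypothetical, the function is not run automatically, and
# it assumes the module-level numpy and sqrt imports used by the tests.
def _numpy_svd_2d_fixture_sketch():
    m = numpy.array([[1, 0, 0, 0, 2],
                     [0, 0, 3, 0, 0],
                     [0, 0, 0, 0, 0],
                     [0, 4, 0, 0, 0]], dtype=float)
    u, svals, vt = numpy.linalg.svd(m, full_matrices=False)
    assert numpy.allclose(svals[:3], [4, 3, sqrt(5)])
    # The matrix has rank 3, so the rank-3 reconstruction is exact.
    reconstructed = numpy.dot(u[:, :3] * svals[:3], vt[:3, :])
    assert numpy.allclose(reconstructed, m)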

class DictTensorTest(unittest.TestCase):
    slice_testcase = [[1,    None, None],
                      [None, 2,    3],
                      [4,    None, None],
                      [None, 5,    None]]

    def setUp(self):
        self.tensor = DictTensor(2)

    def test_initial(self):
        self.assertEqual(len(self.tensor), 0)
        self.assertEqual(len(self.tensor.keys()), 0)
        assert_dims_consistent(self.tensor)
        self.assertEqual(self.tensor.shape, (0, 0))
        assert isinstance(self.tensor[4, 5], (float, int, long))
        self.assertEqual(self.tensor[5, 5], 0)
        self.assertEqual(self.tensor[2, 7], 0)

    def test_storage(self):
        self.tensor[5, 5] = 1
        self.tensor[2, 7] = 2
        assertTensorEqual(self.tensor,
                          [[None] * 8,
                           [None] * 8,
                           [None] * 7 + [2],
                           [None] * 8,
                           [None] * 8,
                           [None] * 5 + [1, None, None]])

    def test_slice(self):
        self.tensor.update(
            nones_removed(nested_list_to_dict(self.slice_testcase)))
        # Test end conditions: start index is included in slice,
        # end index is not.
        slice = self.tensor[1:3, 0:2]
        assertTensorEqual(slice, [[None, 2], [4, None]])
        # Test that slicing on some dims correctly reduces the
        # dimensionality of the tensor.
        slice = self.tensor[3, :]
        assertTensorEqual(slice, [None, 5, None])
        # Test the step parameter.
        slice = self.tensor[1:4:2, :]
        assertTensorEqual(slice, [[None, 2, 3], [None, 5, None]])

    def test_transpose(self):
        self.tensor[0, 0] = 1
        self.tensor[1, 2] = 3
        self.tensor[2, 0] = 4
        self.tensor[3, 1] = 5
        t = self.tensor.transpose()
        assertTensorEqual(t,
                          [[1, None, 4, None],
                           [None, None, None, 5],
                           [None, 3, None, None]])

    def test_delete(self):
        self.tensor.update(
            nones_removed(nested_list_to_dict(self.slice_testcase)))
        assertTensorEqual(self.tensor, self.slice_testcase)
        del self.tensor[0, 0]
        assertTensorEqual(self.tensor,
                          [[None, None, None],
                           [None, 2,    3],
                           [4,    None, None],
                           [None, 5,    None]])

    def test_contains(self):
        self.tensor[1, 2] = 1
        self.tensor[4, 5] = 2
        self.assertTrue((1, 2) in self.tensor)
        self.assertTrue(self.tensor.has_key((1, 2)))
        self.assertFalse((4, 2) in self.tensor)
        self.assertFalse((1, 5) in self.tensor)

    def test_1D(self):
        tensor_1D = DictTensor(1)
        tensor_1D[2] = 1
        assertTensorEqual(tensor_1D, [None, None, 1])

    def test_combine_by_element(self):
        t1 = DictTensor(2)
        t2 = DictTensor(2)
        t1[1, 1] = 1
        t1[1, 0] = 2
        t2[1, 1] = 4
        t2[0, 1] = 5
        t3 = t1.combine_by_element(t2, lambda x, y: x + (2 * y))
        assertTensorEqual(t3, [[None, 10], [2, 9]])
        # Make sure errors are raised when the tensors don't have the
        # same shape or number of dimensions.
        t4 = DictTensor(2)
        t4[0, 2] = 3
        t4[1, 0] = 5
        self.assertRaises(IndexError,
                          lambda: t1.combine_by_element(t4, lambda x, y: x + y))
        t4 = DictTensor(3)
        self.assertRaises(IndexError,
                          lambda: t1.combine_by_element(t4, lambda x, y: x + y))

    def testAdd(self):
        t1 = DictTensor(2)
        t2 = DictTensor(2)
        t1[0, 0] = 1
        t1[1, 1] = 1
        t1[1, 0] = 2
        t2[2, 1] = 4
        t2[1, 0] = 5
        t3 = t1 + t2
        assertTensorEqual(t3, [[1, None], [7, 1], [None, 4]])

    def testICmul(self):
        t1 = tensor_from_nested_list([[1, 2], [3, 4]])
        assertTensorEqual(t1, [[1, 2], [3, 4]])
        t1 *= 2
        assertTensorEqual(t1, [[2, 4], [6, 8]])

    def testICdiv(self):
        t1 = tensor_from_nested_list([[2, 4], [6, 8]])
        t1 /= 2
        assertTensorEqual(t1, [[1, 2], [3, 4]])

    def testReprOfEmpty(self):
        repr(self.tensor)
        self.tensor.example_key()

    def testNorm(self):
        norm_test = [[0, 0,   0],
                     [0, 1,   0],
                     [0, 5.0, 0]]
        self.tensor.update(nested_list_to_dict(norm_test))
        self.assertEqual(self.tensor.norm(), sqrt(26.0))
        self.assertEqual(self.tensor.magnitude(), sqrt(26.0))
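
# A dense-numpy analogue of the slicing, transpose, and norm checks in
# DictTensorTest, treating the sparse tensor's missing entries (None) as
# zeros. Illustrative only: the helper name is hypothetical, the function
# is not run automatically, and it assumes the module-level numpy and sqrt
# imports used by the tests above.
def _numpy_dense_analogue_sketch():
    dense = numpy.array([[1, 0, 0],
                         [0, 2, 3],
                         [4, 0, 0],
                         [0, 5, 0]], dtype=float)
    # Start index is included in a slice, end index is not.
    assert numpy.allclose(dense[1:3, 0:2], [[0, 2], [4, 0]])
    # Slicing with a single index drops that dimension.
    assert numpy.allclose(dense[3, :], [0, 5, 0])
    # The step parameter selects every other row here.
    assert numpy.allclose(dense[1:4:2, :], [[0, 2, 3], [0, 5, 0]])
    # Transposition swaps the two dimensions.
    assert numpy.allclose(dense.T, [[1, 0, 4, 0], [0, 2, 0, 5], [0, 3, 0, 0]])
    # The Frobenius norm matches testNorm's expectation: sqrt(1 + 25).
    assert numpy.allclose(numpy.linalg.norm([[0, 0, 0], [0, 1, 0], [0, 5.0, 0]]),
                          sqrt(26.0))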