def test_missingness(algname):
    # Random data tensor.
    shape = (15, 16, 17)
    rank = 3

    if algname == "mcp_als":
        X = tt.randn_ktensor(shape, rank=rank, random_state=data_seed).full()
    elif algname == "ncp_hals":
        X = tt.rand_ktensor(shape, rank=rank, random_state=data_seed).full()

    # Random missingness mask.
    mask = np.random.binomial(1, .5, size=X.shape).astype(bool)

    # Create second tensor with corrupted entries.
    Y = X.copy()
    Y[~mask] = 999.

    # Algorithm fitting options.
    options = dict(
        rank=rank,
        mask=mask,
        verbose=False,
        tol=1e-6,
        random_state=alg_seed,
    )

    # Fit decompositions for both X and Y.
    resultX = getattr(tt, algname)(X, **options)
    resultY = getattr(tt, algname)(Y, **options)

    # Test that learning curves are identical.
    assert np.allclose(resultX.obj_hist, resultY.obj_hist)

    # Test that final factors are identical.
    for uX, uY in zip(resultX.factors, resultY.factors):
        assert np.allclose(uX, uY)
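# The tests in this section reference module-level names (np, tt, data_seed,
# alg_seed, atol_float64, obj_decreased_tol) and pytest parametrization that
# do not appear in this excerpt. Below is a minimal sketch of the assumed
# header; the seed and tolerance values are illustrative guesses, not the
# library's actual settings, and test_missingness_parametrized is a
# hypothetical wrapper standing in for the original decorator placement.

import itertools

import matplotlib.pyplot as plt
import numpy as np
import pytest
from scipy import linalg

import tensortools            # some snippets use the full module name...
import tensortools as tt      # ...others use the tt alias

# Assumed reproducibility seeds and tolerances (hypothetical values).
data_seed = 0              # seed for synthetic data generation
alg_seed = 1               # seed for algorithm initialization
atol_float64 = 1e-8        # absolute tolerance for float64 comparisons
obj_decreased_tol = 1e-7   # slack allowed when checking objective decrease


# test_missingness branches on these two algorithms, so a parametrization
# along these lines presumably drives it.
@pytest.mark.parametrize("algname", ["mcp_als", "ncp_hals"])
def test_missingness_parametrized(algname):
    test_missingness(algname)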
def cp_als_test():
    # Create synthetic dataset.
    I, J, K, R = 25, 25, 25, 3  # dimensions and rank parameters

    # Create a random tensor consisting of a low-rank component and noise.
    X = tensortools.randn_ktensor((I, J, K), rank=R).full()
    X += np.random.randn(I, J, K)  # add some random noise

    # Perform CP tensor decomposition (two fits from random initializations).
    U = tensortools.cp_als(X, rank=R, verbose=True)
    V = tensortools.cp_als(X, rank=R, verbose=True)

    # Compare the low-dimensional factors from the two fits.
    fig, _, _ = tensortools.plot_factors(U.factors)
    tensortools.plot_factors(V.factors, fig=fig)

    # Align the two fits and print a similarity score.
    similarity_score = tensortools.kruskal_align(
        U.factors, V.factors, permute_U=True, permute_V=True)
    print(similarity_score)

    # Plot the results again to see the alignment.
    fig, ax, po = tensortools.plot_factors(U.factors)
    tensortools.plot_factors(V.factors, fig=fig)
    plt.show()
def test_align():
    # Generate random KTensor.
    I, J, K, R = 15, 16, 17, 4
    U = tt.randn_ktensor((I, J, K), rank=R)
    X = U.full()  # Dense representation of U.

    # Enumerate all permutations of factors and test that
    # kruskal_align appropriately inverts this permutation.
    for prm in itertools.permutations(range(R)):
        V = U.copy()
        V.permute(prm)
        # Similarity of a permuted copy should equal 1 up to numerical error.
        assert abs(tt.kruskal_align(U, V) - 1) < atol_float64
        assert linalg.norm(X - U.full()) < atol_float64
        assert linalg.norm(X - V.full()) < atol_float64

    # Test that second input to kruskal_align is correctly permuted.
    for prm in itertools.permutations(range(R)):
        V = U.copy()
        V.permute(prm)
        tt.kruskal_align(U, V, permute_V=True)
        for fU, fV in zip(U, V):
            assert linalg.norm(fU - fV) < atol_float64
        assert linalg.norm(X - U.full()) < atol_float64
        assert linalg.norm(X - V.full()) < atol_float64

    # Test that first input to kruskal_align is correctly permuted.
    for prm in itertools.permutations(range(R)):
        V = U.copy()
        V.permute(prm)
        tt.kruskal_align(V, U, permute_U=True)
        for fU, fV in zip(U, V):
            assert linalg.norm(fU - fV) < atol_float64
        assert linalg.norm(X - U.full()) < atol_float64
        assert linalg.norm(X - V.full()) < atol_float64
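# For reference, a minimal usage sketch of the behavior test_align exercises:
# tt.kruskal_align returns a similarity score (1.0 means the two KTensors
# match up to component permutation and rescaling), and with permute_U /
# permute_V set it reorders the corresponding factors in place. align_demo is
# our illustrative name, not part of the test suite.

def align_demo():
    U = tt.randn_ktensor((15, 16, 17), rank=4, random_state=0)
    V = U.copy()
    V.permute((2, 0, 3, 1))  # scramble the component order

    score = tt.kruskal_align(U, V, permute_V=True)  # permutes V in place
    print(score)  # expected to be ~1.0 for an exact permuted copy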
def test_objective_decreases(algname, shape, rank):
    # Generate data. If the algorithm is designed for nonnegative tensor
    # decomposition, generate nonnegative data.
    if algname in ['ncp_hals', 'ncp_bcd']:
        X = tt.rand_ktensor(shape, rank=rank, random_state=data_seed).full()
    else:
        X = tt.randn_ktensor(shape, rank=rank, random_state=data_seed).full()

    # Fit model.
    f = getattr(tt, algname)
    result = f(X, rank=rank, verbose=False, tol=1e-6, random_state=alg_seed)

    # Test that objective function monotonically decreases.
    assert np.all(np.diff(result.obj_hist) < obj_decreased_tol)
def test_objective_decreases(algname, shape, rank):
    # Generate data. If the algorithm is designed for nonnegative tensor
    # decomposition, generate nonnegative data.
    if algname in ['ncp_hals', 'ncp_bcd']:
        X = tt.rand_ktensor(shape, rank=rank, random_state=data_seed).full()
    else:
        X = tt.randn_ktensor(shape, rank=rank, random_state=data_seed).full()

    # Algorithm fitting options.
    options = dict(rank=rank, verbose=False, tol=1e-6, random_state=alg_seed)

    # Add special options for particular algorithms.
    if algname == 'mcp_als':
        options['mask'] = np.ones_like(X).astype(bool)

    # Fit model.
    result = getattr(tt, algname)(X, **options)

    # Test that objective function monotonically decreases.
    assert np.all(np.diff(result.obj_hist) < obj_decreased_tol)
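# test_objective_decreases receives (algname, shape, rank) from pytest
# parametrization not shown in this excerpt. A plausible sketch, assuming the
# four solvers referenced across these tests; the shape and rank grid below
# is an illustrative choice, not the original one.

@pytest.mark.parametrize(
    "algname, shape, rank",
    itertools.product(
        ["cp_als", "mcp_als", "ncp_bcd", "ncp_hals"],  # solvers under test
        [(15, 16, 17)],                                # tensor shapes
        [1, 2, 3],                                     # decomposition ranks
    ),
)
def test_objective_decreases_parametrized(algname, shape, rank):
    test_objective_decreases(algname, shape, rank)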
def test_nonneg(algname, neg_modes):
    # Random data tensor.
    shape = (15, 16, 17)
    rank = 3
    X = tt.randn_ktensor(shape, rank=rank, random_state=data_seed).full()

    # Algorithm fitting options.
    options = dict(
        rank=rank,
        negative_modes=neg_modes,
        verbose=False,
        tol=1e-6,
        random_state=alg_seed,
    )

    # Fit decomposition.
    result = getattr(tt, algname)(X, **options)

    # Modes allowed to go negative should contain negative entries; all other
    # modes must be strictly nonnegative.
    for mode, factor in enumerate(result.factors):
        if mode in neg_modes:
            assert factor.min() < 0  # this should be true for most datasets
        else:
            assert factor.min() >= 0
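# test_nonneg likewise relies on parametrization outside this excerpt. Since
# ncp_hals is the solver here that accepts negative_modes, a sketch along
# these lines seems likely; the particular neg_modes subsets are illustrative.

@pytest.mark.parametrize(
    "algname, neg_modes",
    [
        ("ncp_hals", []),         # fully nonnegative decomposition
        ("ncp_hals", [0]),        # allow negative entries in mode 0 only
        ("ncp_hals", [1, 2]),     # allow negative entries in modes 1 and 2
        ("ncp_hals", [0, 1, 2]),  # unconstrained signs in every mode
    ],
)
def test_nonneg_parametrized(algname, neg_modes):
    test_nonneg(algname, neg_modes)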
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 13:07:43 2019

@author: abuzarmahmood
"""

import tensortools as tt
import numpy as np
import matplotlib.pyplot as plt

# Make synthetic dataset.
I, J, K, R = 25, 25, 25, 4  # dimensions and rank
X = tt.randn_ktensor((I, J, K), rank=R).full()
X += np.random.randn(I, J, K)  # add noise

# Fit CP tensor decomposition (two times).
U = tt.cp_als(X, rank=R, verbose=True)
V = tt.cp_als(X, rank=R, verbose=True)

# Compare the low-dimensional factors from the two fits.
fig, _, _ = tt.plot_factors(U.factors)
tt.plot_factors(V.factors, fig=fig)

# Align the two fits and print a similarity score.
sim = tt.kruskal_align(U.factors, V.factors, permute_U=True, permute_V=True)
print(sim)

# Plot the results again to see alignment.
fig, ax, po = tt.plot_factors(U.factors)
tt.plot_factors(V.factors, fig=fig)
plt.show()