def explained_variance(self):
    '''Computes the explained variance score for a tensor decomposition.
    Inspired by sklearn.metrics.explained_variance_score.

    Returns
    -------
    explained_variance : float
        Explained variance score for a tensor factorization.
    '''
    assert self.tl_object is not None, "Must run compute_tensor_factorization before using this method."
    tensor = self.tensor
    rec_tensor = self.tl_object.to_tensor()
    mask = self.mask

    if mask is not None:
        tensor = tensor * mask
        # Mask the reconstruction too (was `tensor * mask`, which would
        # zero out the residual and always report a perfect score).
        rec_tensor = rec_tensor * mask

    y_diff_avg = tl.mean(tensor - rec_tensor)
    numerator = tl.norm(tensor - rec_tensor - y_diff_avg)

    tensor_avg = tl.mean(tensor)
    denominator = tl.norm(tensor - tensor_avg)

    if denominator == 0.:
        explained_variance = 0.0
    else:
        explained_variance = 1. - (numerator / denominator)
        explained_variance = explained_variance.item()
    return explained_variance
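
# For reference, a minimal standalone sketch of the same score on two dense
# tensors, outside the class (the helper name `_explained_variance` and the
# toy values below are illustrative, not part of the original code):
import tensorly as tl

def _explained_variance(tensor, rec_tensor):
    # 1 - ||residual - mean(residual)|| / ||tensor - mean(tensor)||,
    # mirroring the method above for an unmasked tensor.
    residual = tensor - rec_tensor
    numerator = tl.norm(residual - tl.mean(residual))
    denominator = tl.norm(tensor - tl.mean(tensor))
    if denominator == 0.:
        return 0.0
    return float(1. - numerator / denominator)

# A near-perfect reconstruction scores close to 1.
t = tl.tensor([[1., 2.], [3., 4.]])
rec = tl.tensor([[1.1, 1.9], [3.0, 4.1]])
assert _explained_variance(t, rec) > 0.9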
import tensorly as tl
from tensorly.cp_tensor import cp_flip_sign, cp_to_tensor
from tensorly.random import random_cp
from tensorly.testing import (assert_, assert_equal, assert_array_equal,
                              assert_array_almost_equal)


def test_cp_flip_sign():
    shape = (3, 4, 5)
    rank = 4
    cp_tensor = random_cp(shape, rank)
    cp_flipped = cp_flip_sign(cp_tensor)
    weights, factors = cp_flipped

    # Column means of the non-anchor modes must be positive after flipping.
    assert_(tl.all(tl.mean(factors[1], axis=0) > 0))
    assert_(tl.all(tl.mean(factors[2], axis=0) > 0))
    # Flipping must preserve rank and weights
    # (was `assert_equal(cp_tensor.rank, cp_tensor.rank)`, a tautology).
    assert_equal(cp_flipped.rank, cp_tensor.rank)
    assert_array_equal(cp_tensor.weights, weights)
    # And the reconstructed tensor must be unchanged.
    assert_array_almost_equal(cp_to_tensor((weights, factors)), cp_to_tensor(cp_tensor))
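
# Sketch of the ambiguity this test guards against (assuming the imports
# above; the demo values are illustrative): negating matching columns in two
# factors leaves the reconstruction unchanged, and cp_flip_sign normalizes
# the signs back so that column means are positive again.
cp = random_cp((3, 4, 5), rank=2)
full = cp_to_tensor(cp)
cp.factors[1] = tl.index_update(cp.factors[1], tl.index[:, 0], -cp.factors[1][:, 0])
cp.factors[2] = tl.index_update(cp.factors[2], tl.index[:, 0], -cp.factors[2][:, 0])
weights, factors = cp_flip_sign(cp)
assert tl.all(tl.mean(factors[1], axis=0) > 0)   # positive column means restored
assert float(tl.norm(cp_to_tensor((weights, factors)) - full)) < 1e-8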
import tensorly as tl
from tensorly.tenalg import batched_outer


def higher_order_moment(tensor, order):
    """Computes the Higher-Order Moment

    Parameters
    ----------
    tensor : 2D-tensor or ND-tensor
        matrix of size (n_samples, n_features)
        or tensor of size (n_samples, D1, ..., DN)
    order : int
        order of the higher-order moment to compute

    Returns
    -------
    tensor : moment
        if tensor is a matrix of size (n_samples, n_features),
        tensor of size (n_features, )*order
    """
    moment = tensor
    for _ in range(order - 1):
        # batched_outer takes a list of tensors and treats the first
        # dimension (samples) as a batch dimension.
        moment = batched_outer([moment, tensor])
    return tl.mean(moment, axis=0)
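
# Quick sanity check (a sketch, assuming the imports above): for order=2 on a
# data matrix X of shape (n_samples, n_features), the higher-order moment is
# the uncentered second moment E[x x^T] = X^T X / n_samples.
import numpy as np

X = tl.tensor(np.random.default_rng(0).normal(size=(100, 3)))
M2 = higher_order_moment(X, order=2)            # shape (3, 3)
expected = tl.dot(tl.transpose(X), X) / 100.    # X^T X / n_samples
assert np.allclose(tl.to_numpy(M2), tl.to_numpy(expected))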
import tensorly as tl
from tensorly.tenalg.proximal import soft_thresholding


def make_svd_non_negative(tensor, U, S, V, nntype):
    """Use NNDSVD method to transform SVD results into a non-negative form.

    This method leads to more efficient solving with NNMF [1].

    Parameters
    ----------
    tensor : tensor being decomposed
    U, S, V : SVD factorization results
    nntype : {'nndsvd', 'nndsvda'}
        Whether to fill small values with 0.0 (nndsvd)
        or the tensor mean (nndsvda).

    References
    ----------
    [1] Boutsidis & Gallopoulos. Pattern Recognition, 41(4): 1350-1362, 2008.
    """
    # NNDSVD initialization
    W = tl.zeros_like(U)
    H = tl.zeros_like(V)

    # The leading singular triplet is non-negative
    # so it can be used as is for initialization.
    W = tl.index_update(W, tl.index[:, 0], tl.sqrt(S[0]) * tl.abs(U[:, 0]))
    H = tl.index_update(H, tl.index[0, :], tl.sqrt(S[0]) * tl.abs(V[0, :]))

    for j in range(1, tl.shape(U)[1]):
        x, y = U[:, j], V[j, :]

        # extract positive and negative parts of column vectors
        x_p, y_p = tl.clip(x, a_min=0.0), tl.clip(y, a_min=0.0)
        x_n, y_n = tl.abs(tl.clip(x, a_max=0.0)), tl.abs(tl.clip(y, a_max=0.0))

        # and their norms
        x_p_nrm, y_p_nrm = tl.norm(x_p), tl.norm(y_p)
        x_n_nrm, y_n_nrm = tl.norm(x_n), tl.norm(y_n)

        m_p, m_n = x_p_nrm * y_p_nrm, x_n_nrm * y_n_nrm

        # choose the part with the larger product of norms
        if m_p > m_n:
            u = x_p / x_p_nrm
            v = y_p / y_p_nrm
            sigma = m_p
        else:
            u = x_n / x_n_nrm
            v = y_n / y_n_nrm
            sigma = m_n

        lbd = tl.sqrt(S[j] * sigma)
        W = tl.index_update(W, tl.index[:, j], lbd * u)
        H = tl.index_update(H, tl.index[j, :], lbd * v)

    # After this point we no longer need H
    eps = tl.eps(tensor.dtype)

    if nntype == "nndsvd":
        W = soft_thresholding(W, eps)
    elif nntype == "nndsvda":
        avg = tl.mean(tensor)
        W = tl.where(W < eps, tl.ones(tl.shape(W), **tl.context(W)) * avg, W)
    else:
        raise ValueError(
            'Invalid nntype parameter: got %r instead of one of %r'
            % (nntype, ('nndsvd', 'nndsvda')))

    return W
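
# Minimal usage sketch (illustrative data; NumPy's SVD stands in for whatever
# routine produced U, S, V upstream): build a rank-4 non-negative
# initialization for NMF from a non-negative matrix.
import numpy as np

rng = np.random.default_rng(0)
X = tl.tensor(rng.uniform(size=(20, 10)))   # non-negative data matrix
U, S, V = np.linalg.svd(tl.to_numpy(X), full_matrices=False)
rank = 4
W = make_svd_non_negative(X, tl.tensor(U[:, :rank]), tl.tensor(S[:rank]),
                          tl.tensor(V[:rank, :]), nntype="nndsvd")
assert tl.all(W >= 0) and tl.shape(W) == (20, 4)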