def test_fastgsss(self, device, dtype, cheb):
    N = 1000
    ds = 0.1
    order = 12
    M = 10
    g = random_graph(N, ds, dtype=dtype, device=device)
    S, T = fastgsss(g, M, N // 10, order=order, cheby=cheb)
    print(S)

def test_estimate_lk(self, dtype, device):
    N = 100
    g = random_graph(N, dtype=dtype, device=device)
    lmax = g.max_frequency(lap_type="comb")
    print(lmax)
    band_limit = 10
    lambda_k, cum_coh = estimate_lk(
        g, band_limit, lmax=lmax, lap_type="comb", verbose=False, num_estimation=1
    )
    print(lambda_k)
    print(cum_coh)

def test_rsbs(self, dtype, device, return_list):
    N = 100
    k = 50
    M = 30
    appropriate_num_rv = np.int32(2 * np.round(np.log(N)))
    g = random_graph(N, dtype=dtype, device=device)
    nodes, coh = rsbs(
        g, M, k, num_rv=appropriate_num_rv, return_list=return_list
    )
    print(nodes)
    if return_list:
        assert isinstance(nodes, list)
    else:
        assert isinstance(nodes, torch.Tensor)

def test_fastgsss_rec(self, device, dtype, cheb):
    N = 8
    g = random_graph(N, 0.4, dtype=dtype, device=device)
    fs, U = g.spectral(lap_type="sym")
    M = 4
    bw = 4
    nu = 3
    c = torch.rand(bw, dtype=dtype, device=device)
    f_band = U[:, :bw] @ c  # exactly bandlimited signal with bandwidth bw
    f_band_noise = f_band + math.sqrt(5e-3) * torch.randn(
        N, dtype=dtype, device=device
    )
    K = 12
    S, T = fastgsss(g, M, bw, nu, cheb, order=K)
    f_hat = recon_fastssss(f_band_noise[S], S, T, order=K)
    s, m = snr_and_mse(f_hat, f_band)
    assert m < 0.5

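# The reconstruction tests in this file score results with `snr_and_mse`, which
# is imported from the suite's shared test utilities. As a point of reference, a
# minimal sketch of such a helper is given below, assuming it returns the SNR in
# dB and the mean squared error; `_snr_and_mse_sketch` is hypothetical and the
# helper actually used by these tests may differ. It relies on the `torch` and
# `math` imports already used above.
def _snr_and_mse_sketch(x_hat, x):
    err = x_hat.reshape(-1) - x.reshape(-1)
    mse = err.pow(2).mean().item()  # mean squared reconstruction error
    signal_power = x.reshape(-1).pow(2).mean().item()
    snr = 10 * math.log10(signal_power / max(mse, 1e-30))  # SNR in dB
    return snr, mse
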
def test_greedy_bga():
    N = 10
    num_iter = 10
    g = random_graph(N, 0.4)
    flag, vtx_color, A = is_bipartite_fix(g.to_dense(), fix_flag=False)
    assert not flag
    B, bset1 = greedy_bga(g.to_scipy("coo"), num_iter, verbose=True)
    flag1, vtx_color1, Ab = is_bipartite_fix(B, fix_flag=False)
    assert flag1
    assert (Ab - B).sum() == 0  # B is already bipartite, so no change
    C, bset2 = greedy_bga(B, num_iter, verbose=False)
    print(bset1)
    print(vtx_color1)
    print(bset2)
    assert (C - B).toarray().sum() == 0
    # since the root of the BFS bipartite coloring is chosen at random,
    # the two colorings may agree directly or be flipped
    assert np.allclose(bset2, vtx_color1) or np.allclose(
        bset2, 1 - np.asarray(vtx_color1)
    )

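# `is_bipartite_fix(..., fix_flag=False)` is used above only as a bipartiteness
# check that also returns a vertex 2-coloring. For reference, a minimal
# traversal-based 2-coloring over a dense adjacency matrix could look like the
# sketch below; `_two_coloring_sketch` is hypothetical and not part of the
# tested API.
def _two_coloring_sketch(A):
    # A: (N, N) dense adjacency matrix as a numpy array; returns (is_bipartite, colors).
    n = A.shape[0]
    colors = np.full(n, -1, dtype=np.int64)
    for root in range(n):
        if colors[root] != -1:
            continue
        colors[root] = 0
        queue = [root]
        while queue:
            u = queue.pop(0)
            for v in np.nonzero(A[u])[0]:
                if colors[v] == -1:
                    colors[v] = 1 - colors[u]  # assign the opposite color
                    queue.append(v)
                elif colors[v] == colors[u]:
                    return False, colors  # odd cycle found -> not bipartite
    return True, colors
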
def test_rsbs_recon(self, dtype, device):
    N = 10
    k = 5
    M = 5
    appropriate_num_rv = np.int32(2 * np.round(np.log(N)))
    g = random_graph(N, 0.3, dtype=dtype, device=device, seed=2021)
    print(g.device())
    # scikit-umfpack requires double-precision scalars
    if dtype == torch.double:
        nodes, coh = rsbs(
            g, M, k, num_rv=appropriate_num_rv, return_list=True
        )
        f = torch.rand(N, 1, dtype=dtype, device=device)
        f = f / f.norm()
        f_hat = recon_rsbs(
            f[nodes], S=nodes, L=g.L("comb"), cum_coh=coh, mu=0.1, reg_order=1
        )
        if torch.any(torch.isnan(f_hat)):
            print("This case leads to numerical instability and is skipped")
        else:
            s, m = snr_and_mse(f_hat, f)
            assert m < 1

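# `recon_rsbs` above reconstructs the signal from its samples on `nodes`. In the
# random-sampling framework such a decoder typically solves a regularized
# least-squares problem of the form
#     minimize_z  || P^{-1/2} (z[S] - y) ||^2  +  mu * z^T L^r z,
# where P holds the sampling weights on the selected nodes (related to `cum_coh`),
# mu is the regularization weight, and r corresponds to `reg_order`. This is an
# assumption about the decoder's objective, not a statement of its exact
# implementation; a dense sketch of the corresponding normal equations is shown
# below with the hypothetical helper `_recon_sketch`.
def _recon_sketch(y, S, L_dense, p, mu=0.1, r=1):
    # y: samples on the nodes in S, L_dense: (N, N) dense Laplacian,
    # p: sampling weights for the nodes in S (all torch tensors).
    N = L_dense.shape[0]
    m = len(S)
    M = torch.zeros(m, N, dtype=L_dense.dtype, device=L_dense.device)
    idx = torch.as_tensor(S, device=L_dense.device)
    M[torch.arange(m, device=L_dense.device), idx] = 1.0  # sampling matrix
    P_inv = torch.diag(1.0 / p)
    A = M.t() @ P_inv @ M + mu * torch.linalg.matrix_power(L_dense, r)
    b = M.t() @ P_inv @ y.reshape(-1)
    # solve (M^T P^{-1} M + mu L^r) z = M^T P^{-1} y
    return torch.linalg.solve(A, b)
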
def test_check_symmetric():
    G = random_graph(6, 0.4)
    assert G.is_symmetric()
    print("\n", G.to_dense())

def test_rand_test(self, device, dtype, density):
    N = 10
    G = random_graph(N, density, True, dtype, device)
    assert G.density() - density < 2 / N * (N - 1)
    G = random_graph(N, density, False, dtype, device)
    assert G.density() - density < 2 / N * (N - 1)

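# `G.density()` above is compared against the requested edge density. For an
# undirected simple graph the usual definition is 2 * |E| / (N * (N - 1)), and
# |E| / (N * (N - 1)) for a directed one; the sketch below illustrates the
# undirected case and is only an assumption about how `density()` is defined,
# not a restatement of its implementation.
def _density_sketch(A):
    # A: (N, N) dense symmetric adjacency matrix (torch tensor) with no self-loops.
    n = A.shape[0]
    num_edges = (A != 0).sum().item() / 2  # each undirected edge appears twice in A
    return 2 * num_edges / (n * (n - 1))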