def test_transform():
    """Transformer postprocessors should match equivalent closed-form pipelines."""
    import math
    graph = next(pg.load_datasets_graph(["graph5"]))
    for _ in supported_backends():
        # Dividing every score by the total is the same as "sum" normalization.
        expected = pg.Normalize(pg.PageRank(), "sum").rank(graph)
        actual = pg.Transformer(pg.PageRank(), lambda x: x / pg.sum(x)).rank(graph)
        assert pg.Mabs(expected)(actual) < pg.epsilon()
        # Applying math.exp through transform() should match the backend's pg.exp.
        expected = pg.Transformer(math.exp).transform(pg.PageRank()(graph))
        actual = pg.Transformer(pg.PageRank(), pg.exp).rank(graph)
        assert pg.Mabs(expected)(actual) < pg.epsilon()
def test_absorbing_vs_pagerank():
    """Absorbing random walks with restart probability 0.85 should coincide with pagerank."""
    graph = next(pg.load_datasets_graph(["graph9"]))
    seeds = {"A": 1, "B": 1}
    for _ in supported_backends():
        reference = pg.PageRank(normalization='col').rank(graph, seeds)
        absorbing = pg.AbsorbingWalks(
            0.85, normalization='col', max_iters=1000).rank(graph, seeds)
        assert pg.Mabs(reference)(absorbing) < pg.epsilon()
def test_custom_runs():
    """Hand-built filters (generic and low-pass recursive) should reproduce pagerank."""
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        pagerank = pg.Normalize(
            pg.PageRank(0.85, tol=pg.epsilon(), max_iters=1000, use_quotient=False)
        ).rank(graph, {"A": 1})
        # 80 terms of the power-series expansion of pagerank's filter.
        generic = pg.Normalize(
            pg.GenericGraphFilter([0.85 ** i * len(graph) for i in range(80)],
                                  tol=pg.epsilon())
        ).rank(graph, {"A": 1})
        recursive = pg.Normalize(
            pg.LowPassRecursiveGraphFilter([0.85 for _ in range(80)], tol=pg.epsilon())
        ).rank(graph, {"A": 1})
        assert pg.Mabs(pagerank)(generic) < 1.E-6
        assert pg.Mabs(pagerank)(recursive) < 1.E-6
def test_pagerank_vs_networkx_tight_tolerance():
    """Sum-normalized pagerank at tol=1.E-9 should agree with networkx's pagerank.

    NOTE(review): this function was previously also named
    ``test_pagerank_vs_networkx``, the same as a later test in this file; the
    later definition shadowed this one so pytest never collected it. It is
    renamed so both tests actually run.
    """
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        ranker = pg.Normalize("sum", pg.PageRank(normalization='col', tol=1.E-9))
        test_result = ranker(graph)
        test_result2 = nx.pagerank(graph, tol=1.E-9)
        # TODO: assert that 2.5*epsilon is indeed a valid limit
        assert pg.Mabs(test_result)(test_result2) < 2.5 * pg.epsilon()
def test_quotient():
    """use_quotient=True should equal passing an explicit sum-normalizer as the quotient."""
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        tol = max(1.E-9, pg.epsilon())
        quotient_ranks = pg.PageRank(
            normalization='symmetric', tol=tol, use_quotient=True).rank(graph)
        normalizer_ranks = pg.PageRank(
            normalization='symmetric', tol=tol,
            use_quotient=pg.Normalize("sum")).rank(graph)
        assert pg.Mabs(quotient_ranks)(normalizer_ranks) < pg.epsilon()
def test_stream_diff():
    """Filter arithmetic: the [0, 0, 1] filter should equal [1, 1, 1] combined (&) with the negation (~) of [1, 1]."""
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        direct = (pg.GenericGraphFilter([0, 0, 1], max_iters=4, error_type="iters")
                  | pg.to_signal(graph, {"A": 1}))
        composed = (pg.GenericGraphFilter([1, 1, 1], tol=None)
                    & ~pg.GenericGraphFilter([1, 1], tol=None)
                    | pg.to_signal(graph, {"A": 1}))
        assert pg.Mabs(direct)(composed) < pg.epsilon()
def test_lanczos_speedup():
    """Krylov (Lanczos) acceleration should approximate exact heat-kernel ranks."""
    graph = next(pg.load_datasets_graph(["bigraph"]))
    for _ in supported_backends():
        for algorithm in [pg.HeatKernel]:
            exact = pg.Normalize(
                algorithm(normalization='symmetric')).rank(graph, {"0": 1})
            approximate = pg.Normalize(
                algorithm(normalization='symmetric', krylov_dims=5)).rank(graph, {"0": 1})
            # Lanczos is an approximation, so compare with a loose tolerance.
            assert pg.Mabs(exact)(approximate) < 0.01
def test_computations():
    """Sanity-check measure outcomes on tiny hand-computed examples."""
    for _ in supported_backends():
        assert pg.Accuracy([1, 2, 3])([1, 2, 3]) == 1
        # Mean absolute error of [3,1,1] vs [2,0,2] is (1+1+1)/3 = 1.
        assert pg.Mabs([3, 1, 1])([2, 0, 2]) == 1
        assert pg.CrossEntropy([1, 1, 1])([1, 1, 1]) < 1.E-12
        # Cosine similarity: identical, opposite, and degenerate zero vectors.
        assert float(pg.Cos([2, 0, 1])([2, 0, 1])) == 1
        assert float(pg.Cos([2, 0, 1])([-2, 0, -1])) == -1
        assert float(pg.Cos([0, 0, 0])([0, 0, 0])) == 0
        assert float(pg.Dot([1, 1, 1])([1, 1, 1])) == 3
        # True positive / true negative rates on half-correct predictions.
        assert float(pg.TPR([1, 0, 0, 0])([1, 1, 0, 0])) == 0.5
        assert float(pg.TNR([0, 0, 0, 1])([1, 1, 0, 0])) == 0.5
def test_automatic_graph_casting():
    """Signals may serve as graph and personalization; raw dicts or mismatched graphs may not."""
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        signal = pg.to_signal(graph, {"A": 1})
        from_signal_pair = pg.PageRank(normalization='col').rank(signal, signal)
        from_keyword_only = pg.PageRank(normalization='col').rank(personalization=signal)
        assert pg.Mabs(from_signal_pair)(from_keyword_only) < pg.epsilon()
        # A plain dict cannot be cast without a graph to anchor it.
        with pytest.raises(Exception):
            pg.PageRank(normalization='col').rank(personalization={"A": 1})
        # A signal bound to a different graph object must be rejected.
        with pytest.raises(Exception):
            pg.PageRank(normalization='col').rank(graph.copy(), signal)
def test_pagerank_vs_networkx():
    """Sum-normalized pagerank on a directed graph should agree with networkx's pagerank.

    Fix: removed leftover debug ``print()`` calls that polluted test output.
    """
    # graph_api needed so that nx.pagerank can perform internal computations
    graph = next(pg.load_datasets_graph(["graph9"], graph_api=nx))
    graph = graph.to_directed()
    for _ in supported_backends():
        ranker = pg.Normalize("sum", pg.PageRank(normalization='col'))
        test_result2 = nx.pagerank(graph)
        test_result = ranker(graph)
        # TODO: assert that 2.5*epsilon is indeed a valid limit
        assert pg.Mabs(test_result)(test_result2) < 2.5 * pg.epsilon()
def test_stream():
    """Piping a signal through >> should reproduce an explicitly normalized pagerank."""
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        explicit = pg.Normalize(
            pg.PageRank(0.85, tol=pg.epsilon(), max_iters=1000, use_quotient=False)
        ).rank(graph, {"A": 1})
        piped = (pg.to_signal(graph, {"A": 1})
                 >> pg.PageRank(0.85, tol=pg.epsilon(), max_iters=1000) + pg.Tautology()
                 >> pg.Normalize())
        assert pg.Mabs(explicit)(piped) < pg.epsilon()
def test_filter_stream():
    """Declaring a pipeline with + / >> / | should equal the explicitly constructed filter."""
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        explicit = pg.Normalize(
            pg.PageRank(normalization='symmetric', tol=max(1.E-9, pg.epsilon()),
                        use_quotient=True)).rank(graph)
        declared = (pg.PageRank(tol=max(1.E-9, pg.epsilon()))
                    + pg.preprocessor(normalization='symmetric')
                    + pg.Normalize("sum")
                    >> pg.Normalize()
                    | pg.to_signal(graph, {v: 1 for v in graph}))
        assert pg.Mabs(explicit)(declared) < pg.epsilon()
def test_best_direction():
    """Each measure should report whether larger (+1) or smaller (-1) values are better."""
    expectations = [
        (pg.Conductance(), -1),
        (pg.Density(), 1),
        (pg.Modularity(), 1),
        (pg.AUC([1, 2, 3]), 1),
        (pg.Cos([1, 2, 3]), 1),
        (pg.Dot([1, 2, 3]), 1),
        (pg.TPR([1, 2, 3]), 1),
        (pg.TNR([1, 2, 3]), 1),
        (pg.Mabs([1, 2, 3]), -1),
        (pg.MSQ([1, 2, 3]), -1),
        (pg.Euclidean([1, 2, 3]), -1),
        (pg.L2([1, 2, 3]), -1),
    ]
    for measure, direction in expectations:
        assert measure.best_direction() == direction
def test_krylov_space():
    """Krylov-space projection should track repeated graph convolutions within its error bound."""
    graph = next(pg.load_datasets_graph(["bigraph"]))
    nodes = list(graph)
    for _ in supported_backends():
        personalization = pg.to_signal(graph, {nodes[0]: 1, nodes[1]: 1})
        adjacency = pg.preprocessor(normalization="symmetric")(graph)
        krylov_dims = 5
        krylov_result = pg.eye(krylov_dims)
        krylov_base, H = pg.krylov_base(adjacency, personalization.np, krylov_dims)
        error_bound = pg.krylov_error_bound(krylov_base, H, adjacency, personalization.np)
        # A zero base projects everything to zero.
        assert pg.sum(pg.krylov2original(0, H, krylov_dims)) == 0
        assert error_bound < 0.01
        for _ in range(100):
            # Advance one step both in the Krylov space and in the original space.
            krylov_result = krylov_result @ H
            personalization.np = pg.conv(personalization.np, adjacency)
            reconstruction = pg.krylov2original(krylov_base, krylov_result, krylov_dims)
            assert pg.Mabs(personalization.np)(reconstruction) <= error_bound
            assert reconstruction.shape == personalization.np.shape