import pygrank as pg
# Assumption: supported_backends is the suite's shared helper that yields once
# per available computational backend, switching the active backend before
# each iteration; adjust the import to wherever the suite actually defines it.
from .test_core import supported_backends


def test_explicit_citations():
    assert "unknown node ranking algorithm" == pg.NodeRanking().cite()
    # Escaped backslash so the literal \cite{...} survives Python string parsing.
    assert "with parameters tuned \\cite{krasanakis2021pygrank}" in pg.ParameterTuner(
        lambda params: pg.PageRank(params[0])).cite()
    assert "Postprocessor" in pg.Postprocessor().cite()
    assert pg.PageRank().cite() in pg.AlgorithmSelection().cite()
    assert "krasanakis2021pygrank" in pg.ParameterTuner().cite()
    assert "ortega2018graph" in pg.ParameterTuner().cite()
    assert pg.HeatKernel().cite() in pg.SeedOversampling(pg.HeatKernel()).cite()
    assert pg.AbsorbingWalks().cite() in pg.BoostedSeedOversampling(pg.AbsorbingWalks()).cite()
    assert "krasanakis2018venuerank" in pg.BiasedKernel(converge_to_eigenvectors=True).cite()
    assert "yu2021chebyshev" in pg.HeatKernel(coefficient_type="chebyshev").cite()
    assert "susnjara2015accelerated" in pg.HeatKernel(krylov_dims=5).cite()
    assert "krasanakis2021pygrank" in pg.GenericGraphFilter(optimization_dict=dict()).cite()
    assert "tautology" in pg.Tautology().cite()
    assert pg.PageRank().cite() == pg.Tautology(pg.PageRank()).cite()
    assert "mabs" in pg.MabsMaintain(pg.PageRank()).cite()
    assert "max normalization" in pg.Normalize(pg.PageRank()).cite()
    assert "[0,1] range" in pg.Normalize(pg.PageRank(), "range").cite()
    assert "ordinal" in pg.Ordinals(pg.PageRank()).cite()
    assert "exp" in pg.Transformer(pg.PageRank()).cite()
    assert "0.5" in pg.Threshold(pg.PageRank(), 0.5).cite()
    assert "andersen2007local" in pg.Sweep(pg.PageRank()).cite()
    assert pg.HeatKernel().cite() in pg.Sweep(pg.PageRank(), pg.HeatKernel()).cite()
    assert "LFPRO" in pg.AdHocFairness("O").cite()
    assert "LFPRO" in pg.AdHocFairness(pg.PageRank(), "LFPRO").cite()
    assert "multiplicative" in pg.AdHocFairness(pg.PageRank(), "B").cite()
    assert "multiplicative" in pg.AdHocFairness(pg.PageRank(), "mult").cite()
    assert "tsioutsiouliklis2020fairness" in pg.AdHocFairness().cite()
    assert "rahman2019fairwalk" in pg.FairWalk(pg.PageRank()).cite()
    assert "krasanakis2020prioredit" in pg.FairPersonalizer(pg.PageRank()).cite()
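
# Hedged sketch (not part of the original suite): cite() returns a plain
# string, so the calls exercised above can be aggregated into a single
# related-work blurb; the example_* naming keeps pytest from collecting it.
def example_collect_citations():
    algorithms = [pg.PageRank(), pg.HeatKernel(), pg.AbsorbingWalks()]
    return "\n".join(algorithm.cite() for algorithm in algorithms)
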
def test_absorbing_vs_pagerank():
    graph = next(pg.load_datasets_graph(["graph9"]))
    personalization = {"A": 1, "B": 1}
    for _ in supported_backends():
        pagerank_result = pg.PageRank(normalization='col').rank(graph, personalization)
        absorbing_result = pg.AbsorbingWalks(0.85, normalization='col', max_iters=1000).rank(graph, personalization)
        # Absorbing random walks with restart probability 0.85 should match
        # PageRank up to numerical tolerance on every backend.
        assert pg.Mabs(pagerank_result)(absorbing_result) < pg.epsilon()
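
# Hedged sketch following the same Mabs comparison pattern: measure how far
# HeatKernel's Taylor and Chebyshev coefficient bases diverge on graph9. No
# tolerance is asserted, since the original suite does not fix one for this pair.
def example_heatkernel_coefficient_distance():
    graph = next(pg.load_datasets_graph(["graph9"]))
    taylor = pg.HeatKernel(coefficient_type="taylor").rank(graph, {"A": 1})
    chebyshev = pg.HeatKernel(coefficient_type="chebyshev").rank(graph, {"A": 1})
    return pg.Mabs(taylor)(chebyshev)  # mean absolute difference of score maps
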
def test_filter_citations():
    assert pg.PageRank().cite() != pg.GraphFilter().cite()
    assert pg.HeatKernel().cite() != pg.GraphFilter().cite()
    assert pg.AbsorbingWalks().cite() != pg.GraphFilter().cite()
    # Citations are parameter-aware: different settings yield different strings.
    assert pg.PageRank(alpha=0.85).cite() != pg.PageRank(alpha=0.99).cite()
    assert pg.HeatKernel(krylov_dims=0).cite() != pg.HeatKernel(krylov_dims=5).cite()
    assert pg.HeatKernel(coefficient_type="taylor").cite() != pg.HeatKernel(coefficient_type="chebyshev").cite()
    assert pg.HeatKernel(optimization_dict=dict()).cite() != pg.HeatKernel(optimization_dict=None).cite()
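
# Illustrative sketch: since cite() is parameter-aware, the two alpha values
# the test above compares also produce distinct entries when collected in a dict.
def example_parameter_aware_citations():
    citations = {alpha: pg.PageRank(alpha=alpha).cite() for alpha in (0.85, 0.99)}
    assert citations[0.85] != citations[0.99]
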
def test_completion():
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        pg.PageRank().rank(graph)
        pg.PageRank(normalization="both").rank(graph)
        pg.HeatKernel().rank(graph)
        pg.AbsorbingWalks().rank(graph)
        pg.SymmetricAbsorbingRandomWalks().rank(graph)
        assert True  # smoke test: reaching here means every filter completed
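
# Hedged sketch of the same completion check written as a loop; it assumes
# only what the test above already exercises, namely that each listed filter
# exposes rank(graph) and completes without raising.
def example_smoke_all_filters():
    graph = next(pg.load_datasets_graph(["graph9"]))
    for algorithm in [pg.PageRank(), pg.PageRank(normalization="both"),
                      pg.HeatKernel(), pg.AbsorbingWalks(),
                      pg.SymmetricAbsorbingRandomWalks()]:
        algorithm.rank(graph)  # should complete on every backend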