コード例 #1
0
ファイル: test_filters.py プロジェクト: maniospas/pygrank
def test_stream_diff():
    """Filter arithmetic (& with ~) must reproduce an explicit truncated filter.

    A filter with coefficients [0, 0, 1] run for exactly 4 iterations should
    equal the combination [1, 1, 1] & ~[1, 1] applied to the same seed signal.
    """
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        # Explicit construction: keep only the third hop term.
        direct = pg.GenericGraphFilter(
            [0, 0, 1], max_iters=4, error_type="iters"
        ) | pg.to_signal(graph, {"A": 1})
        # Same result via filter subtraction; ~ binds before &, which binds before |.
        longer = pg.GenericGraphFilter([1, 1, 1], tol=None)
        shorter = pg.GenericGraphFilter([1, 1], tol=None)
        combined = (longer & ~shorter) | pg.to_signal(graph, {"A": 1})
        assert pg.Mabs(direct)(combined) < pg.epsilon()
コード例 #2
0
def test_explicit_citations():
    """Verify .cite() output of rankers, tuners, and postprocessors.

    Checks that citation strings contain the expected bibliography keys or
    descriptive phrases, and that wrapper algorithms include the citations
    of the algorithms they wrap.
    """
    assert "unknown node ranking algorithm" == pg.NodeRanking().cite()
    # Raw string: "\c" is an invalid escape sequence in a plain literal
    # (deprecated, future SyntaxError); the literal text \cite{...} is intended.
    assert r"with parameters tuned \cite{krasanakis2021pygrank}" in pg.ParameterTuner(
        lambda params: pg.PageRank(params[0])).cite()
    assert "Postprocessor" in pg.Postprocessor().cite()
    assert pg.PageRank().cite() in pg.AlgorithmSelection().cite()
    assert "krasanakis2021pygrank" in pg.ParameterTuner().cite()
    assert "ortega2018graph" in pg.ParameterTuner().cite()
    # Wrappers must carry the wrapped algorithm's citation verbatim.
    assert pg.HeatKernel().cite() in pg.SeedOversampling(pg.HeatKernel()).cite()
    assert pg.AbsorbingWalks().cite() in pg.BoostedSeedOversampling(pg.AbsorbingWalks()).cite()
    assert "krasanakis2018venuerank" in pg.BiasedKernel(converge_to_eigenvectors=True).cite()
    assert "yu2021chebyshev" in pg.HeatKernel(coefficient_type="chebyshev").cite()
    assert "susnjara2015accelerated" in pg.HeatKernel(krylov_dims=5).cite()
    assert "krasanakis2021pygrank" in pg.GenericGraphFilter(optimization_dict=dict()).cite()
    assert "tautology" in pg.Tautology().cite()
    assert pg.PageRank().cite() == pg.Tautology(pg.PageRank()).cite()
    assert "mabs" in pg.MabsMaintain(pg.PageRank()).cite()
    assert "max normalization" in pg.Normalize(pg.PageRank()).cite()
    assert "[0,1] range" in pg.Normalize(pg.PageRank(), "range").cite()
    assert "ordinal" in pg.Ordinals(pg.PageRank()).cite()
    assert "exp" in pg.Transformer(pg.PageRank()).cite()
    assert "0.5" in pg.Threshold(pg.PageRank(), 0.5).cite()
    assert "andersen2007local" in pg.Sweep(pg.PageRank()).cite()
    assert pg.HeatKernel().cite() in pg.Sweep(pg.PageRank(), pg.HeatKernel()).cite()
    # Fairness-aware postprocessors and their aliases.
    assert "LFPRO" in pg.AdHocFairness("O").cite()
    assert "LFPRO" in pg.AdHocFairness(pg.PageRank(), "LFPRO").cite()
    assert "multiplicative" in pg.AdHocFairness(pg.PageRank(), "B").cite()
    assert "multiplicative" in pg.AdHocFairness(pg.PageRank(), "mult").cite()
    assert "tsioutsiouliklis2020fairness" in pg.AdHocFairness().cite()
    assert "rahman2019fairwalk" in pg.FairWalk(pg.PageRank()).cite()
    assert "krasanakis2020prioredit" in pg.FairPersonalizer(pg.PageRank()).cite()
コード例 #3
0
ファイル: test_autotune.py プロジェクト: maniospas/pygrank
def test_lowpass_tuning():
    """A tuned low-pass recursive filter should score close to a tuned generic filter."""
    _, graph, communities = next(pg.load_datasets_multiple_communities(["bigraph"]))
    seeds = pg.to_signal(graph, {member: 1 for member in communities[0]})
    training, evaluation = pg.split(seeds, training_samples=0.1)
    generic_auc = pg.AUC(evaluation, exclude=training)(
        pg.ParameterTuner(lambda params: pg.GenericGraphFilter(params)).rank(training))
    lowpass_auc = pg.AUC(evaluation, exclude=training)(
        pg.ParameterTuner(lambda params: pg.LowPassRecursiveGraphFilter(params)).rank(training))
    # Allow up to 20% degradation relative to the generic filter.
    assert lowpass_auc > generic_auc * 0.8
コード例 #4
0
ファイル: test_autotune.py プロジェクト: maniospas/pygrank
def test_hoptuner_explicit_algorithm():
    """HopTuner with an explicit Krylov-based filter matches its default filter."""
    _, graph, communities = next(pg.load_datasets_multiple_communities(["bigraph"]))
    seeds = pg.to_signal(graph, {member: 1 for member in communities[0]})
    training, evaluation = pg.split(seeds, training_samples=0.5)
    # Explicitly supply the filter factory with krylov_dims=10.
    explicit_auc = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(lambda params: pg.GenericGraphFilter(params, krylov_dims=10),
                    basis="arnoldi", measure=pg.AUC).rank(training))
    # Let the tuner build its default filter with the same krylov_dims.
    default_auc = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(basis="arnoldi", krylov_dims=10, measure=pg.AUC).rank(training))
    assert abs(explicit_auc - default_auc) < 0.005
コード例 #5
0
 def __init__(self, num_inputs, num_outputs, hidden=64):
     """Build a dropout-regularized two-layer MLP plus a pretuned graph-filter ranker.

     Args:
         num_inputs: Dimensionality of the input features.
         num_outputs: Dimensionality of the output layer.
         hidden: Width of the hidden layer (default 64).
     """
     super().__init__([
         Dropout(0.5, input_shape=(num_inputs,)),
         Dense(hidden, activation="relu", kernel_regularizer=L2(1.E-5)),
         Dropout(0.5),
         Dense(num_outputs, activation="relu")])
     # Tune a single decay parameter par[0] for a 10-term geometric filter that
     # always runs exactly 10 iterations (error_type="iters").
     # Note: redundant int(10) wrappers around literal constants were removed.
     self.ranker = pg.ParameterTuner(
         lambda par: pg.GenericGraphFilter([par[0] ** i for i in range(10)],
                                           error_type="iters", max_iters=10),
         max_vals=[0.95], min_vals=[0.5], verbose=False,
         measure=pg.Mabs, deviation_tol=0.1, tuning_backend="numpy")
コード例 #6
0
 def __init__(self, num_inputs, num_outputs, hidden=64):
     """Build a two-layer torch MLP plus a pretuned graph-filter ranker.

     Args:
         num_inputs: Dimensionality of the input features.
         num_outputs: Dimensionality of the output layer.
         hidden: Width of the hidden layer (default 64).
     """
     super().__init__()
     self.layer1 = torch.nn.Linear(num_inputs, hidden)
     self.layer2 = torch.nn.Linear(hidden, num_outputs)
     self.activation = torch.nn.ReLU()
     self.dropout = torch.nn.Dropout(0.5)
     self.num_outputs = num_outputs
     # Tune a single decay parameter par[0] for a 10-term geometric filter that
     # always runs exactly 10 iterations (error_type="iters"); tuning happens on
     # the numpy backend regardless of the model's torch backend.
     # Note: redundant int(10) wrappers around literal constants were removed.
     self.ranker = pg.ParameterTuner(
         lambda par: pg.GenericGraphFilter([par[0] ** i for i in range(10)],
                                           error_type="iters", max_iters=10),
         max_vals=[0.95], min_vals=[0.5], verbose=False,
         measure=pg.Mabs, deviation_tol=0.1, tuning_backend="numpy")
コード例 #7
0
def create_param_tuner(optimizer=pg.optimize):
    """Return a ParameterTuner searching 40 extra filter coefficients in [0, 1].

    NOTE(review): `pre`, `postprocessor`, `optimization`, and `measure` are free
    variables resolved from the enclosing scope — confirm their definitions there.
    """
    def build_ranker(params):
        # First coefficient fixed to 1; the tuned parameters fill the tail.
        graph_filter = pg.GenericGraphFilter([1] + params,
                                             preprocessor=pre,
                                             error_type="iters",
                                             max_iters=41,
                                             optimization_dict=optimization,
                                             preserve_norm=False)
        return pg.Normalize(postprocessor(graph_filter))

    return pg.ParameterTuner(build_ranker,
                             deviation_tol=1.E-6,
                             measure=measure,
                             optimizer=optimizer,
                             max_vals=[1] * 40,
                             min_vals=[0] * 40)
コード例 #8
0
ファイル: test_filters.py プロジェクト: maniospas/pygrank
def test_custom_runs():
    """Three equivalent filter formulations must produce matching normalized ranks."""
    graph = next(pg.load_datasets_graph(["graph9"]))
    for _ in supported_backends():
        # Reference: plain personalized PageRank without quotient normalization.
        reference = pg.Normalize(
            pg.PageRank(0.85,
                        tol=pg.epsilon(),
                        max_iters=1000,
                        use_quotient=False)).rank(graph, {"A": 1})
        # Same diffusion expressed as an explicit 80-term geometric series.
        geometric = pg.Normalize(
            pg.GenericGraphFilter([0.85**i * len(graph) for i in range(80)],
                                  tol=pg.epsilon())).rank(graph, {"A": 1})
        # Same diffusion expressed as a recursive low-pass filter.
        recursive = pg.Normalize(
            pg.LowPassRecursiveGraphFilter([0.85 for _ in range(80)],
                                           tol=pg.epsilon())).rank(
                                               graph, {"A": 1})
        assert pg.Mabs(reference)(geometric) < 1.E-6
        assert pg.Mabs(reference)(recursive) < 1.E-6
コード例 #9
0
 def __init__(self, num_inputs, num_outputs, hidden=64):
     """Build a dropout-regularized MLP and a pretuned graph-filter ranker.

     Args:
         num_inputs: Dimensionality of the input features.
         num_outputs: Dimensionality of the output layer (linear, no activation).
         hidden: Width of the hidden layer (default 64).
     """
     super().__init__([
         Dropout(0.5, input_shape=(num_inputs, )),
         Dense(hidden, activation="relu", kernel_regularizer=L2(0.005)),
         Dropout(0.5),
         Dense(num_outputs)
     ])
     # Shared preprocessor: renormalized adjacency, cached between runs.
     pre = pg.preprocessor(renormalize=True, assume_immutability=True)
     # Tune a single decay parameter par[0] for a 10-term geometric filter that
     # always runs exactly 10 iterations (error_type="iters").
     # Note: redundant int(10) wrapper around the literal constant was removed.
     self.ranker = pg.ParameterTuner(lambda par: pg.GenericGraphFilter(
         [par[0]**i for i in range(10)],
         preprocessor=pre,
         error_type="iters",
         max_iters=10),
                                     max_vals=[1],
                                     min_vals=[0.5],
                                     verbose=False,
                                     measure=pg.Mabs,
                                     deviation_tol=0.01,
                                     tuning_backend="numpy")
コード例 #10
0
def test_autotune_citations():
    """Each tuner's citation must differ from the plain generic filter's."""
    assert pg.GenericGraphFilter().cite() != pg.ParameterTuner().cite()
    assert pg.GenericGraphFilter().cite() != pg.HopTuner().cite()
    assert pg.GenericGraphFilter().cite() != pg.AlgorithmSelection().cite()
コード例 #11
0
import pygrank as pg

# Benchmark fixed, swept, tuned, and auto-selected filters on two datasets.
datasets = ["EUCore", "Amazon"]
pre = pg.preprocessor(assume_immutability=True, normalization="symmetric")

# Base filters sharing one cached symmetric-normalized preprocessor.
algs = {
    "ppr.85": pg.PageRank(.85, preprocessor=pre, tol=1.E-9, max_iters=1000),
    "ppr.99": pg.PageRank(.99, preprocessor=pre, tol=1.E-9, max_iters=1000),
    "hk3": pg.HeatKernel(3, preprocessor=pre, tol=1.E-9, max_iters=1000),
    "hk5": pg.HeatKernel(5, preprocessor=pre, tol=1.E-9, max_iters=1000),
}

# Extend with sweep-postprocessed variations of every base filter.
algs = {**algs, **pg.create_variations(algs, {"+Sweep": pg.Sweep})}
loader = pg.load_datasets_one_community(datasets)

# Autotuned and auto-selected competitors.
algs["tuned"] = pg.ParameterTuner(preprocessor=pre, tol=1.E-9, max_iters=1000)
algs["selected"] = pg.AlgorithmSelection(
    pg.create_demo_filters(preprocessor=pre, tol=1.E-9,
                           max_iters=1000).values())
algs["tuned+Sweep"] = pg.ParameterTuner(
    ranker_generator=lambda params: pg.Sweep(
        pg.GenericGraphFilter(
            params, preprocessor=pre, tol=1.E-9, max_iters=1000)))

# Emit the citation list for every benchmarked algorithm.
for alg in algs.values():
    print(alg.cite())

# Run the AUC benchmark with a 50/50 split and print a LaTeX-style table.
pg.benchmark_print(pg.benchmark(algs, loader, pg.AUC, fraction_of_training=.5),
                   delimiter=" & ",
                   end_line="\\\\")