示例#1
0
def test_all_communities_benchmarks():
    """Benchmark PageRank/HeatKernel filters plus an auto-selected tuner over
    all communities of the bigraph dataset, printing LaTeX-style tables for
    AUC, pRule-sensitive Modularity, and mistreatment-sensitive Modularity.
    """
    datasets = ["bigraph"]
    preprocessor = pg.preprocessor(assume_immutability=True, normalization="symmetric")
    tolerance = 1.E-9
    shared_optimization = pg.SelfClearDict()

    def make_pagerank(alpha):
        # PageRank filters share the preprocessor but no optimization dict.
        return pg.PageRank(alpha=alpha, preprocessor=preprocessor, max_iters=10000, tol=tolerance)

    def make_heatkernel(t):
        # HeatKernel filters additionally reuse a self-clearing optimization dict.
        return pg.HeatKernel(t=t, preprocessor=preprocessor, max_iters=10000, tol=tolerance,
                             optimization_dict=shared_optimization)

    algorithms = {
        "ppr0.85": make_pagerank(0.85),
        "ppr0.9": make_pagerank(0.9),
        "ppr0.99": make_pagerank(0.99),
        "hk3": make_heatkernel(3),
        "hk5": make_heatkernel(5),
        "hk7": make_heatkernel(7),
    }
    tuned = {"selected": pg.AlgorithmSelection(algorithms.values(), fraction_of_training=0.8)}

    # Disparate mistreatment: average disparity of TPR and TNR between the
    # sensitive group and its complement.
    mistreatment = lambda known_scores, sensitive_signal, exclude: \
        pg.AM([pg.Disparity([pg.TPR(known_scores, exclude=1 - (1 - exclude.np) * sensitive_signal.np),
                             pg.TPR(known_scores, exclude=1 - (1 - exclude.np) * (1 - sensitive_signal.np))]),
               pg.Disparity([pg.TNR(known_scores, exclude=1 - (1 - exclude.np) * sensitive_signal.np),
                             pg.TNR(known_scores, exclude=1 - (1 - exclude.np) * (1 - sensitive_signal.np))])])

    # One benchmark run per (metric, sensitive objective) pair; the loader is
    # recreated each time because it is a one-shot generator.
    runs = ((pg.AUC, None), (pg.Modularity, pg.pRule), (pg.Modularity, mistreatment))
    for metric, sensitive in runs:
        loader = pg.load_datasets_all_communities(datasets, min_group_size=50)
        # Only pass the sensitive kwarg when a sensitive objective is requested,
        # so the plain-AUC run calls pg.benchmark exactly as before.
        extra = {} if sensitive is None else {"sensitive": sensitive}
        results = pg.benchmark(algorithms | tuned, loader, metric,
                               fraction_of_training=.8, seed=list(range(1)), **extra)
        pg.benchmark_print(results, decimals=3, delimiter=" & ", end_line="\\\\")
示例#2
0
def test_best_direction():
    """Verify the optimization direction each measure reports: -1 for
    Conductance (lower is better), 1 for all the maximized measures."""
    expectations = [
        (pg.Conductance(), -1),
        (pg.Density(), 1),
        (pg.Modularity(), 1),
        (pg.AUC([1, 2, 3]), 1),
        (pg.Cos([1, 2, 3]), 1),
        (pg.Dot([1, 2, 3]), 1),
        (pg.TPR([1, 2, 3]), 1),
        (pg.TNR([1, 2, 3]), 1),
    ]
    for measure, direction in expectations:
        assert measure.best_direction() == direction
示例#3
0
def test_computations():
    """Spot-check measure outputs on tiny hand-computed examples, repeated
    under every supported backend."""
    for backend in supported_backends():
        # Perfect agreement: maximal accuracy, unit mean absolute error for a
        # constant offset of one, and (near-)zero cross-entropy.
        assert pg.Accuracy([1, 2, 3])([1, 2, 3]) == 1
        assert pg.Mabs([3, 1, 1])([2, 0, 2]) == 1
        assert pg.CrossEntropy([1, 1, 1])([1, 1, 1]) < 1.E-12
        # Cosine similarity of parallel, anti-parallel, and all-zero vectors.
        parallel = pg.Cos([2, 0, 1])
        assert float(parallel([2, 0, 1])) == 1
        antiparallel = pg.Cos([2, 0, 1])
        assert float(antiparallel([-2, 0, -1])) == -1
        assert float(pg.Cos([0, 0, 0])([0, 0, 0])) == 0
        # Dot product of two all-ones vectors of length three.
        assert float(pg.Dot([1, 1, 1])([1, 1, 1])) == 3
        # Exactly half the positives (resp. negatives) are retrieved.
        assert float(pg.TPR([1, 0, 0, 0])([1, 1, 0, 0])) == 0.5
        assert float(pg.TNR([0, 0, 0, 1])([1, 1, 0, 0])) == 0.5
示例#4
0
def test_fair_personalizer_mistreatment():
    """Check that each FairPersonalizer variant does not increase disparate
    mistreatment relative to plain personalized PageRank on synthfeats."""
    base_ranker = pg.PageRank(assume_immutability=True, normalization="symmetric")
    algorithms = {
        "Base": lambda G, p, s: base_ranker.rank(G, p),
        "FairPersMistreat": pg.Normalize(pg.FairPersonalizer(base_ranker, parity_type="mistreatment", pRule_weight=10)),
        "FairPersTPR": pg.Normalize(pg.FairPersonalizer(base_ranker, parity_type="TPR", pRule_weight=10)),
        # TNR optimization increases mistreatment for this example.
        "FairPersTNR": pg.Normalize(pg.FairPersonalizer(base_ranker, parity_type="TNR", pRule_weight=-1)),
    }

    def mistreatment(known_scores, sensitive_signal, exclude):
        # Average disparity of TPR and TNR between the sensitive group and
        # its complement (disparate mistreatment).
        tpr_disparity = pg.Disparity(
            [pg.TPR(known_scores, exclude=1 - (1 - exclude) * sensitive_signal),
             pg.TPR(known_scores, exclude=1 - (1 - exclude) * (1 - sensitive_signal))])
        tnr_disparity = pg.Disparity(
            [pg.TNR(known_scores, exclude=1 - (1 - exclude) * sensitive_signal),
             pg.TNR(known_scores, exclude=1 - (1 - exclude) * (1 - sensitive_signal))])
        return pg.AM([tpr_disparity, tnr_disparity])

    _, graph, groups = next(pg.load_datasets_multiple_communities(["synthfeats"]))
    labels = pg.to_signal(graph, groups[0])
    sensitive = pg.to_signal(graph, groups[1])
    train, test = pg.split(labels)
    # TODO: maybe try to check for greater improvement
    base_mistreatment = mistreatment(test, sensitive, train)(algorithms["Base"](graph, train, sensitive))
    for name, algorithm in algorithms.items():
        if name == "Base":
            continue
        print(algorithm.cite())
        assert base_mistreatment >= mistreatment(test, sensitive, train)(algorithm(graph, train, sensitive))
示例#5
0
                            .8,
                            pRule_weight=10,
                            max_residual=1,
                            error_type=pg.Mabs,
                            error_skewing=False,
                            parameter_buckets=1,
                            parity_type="impact")
        #"FFfix-C": pg.FairTradeoff(filter, .8, pRule_weight=10, error_type=pg.Mabs)
        #"FairTf": pg.FairnessTf(filter)
    }
    algorithms = pg.create_variations(algorithms, {"": pg.Normalize})

    #import cProfile as profile
    #pr = profile.Profile()
    #pr.enable()
    mistreatment = lambda known_scores, sensitive_signal, exclude: \
        pg.AM([pg.Disparity([pg.TPR(known_scores, exclude=1-(1-exclude.np)*sensitive_signal.np),
                             pg.TPR(known_scores, exclude=1-(1-exclude.np)*(1-sensitive_signal.np))]),
               pg.Disparity([pg.TNR(known_scores, exclude=1 - (1 - exclude.np) * sensitive_signal.np),
                             pg.TNR(known_scores, exclude=1 - (1 - exclude.np) * (1 - sensitive_signal.np))])])
    pg.benchmark_print(pg.benchmark(algorithms,
                                    pg.load_datasets_multiple_communities(
                                        datasets, max_group_number=2),
                                    metric=pg.AUC,
                                    sensitive=pg.pRule,
                                    fraction_of_training=seed_fractions),
                       delimiter=" & ",
                       end_line="\\\\")

    #pr.disable()
    #pr.dump_stats('profile.pstat')