Example #1
import pygrank as pg

def test_hoptuner_autorgression():
    # Autoregressive hop-weight tuning should retain at least 90% of the plain tuner's AUC.
    _, G, groups = next(pg.load_datasets_multiple_communities(["bigraph"]))
    group = groups[0]
    training, evaluation = pg.split(pg.to_signal(G, {v: 1 for v in group}), training_samples=0.01)
    auc1 = pg.AUC(evaluation, exclude=training)(pg.HopTuner(measure=pg.AUC).rank(training))
    auc3 = pg.AUC(evaluation, exclude=training)(pg.HopTuner(measure=pg.AUC, autoregression=5).rank(training))
    assert auc3 > auc1*0.9
Example #2
import pygrank as pg

def test_hoptuner_explicit_algorithm():
    # Passing an explicit GenericGraphFilter factory should match the built-in krylov_dims option.
    _, G, groups = next(pg.load_datasets_multiple_communities(["bigraph"]))
    group = groups[0]
    training, evaluation = pg.split(pg.to_signal(G, {v: 1 for v in group}), training_samples=0.5)
    auc1 = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(lambda params: pg.GenericGraphFilter(params, krylov_dims=10),
                    basis="arnoldi", measure=pg.AUC).rank(training))
    auc2 = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(basis="arnoldi", krylov_dims=10, measure=pg.AUC).rank(training))
    assert abs(auc1-auc2) < 0.005
Example #3
import pygrank as pg

def test_hoptuner_arnoldi_backends():
    # Tuning should yield identical results across the numpy (default), pytorch, and tensorflow backends.
    _, G, groups = next(pg.load_datasets_multiple_communities(["bigraph"]))
    group = groups[0]
    training, evaluation = pg.split(pg.to_signal(G, {v: 1 for v in group}), training_samples=0.5)
    auc1 = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(basis="arnoldi", measure=pg.AUC).rank(training))
    auc2 = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(basis="arnoldi", measure=pg.AUC, tuning_backend="pytorch").rank(training))
    auc3 = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(basis="arnoldi", measure=pg.AUC, tuning_backend="tensorflow").rank(training))
    assert auc1 == auc2
    assert auc1 == auc3
Example #4
import pygrank as pg

def test_hoptuner_arnoldi():
    # The arnoldi basis should closely match the default basis (AUC within 0.005).
    _, G, groups = next(pg.load_datasets_multiple_communities(["bigraph"]))
    group = groups[0]
    training, evaluation = pg.split(pg.to_signal(G, {v: 1 for v in group}), training_samples=0.5)
    auc1 = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(measure=pg.AUC).rank(training))
    auc2 = pg.AUC(evaluation, exclude=training)(
        pg.HopTuner(basis="arnoldi", measure=pg.AUC).rank(training))
    assert abs(auc1 - auc2) < 0.005
Example #5
    # Excerpt: constructor of a node-classification model. Judging from the
    # super().__init__([...]) call, the class subclasses tf.keras.Sequential;
    # `import tensorflow as tf` and `import pygrank as pg` are assumed.
    def __init__(self,
                 num_inputs,
                 num_outputs,
                 hidden=64,
                 alpha=0.9,
                 propagate_on_training=True,
                 graph_dropout=0):
        # Trainable feature transform: two dense layers with dropout.
        super().__init__([
            tf.keras.layers.Dropout(0.5, input_shape=(num_inputs, )),
            tf.keras.layers.Dense(
                hidden,
                activation=tf.nn.relu,
                kernel_regularizer=tf.keras.regularizers.L2(1.E-5)),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(num_outputs, activation=tf.nn.relu),
        ])

        # alpha == "estimated": instead of a fixed restart probability, tune
        # per-hop weights directly on the graph signal (pg.Cos needs no labels).
        if isinstance(alpha, str) and alpha == "estimated":
            self.ranker = pg.HopTuner(renormalize=True,
                                      assume_immutability=True,
                                      measure=pg.Cos,
                                      tuning_backend="numpy",
                                      tunable_offset=None,
                                      num_parameters=10,
                                      tol=0,
                                      autoregression=5,
                                      error_type="iters",
                                      max_iters=10)
        else:
            # A tf.Variable alpha is registered as trainable, so the restart
            # probability is learned during training.
            if isinstance(alpha, tf.Variable):
                self.trainable_variables = self.trainable_variables + [alpha]
            self.ranker = pg.PageRank(alpha,
                                      renormalize=True,
                                      assume_immutability=True,
                                      error_type="iters",
                                      max_iters=10)

        self.graph_dropout = graph_dropout
        self.num_outputs = num_outputs
        self.propagate_on_training = propagate_on_training
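
For orientation, a minimal sketch of how the trainable-alpha branch above might be exercised; the class name NodeClassifier is an assumption, since the excerpt does not show the class header:

import tensorflow as tf

# Hypothetical usage: a tf.Variable restart probability is passed to the
# constructor, which appends it to the model's trainable variables so that
# alpha is optimized jointly with the dense layers.
alpha = tf.Variable(0.9)
model = NodeClassifier(num_inputs=64, num_outputs=7, alpha=alpha)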
Example #6
import pygrank as pg

def test_autotune_citations():
    # Each tuner should produce its own citation text, distinct from the base filter's.
    assert pg.ParameterTuner().cite() != pg.GenericGraphFilter().cite()
    assert pg.HopTuner().cite() != pg.GenericGraphFilter().cite()
    assert pg.AlgorithmSelection().cite() != pg.GenericGraphFilter().cite()

# Below: fragment of a separate benchmark script, truncated in the source.
# `pre`, `tol`, `optimization`, and `datasets` are defined earlier in that
# script; the opening of this `algorithms = {...}` dict is cut off.
    pg.HeatKernel(t=7,
                  preprocessor=pre,
                  max_iters=10000,
                  tol=tol,
                  optimization_dict=optimization),
}
algorithms = algorithms  # | pg.benchmarks.create_variations(algorithms, {"+sweep": pg.Sweep})

tuned = {
    "selected": pg.AlgorithmSelection(algorithms.values(), fraction_of_training=0.8),
    #"tuned": pg.ParameterTuner(preprocessor=pre, fraction_of_training=0.8, tol=tol, optimization_dict=optimization, measure=pg.AUC),
    "arnoldi": pg.HopTuner(preprocessor=pre,
                           basis="arnoldi",
                           measure=pg.Cos,
                           tol=tol,
                           optimization_dict=optimization),
    #"arnoldi2": pg.ParameterTuner(lambda params: pg.HopTuner(preprocessor=pre, basis="arnoldi", num_parameters=int(params[0]),
    #                                                         measure=pg.Cos,
    #                                                         tol=tol, optimization_dict=optimization, tunable_offset=None),
    #                              max_vals=[40], min_vals=[5], divide_range=2, fraction_of_training=0.1),
}

#algorithms = pg.create_variations(algorithms, {"": pg.Tautology, "+Sweep": pg.Sweep})
#print(algorithms.keys())

#for name, graph, group in pg.load_datasets_all_communities(datasets, min_group_size=50):
#    print(" & ".join([str(val) for val in [name, len(graph), graph.number_of_edges(), len(group)]])+" \\\\")
loader = pg.load_datasets_all_communities(datasets, min_group_size=50)
pg.benchmark_print(pg.benchmark(algorithms | tuned,
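
The fragment above relies on names defined earlier in its source script. A plausible reconstruction, with hypothetical values that follow common pygrank conventions, would be:

import pygrank as pg

# Hypothetical definitions for the names the fragment assumes.
pre = pg.preprocessor(assume_immutability=True)  # shared, cached graph normalization
tol = 1.E-9                                      # convergence tolerance for all filters
optimization = dict()                            # cache reused across filter runs
datasets = ["citeseer", "eucore"]                # any pygrank community datasets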