Example #1
    def rank(self, graph, personalization, sensitive, *args, **kwargs):
        original_ranks = self.ranker(graph,
                                     personalization,
                                     *args,
                                     sensitive=sensitive,
                                     **kwargs)
        base_ranks = (original_ranks if self.ranker == self.base_ranker
                      else self.base_ranker(graph, personalization, *args, **kwargs))
        training_objective = pg.AM()\
            .add(pg.L2(base_ranks), weight=-1.)\
            .add(pg.pRule(tf.cast(sensitive.np, tf.float32)), weight=10., max_val=0.8)

        with pg.Backend("tensorflow"):
            ranks_var = tf.Variable(pg.to_array(original_ranks.np))
            optimizer = tf.keras.optimizers.Adam(learning_rate=0.1)
            best_loss = float('inf')
            best_ranks = None
            patience = 100  # early-stopping patience, reset on improvement
            for epoch in range(2000):
                with tf.GradientTape() as tape:
                    ranks = pg.to_signal(original_ranks, ranks_var)
                    loss = -training_objective(ranks)
                    # optionally regularize: loss += 1.E-5 * tf.reduce_sum(ranks_var * ranks_var)
                grads = tape.gradient(loss, [ranks_var])
                optimizer.apply_gradients(zip(grads, [ranks_var]))
                validation_loss = loss
                if validation_loss < best_loss:
                    patience = 100
                    best_ranks = ranks
                    best_loss = validation_loss
                patience -= 1
                if patience == 0:
                    break
        return best_ranks
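The snippet above gradient-edits the original ranks directly: minimizing the negated objective pushes the pRule fairness term toward its 0.8 cap while the L2 term keeps scores close to the base ranker's output, and training stops after 100 epochs without improvement.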
Example #2
    def _transform(self, ranks: pg.GraphSignal, **kwargs):
        # assumes module-level imports: numpy as np, scipy.sparse, pygrank as pg
        if ranks.graph not in self.known_ranks or not self.assume_immutability:
            with pg.Backend("numpy"):
                A = pg.preprocessor(normalization=self.normalization)(ranks.graph)
                D = pg.degrees(pg.preprocessor(normalization="none")(ranks.graph))
                s = pg.sum(D) ** 0.5 / 2 if self.sparsity is None else self.sparsity
                D = (D / pg.max(D)) ** self.beta
                # sparse random projection matrix with +/-1 entries
                S = scipy.sparse.random(self.dims,
                                        A.shape[0],
                                        density=1. / s,
                                        data_rvs=lambda l: np.random.choice([-1, 1], size=l),
                                        format="csc")
                S = S @ scipy.sparse.spdiags(D, 0, *A.shape)
            self.embeddings[ranks.graph] = pg.scipy_sparse_to_backend(S.T)
            # the first term is known to be zero, which avoids direct embedding comparison
            self.known_ranks[ranks.graph] = []
            for _ in range(len(self.weights)):
                S = S @ A
                self.known_ranks[ranks.graph].append(pg.scipy_sparse_to_backend(S))
        ret = 0
        on = pg.conv(ranks.np, self.embeddings[ranks.graph])
        for weight, S in zip(self.weights, self.known_ranks[ranks.graph]):
            ret = ret + weight * pg.conv(on, S)
        return pg.to_signal(ranks, ret)
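Here S appears to act as a sparse Johnson-Lindenstrauss-style projection: each cached known_ranks entry holds a randomly projected power S·A^k, so new signals are propagated through these small cached matrices instead of re-running graph convolutions, and graphs declared immutable skip the whole construction branch on repeat calls.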
Example #3
def test_appnp_tf():
    import tensorflow as tf
    from tensorflow.keras.layers import Dropout, Dense
    from tensorflow.keras.regularizers import L2

    class APPNP(tf.keras.Sequential):
        def __init__(self, num_inputs, num_outputs, hidden=64):
            super().__init__([
                Dropout(0.5, input_shape=(num_inputs,)),
                Dense(hidden, activation="relu", kernel_regularizer=L2(1.E-5)),
                Dropout(0.5),
                Dense(num_outputs, activation="relu")])
            self.ranker = pg.ParameterTuner(
                lambda par: pg.GenericGraphFilter([par[0] ** i for i in range(10)],
                                                  error_type="iters", max_iters=10),
                max_vals=[0.95], min_vals=[0.5], verbose=False,
                measure=pg.Mabs, deviation_tol=0.1, tuning_backend="numpy")

        def call(self, features, graph, training=False):
            predict = super().call(features, training=training)
            propagate = self.ranker.propagate(graph, predict, graph_dropout=0.5 if training else 0)
            return tf.nn.softmax(propagate, axis=1)

    graph, features, labels = pg.load_feature_dataset('synthfeats')
    training, test = pg.split(list(range(len(graph))), 0.8)
    training, validation = pg.split(training, 1 - 0.2 / 0.8)
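    # i.e. 60% of nodes train, 20% validate, 20% test (0.8, then 1-0.2/0.8=0.75 of the 80%)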
    model = APPNP(features.shape[1], labels.shape[1])
    with pg.Backend('tensorflow'):  # pygrank computations in tensorflow backend
        graph = pg.preprocessor(renormalize=True, cors=True)(graph)  # cors = use in many backends
        pg.gnn_train(model, features, graph, labels, training, validation,
                     optimizer=tf.optimizers.Adam(learning_rate=0.01), verbose=True, epochs=50)
        assert float(pg.gnn_accuracy(labels, model(features, graph), test)) == 1.  # dataset is super-easy to predict
Example #4
def test_backend_with():
    for backend_name in [
            "matvec", "pytorch", "tensorflow", "numpy", "torch_sparse"
    ]:
        with pg.Backend(backend_name) as backend:
            assert pg.backend_name() == backend_name
            assert backend.backend_name() == backend_name
        assert pg.backend_name() == "numpy"
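A minimal end-to-end sketch of the same context-manager pattern, assuming pygrank's PageRank filter and a toy networkx graph (neither appears in the test above):

import networkx as nx
import pygrank as pg

graph = nx.Graph([(1, 2), (2, 3), (3, 4)])  # toy graph for illustration
with pg.Backend("tensorflow"):  # pygrank math runs on tensorflow inside the block
    ranks = pg.PageRank(alpha=0.85)(graph, {1: 1.})  # personalized ranking seeded at node 1
print(ranks[4])  # outside the block, signal data revert to the default numpy backend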
Example #5
def test_signal_np_auto_conversion():
    import tensorflow as tf
    import numpy as np
    import networkx as nx
    graph = nx.DiGraph([(1, 2), (2, 3)])
    signal = pg.to_signal(graph, tf.convert_to_tensor([1., 2., 3.]))
    assert isinstance(signal.np, np.ndarray)
    with pg.Backend("tensorflow"):
        assert pg.backend_name() == "tensorflow"
        assert not isinstance(signal.np, np.ndarray)
    assert pg.backend_name() == "numpy"
    assert isinstance(signal.np, np.ndarray)
Example #6
    def train_model(self, graph, personalization, sensitive, *args, **kwargs):
        original_ranks = self.ranker(graph, personalization, *args, **kwargs)
        features = tf.concat([tf.reshape(personalization.np, (-1, 1)),
                              tf.reshape(original_ranks.np, (-1, 1)),
                              tf.reshape(sensitive.np, (-1, 1))],
                             axis=1)
        training_objective = pg.AM()\
            .add(pg.L2(tf.cast(original_ranks.np, tf.float32)), weight=1.)\
            .add(pg.pRule(tf.cast(sensitive.np, tf.float32)), max_val=0.8, weight=-10.)
        model = self.model()
        with pg.Backend("tensorflow"):
            best_loss = float('inf')
            best_ranks = None
            patience = 10  # early-stopping patience, reset on improvement
            optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

            for epoch in range(5000):
                with tf.GradientTape() as tape:
                    personalization = pg.to_signal(personalization, model(features))
                    ranks = self.ranker(graph, personalization, *args, **kwargs)
                    loss = training_objective(ranks)
                    for var in model.trainable_variables:
                        loss = loss + 1.E-5 * tf.reduce_sum(var * var)
                grads = tape.gradient(loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads, model.trainable_variables))
                validation_loss = training_objective(ranks)
                if validation_loss < best_loss:
                    patience = 10
                    best_ranks = ranks
                    best_loss = validation_loss
                    print("epoch", epoch, "loss", validation_loss, "prule",
                          pg.pRule(tf.cast(sensitive.np, tf.float32))(ranks))
                patience -= 1
                if patience == 0:
                    break
        return best_ranks
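Compared to Example #1, which optimizes the rank values themselves, this variant trains a small model to rewrite the personalization signal and re-runs the ranker each epoch; the objective is minimized directly here, which is why the pRule term now carries a negative weight.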
Example #7
def test_appnp_torch():
    graph, features, labels = pg.load_feature_dataset('synthfeats')
    training, test = pg.split(list(range(len(graph))), 0.8)
    training, validation = pg.split(training, 1 - 0.2 / 0.8)

    class AutotuneAPPNP(torch.nn.Module):
        def __init__(self, num_inputs, num_outputs, hidden=64):
            super().__init__()
            self.layer1 = torch.nn.Linear(num_inputs, hidden)
            self.layer2 = torch.nn.Linear(hidden, num_outputs)
            self.activation = torch.nn.ReLU()
            self.dropout = torch.nn.Dropout(0.5)
            self.num_outputs = num_outputs
            self.ranker = pg.ParameterTuner(
                lambda par: pg.GenericGraphFilter([par[0] ** i for i in range(10)],
                                                  error_type="iters", max_iters=10),
                max_vals=[0.95], min_vals=[0.5], verbose=False,
                measure=pg.Mabs, deviation_tol=0.1, tuning_backend="numpy")

        def forward(self, features, graph, training=False):
            predict = self.dropout(torch.FloatTensor(features))
            predict = self.dropout(self.activation(self.layer1(predict)))
            predict = self.activation(self.layer2(predict))
            predict = self.ranker.propagate(graph, predict, graph_dropout=0.5 if training else 0)
            ret = torch.nn.functional.softmax(predict, dim=1)
            self.loss = 0
            for param in self.layer1.parameters():
                self.loss = self.loss + 1E-5*torch.norm(param)
            return ret

    def init_weights(m):
        if isinstance(m, torch.nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            m.bias.data.fill_(0.01)

    model = AutotuneAPPNP(features.shape[1], labels.shape[1])
    graph = pg.preprocessor(renormalize=True, cors=True)(graph)
    model.apply(init_weights)
    with pg.Backend('pytorch'):
        pg.gnn_train(model, features, graph, labels, training, validation, epochs=50)
Example #8
class APPNP(tf.keras.Sequential):
    def __init__(self, num_inputs, num_outputs, hidden=64):
        super().__init__([
            tf.keras.layers.Dropout(0.5, input_shape=(num_inputs,)),
            tf.keras.layers.Dense(hidden, activation="relu"),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(num_outputs)])
        # the snippet's opening was truncated; the surviving constructor options
        # are restored on a pg.PageRank filter (the filter choice is an assumption)
        self.ranker = pg.PageRank(assume_immutability=True,
                                  use_quotient=False,
                                  error_type="iters",
                                  max_iters=10)  # 10 iterations

    def call(self,
             features,
             graph,
             training=False):  # can call with tensor graph
        predict = super().call(features, training=training)
        propagate = self.ranker.propagate(graph,
                                          predict,
                                          graph_dropout=0.5 * training)
        return tf.nn.softmax(propagate, axis=1)


graph, features, labels = pg.load_feature_dataset('citeseer')
training, test = pg.split(list(range(len(graph))), 0.8, seed=5)  # seeded split
training, validation = pg.split(training, 1 - 0.2 / 0.8)
model = APPNP(features.shape[1], labels.shape[1])
with pg.Backend('tensorflow'):  # pygrank with tensorflow backend
    pg.gnn_train(model,
                 features,
                 graph,
                 labels,
                 training,
                 validation,
                 optimizer=tf.optimizers.Adam(learning_rate=0.01),
                 verbose=True)
    print("Accuracy", pg.gnn_accuracy(labels, model(features, graph), test))
Example #9

class APPNP(tf.keras.Sequential):
    def __init__(self, num_inputs, num_outputs, hidden=64):
        super().__init__([
            tf.keras.layers.Dropout(0.5, input_shape=(num_inputs,)),
            tf.keras.layers.Dense(hidden, activation="relu"),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(num_outputs)])
        # this snippet's opening was also truncated; the surviving options are
        # restored on a ParameterTuner like the one in Example #3 (tuned filter assumed)
        self.ranker = pg.ParameterTuner(
            lambda par: pg.GenericGraphFilter([par[0] ** i for i in range(10)],
                                              error_type="iters", max_iters=10),
            max_vals=[1],
            min_vals=[0.5],
            verbose=False,
            measure=pg.Mabs,
            deviation_tol=0.01,
            tuning_backend="numpy")

    def call(self, features, graph, training=False):
        predict = super().call(features, training=training)
        propagate = self.ranker.propagate(graph,
                                          predict,
                                          graph_dropout=0.5 * training)
        return tf.nn.softmax(propagate, axis=1)


graph, features, labels = pg.load_feature_dataset('citeseer')
training, test = pg.split(list(range(len(graph))), 0.8, seed=5)
training, validation = pg.split(training, 1 - 0.2 / 0.8)
model = APPNP(features.shape[1], labels.shape[1])
with pg.Backend('tensorflow'):  # pygrank computations in tensorflow backend
    pg.gnn_train(model,
                 features,
                 graph,
                 labels,
                 training,
                 validation,
                 optimizer=tf.optimizers.Adam(learning_rate=0.01),
                 verbose=True,
                 test=test)
    print("Accuracy", pg.gnn_accuracy(labels, model(features, graph), test))