Example #1
def _convert_to_sparse_tensor(x):
    if x.ndim == 2:
        return sp_matrix_to_sp_tensor(x)
    elif x.ndim == 3:
        s1_, s2_, s3_ = x.shape
        return reshape(sp_matrix_to_sp_tensor(x.reshape(s1_ * s2_, s3_)),
                       (s1_, s2_, s3_))
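# A minimal usage sketch (hypothetical shapes; `sp_matrix_to_sp_tensor` and the
# sparse-aware `reshape` are assumed to come from spektral.layers.ops):
import numpy as np

t2 = _convert_to_sparse_tensor(np.eye(4, dtype='f4'))  # rank 2 -> SparseTensor (4, 4)
batch = np.stack([np.eye(4, dtype='f4')] * 2)          # rank 3, shape (2, 4, 4)
t3 = _convert_to_sparse_tensor(batch)                  # flattened, converted, reshaped back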
Example #2
def load_random(num_features: int,
                num_rel_types: int,
                sparse: bool = True,
                graph_type: str = 'homogeneous',
                num_nodes: int = 1000,
                edges: bool = False):
    N = num_nodes
    F = num_features
    S = 3
    if graph_type == 'heterogeneous':
        A = [np.random.randint(0, 2, (N, N)) for _ in range(num_rel_types)]
    else:
        A = np.ones((N, N))
    if sparse:
        try:
            A = sp_matrix_to_sp_tensor(A)
        except (TypeError, AttributeError):
            # A is a list of matrices (heterogeneous case): convert element-wise
            A = [sp_matrix_to_sp_tensor(a) for a in A]

    X = np.random.normal(size=(N, F))
    data = [X, A]
    if edges:
        E = np.random.normal(size=(N * N, S))
        data.append(E)

    print('[+] Data has been loaded.')
    return data
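# Hypothetical calls (argument values made up for illustration):
X, A = load_random(num_features=8, num_rel_types=1, sparse=True)
# X: (1000, 8) node features; A: a (1000, 1000) SparseTensor
X, A, E = load_random(num_features=8, num_rel_types=2, sparse=False,
                      graph_type='heterogeneous', edges=True)
# A: list of two dense (1000, 1000) relation matrices; E: (1000 * 1000, 3)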
Example #3
def test_Disjoint2Batch():
    X = np.array([[1, 0], [0, 1], [1, 1], [0, 0], [1, 2]])
    I = np.array([0, 0, 0, 1, 1])
    A_data = [1, 1, 1, 1, 1]
    A_row = [0, 1, 2, 3, 4]
    A_col = [1, 0, 1, 4, 3]
    A = ops.sp_matrix_to_sp_tensor(
        sp.csr_matrix((A_data, (A_row, A_col)), shape=(5, 5))
    )

    expected_X = np.array([[[1., 0.],
                            [0., 1.],
                            [1., 1.]],
                           [[0., 0.],
                            [1., 2.],
                            [0., 0.]]])
    expected_A = np.array([[[0., 1., 0.],
                            [1., 0., 0.],
                            [0., 1., 0.]],
                           [[0., 1., 0.],
                            [1., 0., 0.],
                            [0., 0., 0.]]])

    result_X, result_A = Disjoint2Batch()((X, A, I))
    assert np.allclose(result_A, expected_A)
    assert np.allclose(result_X, expected_X)
Example #4
def _test_mixed_mode(layer, **kwargs):
    sparse = kwargs.pop("sparse", False)
    X_batch = np.stack([X] * batch_size)
    A_in = Input(shape=(N, ), sparse=sparse)
    X_in = Input(shape=(N, F))
    inputs = [X_in, A_in]
    if sparse:
        input_data = [X_batch, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X_batch, A]

    if kwargs.pop("edges", None):
        E_in = Input(shape=(N * N, S))
        inputs.append(E_in)
        E_batch = np.stack([E_single] * batch_size)
        input_data.append(E_batch)

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (batch_size, N, kwargs["channels"])
Example #5
    def __init__(self,
                 channels,
                 adj,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):

        super().__init__(activity_regularizer=activity_regularizer, **kwargs)
        self.channels = channels
        self.adj = adj
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = False

        fltr = localpooling_filter(self.adj)
        self.fltr = sp_matrix_to_sp_tensor(fltr)
Example #6
 def run_MPNN_unit(self, Adj, X, net_id=1):
     """
     Function calls layer given by id
     args:
       Adj: Tensor. [:, x, y]. Adjacency matrix of the graph
       X: Tensor. [:, x, z]. Node feature matrix.
     returns:
       Tensor. [x, z*2]. ouput of passig a message through the MPNN
     """
     L1, L2 = self._nets[net_id]
     Adj = sp_matrix_to_sp_tensor(Adj)
     y = None
     for i in range(0, len(L1)):
         if i == 0: # MessagePassing layer
             y = L1[i].propagate(X, Adj)
             continue
         y = L1[i](y)
     H1 = y
     for i in range(0, len(L2)):
         if i == 0: # MessagePassing Layer
             y = L2[i].propagate(y, Adj)
             continue
         y = L2[i](y)
     H2 = y
     return tf.concat((H1, H2), axis=1)
Example #7
def make_embedding(CV, MODEL, DATA, EMBED):
    DATA_FOLD = DATA + f"/FOLD-{CV}"
    if not os.path.exists(EMBED):
        os.mkdir(EMBED)

    graph, features, labels = load_dataset(DATA, DATA_FOLD)
    fltr = GraphConv.preprocess(graph).astype('f4')
    fltr = ops.sp_matrix_to_sp_tensor(fltr)

    X_in = Input((features.shape[1], ))
    fltr_in = Input((features.shape[0], ), sparse=True)
    X_1 = GraphConv(512, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_in, fltr_in])
    X_1 = Dropout(0.5)(X_1)
    X_2 = GraphConv(256, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_1, fltr_in])
    X_2 = Dropout(0.5)(X_2)
    X_3 = GraphConv(128, 'relu', True,
                    kernel_regularizer=l2(5e-4))([X_2, fltr_in])
    X_3 = Dropout(0.5)(X_3)
    X_4 = GraphConv(64, 'linear', True,
                    kernel_regularizer=l2(5e-4))([X_3, fltr_in])
    X_5 = Dense(labels.shape[1], use_bias=True)(X_4)

    loaded_model = load_model(f"{MODEL}")
    model_without_task = Model(inputs=[X_in, fltr_in], outputs=X_4)
    model_without_task.set_weights(loaded_model.get_weights()[:8])

    final_node_representations = model_without_task([features, fltr],
                                                    training=False)
    save_embedding(final_node_representations, EMBED, DATA_FOLD, CV)
Example #8
    def collate(self, batch):
        packed = self.pack(batch, return_dict=True)
        y = None
        if "y" in self.dataset.signature:
            y = packed.pop("y_list")
            if self.node_level:
                if len(np.shape(y[0])) == 1:
                    y = [y_[:, None] for y_ in y]
                y = np.vstack(y)
            else:
                if len(np.shape(y[0])) == 0:
                    y = [np.array([y_]) for y_ in y]
                y = np.array(y)

        output = to_disjoint(**packed)

        # Sparse matrices to SparseTensors
        output = list(output)
        for i in range(len(output)):
            if sp.issparse(output[i]):
                output[i] = sp_matrix_to_sp_tensor(output[i])
        output = tuple(output)

        if y is None:
            return output
        else:
            return output, y
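# For context, a sketch of the disjoint packing this collate relies on
# (assuming `to_disjoint` follows the spektral.data.utils signature):
import numpy as np
import scipy.sparse as sp

x_list = [np.ones((2, 3)), np.ones((3, 3))]      # per-graph node features
a_list = [sp.eye(2).tocsr(), sp.eye(3).tocsr()]  # per-graph adjacencies
x, a, i = to_disjoint(x_list=x_list, a_list=a_list)
# x: (5, 3) stacked features; a: (5, 5) block-diagonal scipy matrix, which the
# loop above converts to a SparseTensor; i: [0 0 1 1 1] graph indices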
Example #9
def test_modes_ops():
    X = np.array([[1, 0], [0, 1], [1, 1], [0, 0], [1, 2]])
    I = np.array([0, 0, 0, 1, 1])

    A_data = [1, 1, 1, 1, 1]
    A_row = [0, 1, 2, 3, 4]
    A_col = [1, 0, 1, 4, 3]
    A_sparse = csr_matrix((A_data, (A_row, A_col)), shape=(5, 5))
    A_sparse_tensor = ops.sp_matrix_to_sp_tensor(A_sparse)

    # Disjoint signal to batch
    expected_result = np.array(
        [[[1.0, 0.0], [0.0, 1.0], [1.0, 1.0]], [[0.0, 0.0], [1.0, 2.0], [0.0, 0.0]]]
    )
    result = ops.disjoint_signal_to_batch(X, I).numpy()

    assert expected_result.shape == result.shape
    assert np.allclose(expected_result, result)

    # Disjoint adjacency to batch
    expected_result = np.array(
        [
            [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
            [[0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 0.0]],
        ]
    )

    result = ops.disjoint_adjacency_to_batch(A_sparse_tensor, I).numpy()

    assert expected_result.shape == result.shape
    assert np.allclose(expected_result, result)
Example #10
 def call(self, model_input, training=True):
     """
     Function calls the net
     args:
       Adj: Tensor. [:, x, y]. Adjacency matrix of the graph
       X: Tensor. [:, x, z]. Node feature matrix
     returns:
       x: Tensor. [x, 1]. Output of the neural net
     """
     Adj, X = model_input
     if not training:
         Adj = sp_matrix_to_sp_tensor(Adj)
     LSTM_input = []
     for i in range(0, self._window):
         LSTM_input.append(
             self.run_MPNN_unit(Adj[i,:,:],
                                X[i,:,:], net_id=str(i+1)))
     x = tf.stack(LSTM_input, axis=1)
     x = self.LSTM1(x)
     output, final_memory_state, final_carry_state = self.LSTM2(x)
     #x = X+x
     #Lin?
     x = final_memory_state
     x = self.Lin(x)
     return x
Example #11
def _test_single_mode(layer, **kwargs):
    print('Single mode')
    sparse = kwargs.pop('sparse', False)
    A_in = Input(shape=(None, ), sparse=sparse)
    X_in = Input(shape=(F, ))
    inputs = [X_in, A_in]
    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X, A]

    if kwargs.pop('edges', None):
        E_in = Input(shape=(S, ))
        inputs.append(E_in)
        input_data.append(E_single)

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    # model = Model(inputs, output)
    model = IRGCNModel(3)
    model(inputs)
    model.summary()

    output = model(input_data)
    assert output.shape == (N, kwargs['channels'])
Example #12
def evaluate(A_list, X_list, y_list, ops_list, batch_size):
    batches = batch_iterator([X_list, A_list, y_list], batch_size=batch_size)
    output = []
    for b in batches:
        X, A, I = numpy_to_disjoint(*b[:-1])
        A = ops.sp_matrix_to_sp_tensor(A)
        y = b[-1]
        pred = model([X, A, I], training=False)
        outs = [o(pred, y) for o in ops_list]
        output.append(outs)
    return np.mean(output, 0)
Example #13
def evaluate(A_list, X_list, y_list, ops, batch_size):
    batches = batch_iterator([A_list, X_list, y_list], batch_size=batch_size)
    output = []
    for b in batches:
        X, A, I = Batch(b[0], b[1]).get('XAI')
        A = sp_matrix_to_sp_tensor(A)
        y = b[2]
        pred = model([X, A, I], training=False)
        outs = [o(pred, y) for o in ops]
        output.append(outs)
    return np.mean(output, 0)
Example #14
def _get_input_from_dtypes(dtypes, sparse=False):
    assert len(dtypes) >= 2
    x = np.ones((3, 1), dtype=dtypes[0])
    a = np.ones((3, 3), dtype=dtypes[1])
    if sparse:
        a = sp_matrix_to_sp_tensor(a)
    output = [x, a]
    if len(dtypes) > 2:
        e = np.ones((3 * 3, 1), dtype=dtypes[2])
        output.append(e)
    return output
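# Hypothetical call, showing the shapes produced for each dtype:
x, a, e = _get_input_from_dtypes(('f4', 'f4', 'f4'), sparse=True)
# x: (3, 1) node features; a: 3x3 SparseTensor; e: (9, 1) edge features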
Example #15
    def load_folded_dataset(self, path):
        with open(path + "/graph.json", 'r') as f:
            graph_json = json.load(f)
        graph = nx.json_graph.node_link_graph(graph_json)
        adjacency_mat = nx.adjacency_matrix(graph)
        fltr = GraphConv.preprocess(adjacency_mat).astype('f4')

        self.fltr = ops.sp_matrix_to_sp_tensor(fltr)
        self.features = np.load(path + "/feats.npy")
        self.train_mask = np.load(path + "/train_mask.npy")
        self.valid_mask = np.load(path + "/valid_mask.npy")
        self.train_labels = GCN.labels[self.train_mask]
        self.valid_labels = GCN.labels[self.valid_mask]
Example #16
def Model_treeGCN_softmax_1(node_count,
                            wordvocabsize,
                            w2v_k,
                            word_W,
                            l2_reg=5e-4):
    X_word_in = Input(shape=(node_count, ), dtype='int32')
    # fltr_in = Input(shape=(node_count, node_count), sparse=True)
    fltr_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

    word_embedding_layer = Embedding(input_dim=wordvocabsize + 1,
                                     output_dim=w2v_k,
                                     input_length=node_count,
                                     mask_zero=True,
                                     trainable=True,
                                     weights=[word_W])
    word_embedding_x = word_embedding_layer(X_word_in)
    word_embedding_x = Dropout(0.25)(word_embedding_x)

    graph_conv_1 = GraphConv(200,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg),
                             use_bias=True)([word_embedding_x, fltr_in])
    dropout_1 = Dropout(0.5)(graph_conv_1)
    graph_conv_2 = GraphConv(200,
                             activation='relu',
                             kernel_regularizer=l2(l2_reg),
                             use_bias=True)([dropout_1, fltr_in])
    dropout_2 = Dropout(0.5)(graph_conv_2)

    feature_node0 = Lambda(lambda x: x[:, 0])(dropout_2)

    # pool = GlobalAttentionPool(200)(dropout_2)

    flatten = Flatten()(dropout_2)
    fc = Dense(512, activation='relu')(flatten)
    fc = Dropout(0.5)(fc)

    # LSTM_backward = LSTM(200, activation='tanh', return_sequences=False,
    #                      go_backwards=True, dropout=0.5)(dropout_2)

    # present_node0 = concatenate([feature_node0, LSTM_backward], axis=-1)
    class_output = Dense(120)(fc)
    class_output = Activation('softmax', name='CLASS')(class_output)

    # Build model
    model = Model(inputs=[X_word_in, fltr_in], outputs=class_output)
    optimizer = Adam(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  weighted_metrics=['acc'])
    return model
Example #17
    def collate(self, batch):
        graph = batch[0]
        output = graph.numpy()

        # Sparse matrices to SparseTensors
        output = list(output)
        for i in range(len(output)):
            if sp.issparse(output[i]):
                output[i] = sp_matrix_to_sp_tensor(output[i])
        output = tuple(output)

        output = (output[:-1], output[-1])
        if self.sample_weights is not None:
            output += (self.sample_weights, )
        return tuple(output)
Example #18
def test_sparse_model_sizes():
    """
    This is a sanity check to make sure we have the same number of operations that we intend to have
    """
    N = 5
    F = 4
    S = 3
    X_in = Input(shape=(F, ), name="X_in")
    A_in = Input(shape=(None, ), name="A_in", sparse=True)
    E_in = Input(shape=(S, ), name="E_in")

    x = np.ones(shape=(N, F))
    a = np.ones(shape=(N, N))
    a = sp_matrix_to_sp_tensor(a)
    e = np.ones(shape=(N * N, S))

    def assert_n_params(inp, out, expected_size):
        model = Model(inputs=inp, outputs=out)
        model.compile(optimizer="adam", loss="mean_squared_error")
        print(model.count_params())
        assert model.count_params() == expected_size
        # for test coverage:
        model([x, a, e])

    X, E = XENetConv([5], 10, 20, False)([X_in, A_in, E_in])
    assert_n_params([X_in, A_in, E_in], [X, E], 350)
    # int vs list: 5 vs [5]
    X, E = XENetConv(5, 10, 20, False)([X_in, A_in, E_in])
    assert_n_params([X_in, A_in, E_in], [X, E], 350)
    # t = (4+4+3+3+1)*5 =  75    # Stack Conv
    # x = (4+5+5+1)*10  = 150    # Node reduce
    # e = (5+1)*20      = 120    # Edge reduce
    # p                 =   5    # Prelu
    # total = t+x+e+p   = 350

    X, E = XENetConv(5, 10, 20, True)([X_in, A_in, E_in])
    assert_n_params([X_in, A_in, E_in], [X, E], 362)
    # t = (4+4+3+3+1)*5 =  75
    # a = (5+1)*1   *2  =  12    # Attention
    # x = (4+5+5+1)*10  = 150
    # e = (5+1)*20      = 120
    # p                 =   5    # Prelu
    # total = t+x+e+p   = 362

    X, E = XENetConv([50, 5], 10, 20, True)([X_in, A_in, E_in])
    assert_n_params([X_in, A_in, E_in], [X, E], 1292)
Example #19
def _test_mixed_mode(layer, **kwargs):
    sparse = kwargs.pop('sparse', False)
    X_batch = np.stack([X] * batch_size)
    A_in = Input(shape=(N, ), sparse=sparse)
    X_in = Input(shape=(N, F))
    inputs = [X_in, A_in]
    if sparse:
        input_data = [X_batch, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X_batch, A]

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (batch_size, N, kwargs['channels'])
Example #20
    def collate(self, batch):
        packed = self.pack(batch, return_dict=True)
        y = None
        if "y" in self.dataset.signature:
            y = packed.pop("y_list")
            y = np.vstack(y) if self.node_level else np.array(y)

        output = to_disjoint(**packed)
        output = list(output)
        for i in range(len(output)):
            if sp.issparse(output[i]):
                output[i] = sp_matrix_to_sp_tensor(output[i])
        output = tuple(output)

        if y is None:
            return output
        else:
            return output, y
Example #21
    def collate(self, batch):
        packed = self.pack(batch, return_dict=True)
        y = np.array(packed.pop("y_list")) if "y" in self.dataset.signature else None

        output = to_batch(**packed)

        # Sparse matrices to SparseTensors
        output = list(output)
        for i in range(len(output)):
            if sp.issparse(output[i]):
                output[i] = sp_matrix_to_sp_tensor(output[i])
        output = tuple(output)

        if len(output) == 1:
            output = output[0]

        if y is None:
            return output
        else:
            return output, y
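# For context, a sketch of the batch packing used above (assuming `to_batch`
# follows the spektral.data.utils signature and zero-pads to the largest graph):
import numpy as np
import scipy.sparse as sp

x_list = [np.ones((2, 3)), np.ones((3, 3))]
a_list = [sp.eye(2).tocsr(), sp.eye(3).tocsr()]
x, a = to_batch(x_list=x_list, a_list=a_list)
# x: (2, 3, 3) zero-padded features; a: (2, 3, 3) dense batched adjacency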
Example #22
    def getdata(self):
        # Load data (the dataset is assumed to be created in __init__)
        adj = self.data.a

        # The adjacency matrix is stored as an attribute of the dataset.
        # Create filter for GCN and convert to sparse tensor.

        self.data.a = GCNConv.preprocess(self.data.a)
        self.data.a = sp_matrix_to_sp_tensor(self.data.a)

        # Train/valid/test split
        data_tr, data_te = self.data[:-10000], self.data[-10000:]
        np.random.shuffle(data_tr)
        data_tr, data_va = data_tr[:-10000], data_tr[-10000:]

        # We use a MixedLoader since the dataset is in mixed mode
        loader_tr = MixedLoader(data_tr, batch_size=batch_size, epochs=epochs)
        loader_va = MixedLoader(data_va, batch_size=batch_size)
        loader_te = MixedLoader(data_te, batch_size=batch_size)
        return adj, loader_tr, loader_va, loader_te
Example #23
def _test_disjoint_mode(layer, sparse=False, **kwargs):
    A = sp.block_diag(
        [np.ones((N1, N1)), np.ones((N2, N2)), np.ones((N3, N3))]
    ).todense()
    X = np.random.normal(size=(N, F))
    I = np.array([0] * N1 + [1] * N2 + [2] * N3).astype(int)

    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))
    I_in = Input(shape=(), dtype=tf.int32)
    inputs = [X_in, A_in, I_in]

    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A), I]
    else:
        input_data = [X, A, I]

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    X_pool, A_pool, I_pool, mask = output
    N_pool_expected = (
        np.ceil(kwargs["ratio"] * N1)
        + np.ceil(kwargs["ratio"] * N2)
        + np.ceil(kwargs["ratio"] * N3)
    )
    N_pool_expected = int(N_pool_expected)
    N_pool_true = A_pool.shape[0]

    _check_number_of_nodes(N_pool_expected, N_pool_true)

    assert X_pool.shape == (N_pool_expected, F)
    assert A_pool.shape == (N_pool_expected, N_pool_expected)
    assert I_pool.shape == (N_pool_expected,)

    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
Example #24
def k_hop_sparse_subgraph(a, node_idx, k, transformer=None):
    """
    Computes the subgraph containing all the neighbors of `node_idx` up to the k-th order.
    If `a` is not the binary adjacency matrix a  `transformer` should be passed.
    **Arguments**
    - `a`: sparse `(n_nodes, n_nodes)` graph tensor;
    - `node_idx`: center node;
    - `k`: order of neighbor;
    - `transformer`: one of the functions from the `spektral.transforms` module,
       needed to convert the binary adjacency matrix into the correct format for the model;
    """
    if a.dtype != tf.float32:
        a = tf.cast(a, tf.float32)

    if transformer:
        a = binary_adj_converter(a)

    power_a = tf.sparse.eye(a.shape[0])
    k_neighs = np.zeros(a.shape[0]).astype("float32").reshape(1, -1)
    k_neighs[0, node_idx] = 1

    for _ in range(k - 1):
        power_a = dot(power_a, a)
        temp = tf.sparse.slice(power_a,
                               start=[node_idx, 0],
                               size=[1, power_a.shape[0]])
        k_neighs += tf.sparse.to_dense(temp)

    comp_graph = tf.sparse.add(a * tf.reshape(k_neighs, (-1, 1)), a * k_neighs)
    is_nonzero = tf.not_equal(comp_graph.values, 0)
    comp_graph = tf.sparse.retain(comp_graph, is_nonzero)
    comp_graph = tf.sign(comp_graph)

    if transformer:
        comp_graph = sp_tensor_to_sp_matrix(comp_graph)
        comp_graph = transformer(comp_graph)
        return sp_matrix_to_sp_tensor(comp_graph)
    else:
        return comp_graph
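# A minimal usage sketch (made-up 4-node path graph; with transformer=None the
# result stays a binary SparseTensor):
import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0, 1, 0, 0],
                              [1, 0, 1, 0],
                              [0, 1, 0, 1],
                              [0, 0, 1, 0]], dtype='f4'))
a = sp_matrix_to_sp_tensor(adj)
subgraph = k_hop_sparse_subgraph(a, node_idx=0, k=2)  # edges within 2 hops of node 0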
Example #25
def _test_single_mode(layer, **kwargs):
    sparse = kwargs.pop("sparse", False)
    A_in = Input(shape=(None, ), sparse=sparse)
    X_in = Input(shape=(F, ))
    inputs = [X_in, A_in]
    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X, A]

    if kwargs.pop("edges", None):
        E_in = Input(shape=(S, ))
        inputs.append(E_in)
        input_data.append(E_single)

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    assert output.shape == (N, kwargs["channels"])
Example #26
def _test_single_mode(layer, sparse=False, **kwargs):
    A = np.ones((N, N))
    X = np.random.normal(size=(N, F))

    A_in = Input(shape=(None,), sparse=sparse)
    X_in = Input(shape=(F,))
    inputs = [X_in, A_in]

    if sparse:
        input_data = [X, sp_matrix_to_sp_tensor(A)]
    else:
        input_data = [X, A]

    layer_instance = layer(**kwargs)
    output = layer_instance(inputs)
    model = Model(inputs, output)

    output = model(input_data)

    X_pool, A_pool, mask = output
    if "ratio" in kwargs.keys():
        N_exp = kwargs["ratio"] * N
    elif "k" in kwargs.keys():
        N_exp = kwargs["k"]
    else:
        raise ValueError("Need k or ratio.")
    N_pool_expected = int(np.ceil(N_exp))
    N_pool_true = A_pool.shape[-1]

    _check_number_of_nodes(N_pool_expected, N_pool_true)

    assert X_pool.shape == (N_pool_expected, F)
    assert A_pool.shape == (N_pool_expected, N_pool_expected)

    output_shape = [o.shape for o in output]
    _check_output_and_model_output_shapes(output_shape, model.output_shape)
Example #27
################################################################################
# MODEL
################################################################################
X_in = Input(shape=(F, ), name='X_in')
A_in = Input(shape=(None, ), name='A_in', sparse=True)

X_1 = GraphConvSkip(16, activation='elu')([X_in, A_in])
X_1, A_1, S = MinCutPool(n_clusters, return_mask=True)([X_1, A_in])

model = Model([X_in, A_in], [X_1, S])

################################################################################
# TRAINING
################################################################################
# Setup
inputs = [X, sp_matrix_to_sp_tensor(A_norm)]
opt = tf.keras.optimizers.Adam(learning_rate=lr)

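# `train_step` is not shown in this snippet; a minimal sketch, assuming the two
# MinCutPool auxiliary losses (cut and orthogonality) are collected via model.losses:
@tf.function
def train_step(inputs):
    with tf.GradientTape() as tape:
        _, S_pred = model(inputs, training=True)
        loss = sum(model.losses)  # MinCutPool registers its losses with add_loss
    grads = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    return model.losses[0], model.losses[1], S_pred
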
# Fit model
loss_history = []
nmi_history = []
for _ in tqdm(range(epochs)):
    outs = train_step(inputs)
    outs = [o.numpy() for o in outs]
    loss_history.append((outs[0], outs[1], (outs[0] + outs[1])))
    s = np.argmax(outs[2], axis=-1)
    nmi_history.append(v_measure_score(y, s))
loss_history = np.array(loss_history)

################################################################################
# RESULTS
Example #28
epoch = 0
model_loss = 0
model_acc = 0
best_val_loss = np.inf
best_weights = None
patience = es_patience
batches_in_epoch = np.ceil(y_train.shape[0] / batch_size)
current_batch = 0

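# `train_step` is defined elsewhere in the script; a hypothetical sketch,
# assuming `model`, `opt`, `loss_fn` and `acc_fn` exist:
@tf.function(experimental_relax_shapes=True)
def train_step(X_, A_, I_, y_):
    with tf.GradientTape() as tape:
        predictions = model([X_, A_, I_], training=True)
        loss = loss_fn(y_, predictions)
    grads = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    return loss, acc_fn(y_, predictions)
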
print('Fitting model')
batches = batch_iterator([X_train, A_train, y_train],
                         batch_size=batch_size, epochs=epochs)
for b in batches:
    current_batch += 1

    X_, A_, I_ = numpy_to_disjoint(*b[:-1])
    A_ = ops.sp_matrix_to_sp_tensor(A_)
    y_ = b[-1]
    outs = train_step(X_, A_, I_, y_)

    model_loss += outs[0]
    model_acc += outs[1]
    if current_batch == batches_in_epoch:
        epoch += 1
        model_loss /= batches_in_epoch
        model_acc /= batches_in_epoch

        # Compute validation loss and accuracy
        val_loss, val_acc = evaluate(A_val, X_val, y_val, [loss_fn, acc_fn], batch_size=batch_size)
        print('Ep. {} - Loss: {:.2f} - Acc: {:.2f} - Val loss: {:.2f} - Val acc: {:.2f}'
              .format(epoch, model_loss, model_acc, val_loss, val_acc))
Example #29
tf.config.experimental_run_functions_eagerly(True)

# Parameters
batch_size = 32  # Batch size
epochs = 1000  # Number of training epochs
patience = 10  # Patience for early stopping
l2_reg = 5e-4  # Regularization rate for l2

# Load data
data = MNIST()

# The adjacency matrix is stored as an attribute of the dataset.
# Create filter for GCN and convert to sparse tensor.
data.a = GCNConv.preprocess(data.a)
data.a = sp_matrix_to_sp_tensor(data.a)

# Train/valid/test split
data_tr, data_te = data[:-10000], data[-10000:]
np.random.shuffle(data_tr)
data_tr, data_va = data_tr[:-10000], data_tr[-10000:]

# We use a MixedLoader since the dataset is in mixed mode
loader_tr = MixedLoader(data_tr, batch_size=batch_size, epochs=epochs)
loader_va = MixedLoader(data_va, batch_size=batch_size)
loader_te = MixedLoader(data_te, batch_size=batch_size)


# Build model
class Net(Model):
    def __init__(self, **kwargs):
Example #30
es_patience = 200     # Patience for early stopping

# Load data
X_train, y_train, X_val, y_val, X_test, y_test, adj = mnist.load_data()
X_train, X_val, X_test = X_train[..., None], X_val[..., None], X_test[..., None]
N = X_train.shape[-2]      # Number of nodes in the graphs
F = X_train.shape[-1]      # Node features dimensionality
n_out = y_train.shape[-1]  # Dimension of the target

fltr = normalized_laplacian(adj)

# Model definition
X_in = Input(shape=(N, F))
# Pass A as a fixed tensor, otherwise Keras will complain about inputs of
# different rank.
A_in = Input(tensor=sp_matrix_to_sp_tensor(fltr))

graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([X_in, A_in])
graph_conv = GraphConv(32,
                       activation='elu',
                       kernel_regularizer=l2(l2_reg),
                       use_bias=True)([graph_conv, A_in])
flatten = Flatten()(graph_conv)
fc = Dense(512, activation='relu')(flatten)
output = Dense(n_out, activation='softmax')(fc)

# Build model
model = Model(inputs=[X_in, A_in], outputs=output)