Example #1
from mxnet import gluon
from dgl.nn.mxnet import GraphConv


class GCN(gluon.nn.Block):
    def __init__(self, g, in_feats, n_hidden, n_classes, n_layers, activation,
                 dropout):
        super(GCN, self).__init__()
        self.g = g  # the DGLGraph every layer operates on
        self.layers = gluon.nn.Sequential()
        # input layer
        self.layers.add(GraphConv(in_feats, n_hidden, activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.add(
                GraphConv(n_hidden, n_hidden, activation=activation))
        # output layer (no activation, so the model emits raw logits)
        self.layers.add(GraphConv(n_hidden, n_classes))
        self.dropout = gluon.nn.Dropout(rate=dropout)
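
The snippet shows only the constructor. A minimal forward pass, modeled on DGL's stock Gluon GCN example (the body below is a sketch under that assumption), iterates over the layers and applies dropout between convolutions:

    def forward(self, features):
        # Sketch following the standard DGL Gluon GCN example:
        # dropout between GraphConv layers, none before the first.
        h = features
        for i, layer in enumerate(self.layers):
            if i != 0:
                h = self.dropout(h)
            h = layer(self.g, h)  # each GraphConv takes (graph, features)
        return h  # raw per-node class logits

A typical instantiation (hyperparameters illustrative) is model = GCN(g, in_feats, 16, n_classes, 1, mx.nd.relu, 0.5); after model.initialize(), calling model(features) on the node-feature matrix returns one logit row per node.
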
Example #2
import mxnet as mx
from mxnet.gluon import nn
from dgl.nn.mxnet import GraphConv


class GCNNet(mx.gluon.nn.Block):
    def __init__(self,
                 embedding_weights,
                 output_classes_num,
                 dropout=0.0,
                 gcn_hidden_size=30,
                 dense_hidden_size=20,
                 trainable_embeddings=False,
                 num_layers=3,
                 **kwargs):
        super(GCNNet, self).__init__(**kwargs)

        with self.name_scope():
            self.dropout = mx.gluon.nn.Dropout(dropout)

            # The embedding width is the input feature size
            # for the first graph convolution.
            emb_in_dim = embedding_weights.shape[1]

            # Given the sparse documentation for DGL + MXNet,
            # we store the word embeddings directly as an
            # mxnet.nd matrix rather than as a trainable
            # embedding layer.
            self.embeddings = embedding_weights
            self.gcn_span_a = nn.Sequential()
            self.gcn_span_b = nn.Sequential()
            '''
            self.gcn_path = nn.Sequential()
            '''
            self.gcn_main = nn.Sequential()

            # Set up the initial layer for each graph stack

            self.gcn_span_a.add(
                GraphConv(
                    emb_in_dim,
                    gcn_hidden_size,
                    # num_heads = 1,
                    activation=mx.nd.relu))

            self.gcn_span_b.add(
                GraphConv(
                    emb_in_dim,
                    gcn_hidden_size,
                    # num_heads = 1,
                    activation=mx.nd.relu))
            '''
            self.gcn_path.add(
                GraphConv(
                    emb_in_dim,
                    gcn_hidden_size,
                    # num_heads = 1,
                    activation = mx.nd.relu
                )
            )
            '''
            self.gcn_main.add(
                GraphConv(
                    emb_in_dim,
                    gcn_hidden_size,
                    # num_heads = 1,
                    activation=mx.nd.relu))

            # Add num_layers more convolution layers to each
            # graph stack, on top of the input layer added above
            for i in range(num_layers):

                self.gcn_span_a.add(
                    GraphConv(
                        gcn_hidden_size,
                        gcn_hidden_size,
                        # num_heads = 1,
                        activation=mx.nd.relu))

                self.gcn_span_b.add(
                    GraphConv(
                        gcn_hidden_size,
                        gcn_hidden_size,
                        # num_heads = 1,
                        activation=mx.nd.relu))
                '''
                self.gcn_path.add(
                    GraphConv(
                        gcn_hidden_size,
                        gcn_hidden_size,
                        # num_heads = 1,
                        activation = mx.nd.relu
                    )
                )
                '''
                self.gcn_main.add(
                    GraphConv(
                        gcn_hidden_size,
                        gcn_hidden_size,
                        # num_heads = 1,
                        activation=mx.nd.relu))

            # For condensing all the different GCN outputs
            self.dense_collect = mx.gluon.nn.Dense(units=dense_hidden_size,
                                                   activation='relu')

            # Final dense classification layer
            self.dense_out = mx.gluon.nn.Dense(units=output_classes_num)
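
As with the first example, only the constructor is shown. The sketch below is one plausible forward pass; the three graph arguments, the token-id embedding lookup, and the mean-pooling readout are all assumptions, since the original class does not include them:

    def forward(self, graph_a, graph_b, graph_main, ids_a, ids_b, ids_main):
        # Hypothetical: index the fixed embedding matrix by token id
        # to get initial node features for each graph.
        h_a = self.embeddings[ids_a]
        h_b = self.embeddings[ids_b]
        h_m = self.embeddings[ids_main]
        # Walk the three GraphConv stacks in lockstep (they have the
        # same depth), applying dropout after every convolution.
        for conv_a, conv_b, conv_m in zip(self.gcn_span_a,
                                          self.gcn_span_b,
                                          self.gcn_main):
            h_a = self.dropout(conv_a(graph_a, h_a))
            h_b = self.dropout(conv_b(graph_b, h_b))
            h_m = self.dropout(conv_m(graph_main, h_m))
        # Mean-pool node states into one vector per graph, then
        # concatenate, condense, and classify.
        pooled = mx.nd.concat(h_a.mean(axis=0, keepdims=True),
                              h_b.mean(axis=0, keepdims=True),
                              h_m.mean(axis=0, keepdims=True), dim=1)
        return self.dense_out(self.dense_collect(pooled))

This mirrors the constructor's structure: one GraphConv stack per graph, dense_collect to condense the concatenated readouts, and dense_out for the final class logits.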