Example #1
    def __init__(self, B_s, Node_Dim, Hidden_Dim, Out_Dim, Class_Dim, Loss_Only=True):
        super(GNN, self).__init__()
        self.loss_only = Loss_Only
        self.Bs = B_s
        self.Conv1 = SAGEConv(Node_Dim, Hidden_Dim)
        self.Conv2 = SAGEConv(Hidden_Dim, Out_Dim)
        self.ClassHead = Linear(Out_Dim, Class_Dim)
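This fragment shows only the constructor. A matching forward pass is not in the source; the following is a minimal sketch, assuming `torch.nn.functional` is imported as `F` and the usual PyG message-passing call signature:

    def forward(self, x, edge_index):
        # Hypothetical forward, consistent with the layers declared above.
        x = F.relu(self.Conv1(x, edge_index))   # Node_Dim -> Hidden_Dim
        x = F.relu(self.Conv2(x, edge_index))   # Hidden_Dim -> Out_Dim
        return self.ClassHead(x)                # Out_Dim -> Class_Dim logits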
Example #2
    def __init__(self,
                 in_channels,
                 out_channels,
                 hidden_channels=256,
                 n_layers=3,
                 dropout=0.0,
                 kwargs=None):  # avoid a mutable default argument
        super(SageNet, self).__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.hidden_channels = hidden_channels
        self.n_layers = n_layers
        self.dropout = dropout
        self.kwargs = kwargs if kwargs is not None else {}
        self.convs = nn.ModuleList()
        for i in range(self.n_layers - 1):
            n_in = self.in_channels if i == 0 else self.hidden_channels
            self.convs.append(
                SAGEConv(
                    n_in,
                    self.hidden_channels,
                    aggr='add',
                    normalize=True,  # sum aggregation explodes without normalization
                    root_weight=False,
                    **self.kwargs))
        # Last layer
        self.convs.append(
            SAGEConv(
                self.hidden_channels,
                self.out_channels,
                aggr='add',
                normalize=True,  # sum aggregation explodes without normalization
                root_weight=False,
                **self.kwargs))
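`SageNet` stores `dropout` but this fragment never applies it; a plausible forward (a sketch, not from the source) interleaves ReLU and dropout between the SAGE layers:

    def forward(self, x, edge_index):
        # Sketch only: hidden layers with ReLU + dropout, linear last layer.
        for conv in self.convs[:-1]:
            x = F.relu(conv(x, edge_index))
            x = F.dropout(x, p=self.dropout, training=self.training)
        return self.convs[-1](x, edge_index)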
Example #3
class SortPool(torch.nn.Module):
    def __init__(self, in_channels, hidden_channels, output_dim, num_layers):
        super(SortPool, self).__init__()
        self.k = 30
        self.conv1 = SAGEConv(in_channels, hidden_channels)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden_channels, hidden_channels))
        self.conv1d = Conv1d(hidden_channels, 32, 5)
        self.lin1 = Linear(32 * (self.k - 5 + 1), hidden_channels)
        self.lin2 = Linear(hidden_channels, output_dim)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.conv1d.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, x, edge_index, batch):
        # x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))

        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = global_sort_pool(x, batch, self.k)
        x = x.view(len(x), self.k, -1).permute(0, 2, 1)
        x = F.relu(self.conv1d(x))
        x = x.view(len(x), -1)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return x
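The `32 * (self.k - 5 + 1)` input size of `lin1` follows from the 1-D convolution: `global_sort_pool` keeps `k = 30` nodes per graph, and `Conv1d` with 32 output channels, kernel width 5, stride 1 and no padding yields `k - 5 + 1 = 26` positions, so the flattened feature has `32 * 26` entries. A quick shape check (hypothetical values: 4 graphs, 64 hidden channels):

x = torch.randn(4, 30 * 64)             # global_sort_pool output: [num_graphs, k * hidden]
x = x.view(4, 30, -1).permute(0, 2, 1)  # -> [4, 64, 30]
x = Conv1d(64, 32, 5)(x)                # -> [4, 32, 30 - 5 + 1] = [4, 32, 26]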
Example #4
    def __init__(self, node_in_dim, node_out_dim=64, heads=1, dropout=0.1):
        super(GraphSAGE, self).__init__()
        self.conv1 = SAGEConv(node_in_dim, node_out_dim)
        self.conv2 = SAGEConv(node_out_dim, node_out_dim)
Example #5
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        hidden = args.hidden
        num_layers = 5
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.set2set = Set2Set(hidden, processing_steps=4)
        self.lin1 = Linear(2 * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.set2set.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = self.set2set(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
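`Set2Set` returns a graph embedding twice as wide as its input (it concatenates the LSTM query with the attention readout), which is why `lin1` is declared as `Linear(2 * hidden, hidden)`. A minimal check, assuming `torch` and `torch_geometric.nn.Set2Set` are imported:

pool = Set2Set(in_channels=64, processing_steps=4)
out = pool(torch.randn(10, 64), torch.zeros(10, dtype=torch.long))
print(out.shape)  # torch.Size([1, 128]) -- one graph, 2 * 64 features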
Example #6
class SortPool(torch.nn.Module):
    def __init__(self, dataset, num_layers, hidden, num_classes):
        super(SortPool, self).__init__()
        self.k = 10
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.lin1 = nn.Linear(self.k * hidden, hidden)
        self.lin2 = nn.Linear(hidden, num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = global_sort_pool(x, batch, self.k)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #7
class SAGE(torch.nn.Module):  # fine-tuned
    def __init__(self, num_layers=2, hidden=32, features_num=32, num_class=2):
        super(SAGE, self).__init__()
        self.conv1 = SAGEConv(hidden, hidden)
        self.conv2 = SAGEConv(hidden, hidden)
        self.out = Linear(hidden * 3, num_class)
        self.first_lin = Linear(features_num, hidden)
        # Parameters require grad by default; torch.full avoids an uninitialized tensor.
        self.fuse_weight = torch.nn.Parameter(
            torch.full((num_layers,), 1.0 / (num_layers + 1)))

    def reset_parameters(self):
        self.first_lin.reset_parameters()
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.out.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.first_lin(x))
        x = F.dropout(x, p=0.5, training=self.training)
        xx = x
        # Standard PyG SAGEConv accepts no edge weights (its third positional
        # argument is `size`), so edge_weight is dropped here.
        x = self.conv1(x, edge_index)
        x = F.dropout(x, p=0.2, training=self.training)
        xx = torch.cat([xx, x], dim=1)
        x = self.conv2(x, edge_index)
        x = F.dropout(x, p=0.2, training=self.training)
        xx = torch.cat([xx, x], dim=1)
        x = self.out(xx)
        return F.log_softmax(x, dim=-1)
Example #8
    def __init__(self):
        super(GraphCluster, self).__init__()
        n_layers = 7
        in_channels = 128
        hidden_channels = 64
        class_num = 18
        bias = True
        normalize = False
        # self.embedding = nn.Sequential(nn.Linear(100, in_channels),
        #                                nn.ReLU(), )

        self.layers = nn.ModuleList()  # was missing: must exist before appending
        # input layer
        self.layers.append(
            SAGEConv(in_channels,
                     hidden_channels,
                     normalize=normalize,
                     bias=bias))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(
                SAGEConv(hidden_channels,
                         hidden_channels,
                         normalize=normalize,
                         bias=bias))
        # output layer
        self.out_layer = SAGEConv(hidden_channels,
                                  class_num,
                                  normalize=normalize,
                                  bias=bias)
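The source shows only the constructor for `GraphCluster`; a forward pass consistent with this stack might look like the following sketch (hypothetical, assuming `F` is `torch.nn.functional`):

    def forward(self, x, edge_index):
        # Sketch only: hidden SAGE layers with ReLU, then the output layer.
        for layer in self.layers:
            x = F.relu(layer(x, edge_index))
        return self.out_layer(x, edge_index)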
Example #9
class GraphSAGE(torch.nn.Module):
    def __init__(self,
                 num_features,
                 output_channels,
                 num_layers=3,
                 hidden=128,
                 **kwargs):
        super(GraphSAGE, self).__init__()
        self.conv1 = SAGEConv(num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, output_channels)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data, target_size, **kwargs):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = global_mean_pool(x, batch, size=target_size)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #10
    def __init__(self, input_dim, out_dim, filter_num, dropout=False):
        super(SAGE_Link, self).__init__()
        self.dropout = dropout
        self.conv1 = SAGEConv(input_dim, filter_num)
        self.conv2 = SAGEConv(filter_num, filter_num)
        # self.Conv = nn.Conv1d(filter_num, out_dim, kernel_size=1)
        self.linear = nn.Linear(filter_num * 2, out_dim)
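The `filter_num * 2` input of `self.linear` suggests `SAGE_Link` scores a candidate edge by concatenating its two endpoint embeddings. A hedged sketch of that step (the source shows only the constructor; `link_index` is a hypothetical [2, num_links] tensor of endpoint pairs):

    def forward(self, x, edge_index, link_index):
        # Sketch only: encode nodes, then score links from endpoint pairs.
        x = F.relu(self.conv1(x, edge_index))
        x = F.relu(self.conv2(x, edge_index))
        pair = torch.cat([x[link_index[0]], x[link_index[1]]], dim=1)
        return self.linear(pair)  # [num_links, out_dim]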
Example #11
class Net(torch.nn.Module):
    def __init__(self):
        super().__init__()
        hidden = args.hidden
        num_layers = 5
        self.k = 30
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.conv1d = Conv1d(hidden, 32, 5)
        self.lin1 = Linear(32 * (self.k - 5 + 1), hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.conv1d.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = global_sort_pool(x, batch, self.k)
        x = x.view(len(x), self.k, -1).permute(0, 2, 1)
        x = F.relu(self.conv1d(x))
        x = x.view(len(x), -1)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)
Example #12
    def reset_parameters(self):
        for conv in self.down_convs:
            conv.reset_parameters()
        for pool in self.pools:
            pool.reset_parameters()
        for conv in self.up_convs:
            conv.reset_parameters()
Example #13
    def __init__(self, input_size, depth, rate, shapes, device):
        super(Net, self).__init__()
        self.device = device
        self.depth = depth
        self.direction = 1
        self.down_list = torch.nn.ModuleList()
        self.up_list = torch.nn.ModuleList()
        self.pool_list = torch.nn.ModuleList()
        # encoder
        conv = SAGEConv(input_size, shapes[0])
        self.down_list.append(conv)
        for i in range(self.depth - 1):
            pool = SAGPooling(shapes[i], rate[i])
            self.pool_list.append(pool)
            conv = SAGEConv(shapes[i], shapes[i + 1])
            self.down_list.append(conv)
        pool = SAGPooling(shapes[-1], rate[-1])
        self.pool_list.append(pool)

        # decoder
        for i in range(self.depth - 1):
            conv = SAGEConv(shapes[self.depth - i - 1],
                            shapes[self.depth - i - 2])
            self.up_list.append(conv)
        conv = SAGEConv(shapes[0], input_size)
        self.up_list.append(conv)
Example #14
    def __init__(self,
                 categories_nums,
                 features_num=16,
                 num_class=2,
                 large_features=False,
                 sparse=False):
        super(SAGE3, self).__init__()

        hidden = 64
        embed_size = 8
        dropout = 0.1
        self.dropout_p = dropout

        self.embeddings = torch.nn.ModuleList()
        for max_nums in categories_nums:
            self.embeddings.append(Embedding(max_nums, embed_size))


        # self.lin0 = Linear(embed_size * len(categories_nums) + features_num, hidden)
        self.conv1 = SAGEConv(embed_size * len(categories_nums) + features_num,
                              hidden)

        self.conv2 = SAGEConv(hidden, hidden)

        self.conv3 = SAGEConv(hidden, hidden)

        self.lin1 = Linear(hidden, num_class)
Example #15
    def __init__(self):
        super(GCN, self).__init__()
        torch.manual_seed(12345)
        self.conv1 = SAGEConv(NUM_FEATURES, 4)
        self.conv2 = SAGEConv(4, 4)
        self.conv3 = SAGEConv(4, 2)
        self.classifier = Linear(2, NUM_CLASSES)
Example #16
    def __init__(self,
                 hidden_channels,
                 num_layers,
                 max_z,
                 train_dataset=None,
                 use_feature=False,
                 node_embedding=None,
                 dropout=0.5):
        super(SAGE, self).__init__()
        self.use_feature = use_feature
        self.node_embedding = node_embedding
        self.max_z = max_z
        self.z_embedding = Embedding(self.max_z, hidden_channels)

        self.convs = ModuleList()
        initial_channels = hidden_channels
        if self.use_feature:
            initial_channels += train_dataset.num_features
        if self.node_embedding is not None:
            initial_channels += node_embedding.embedding_dim
        self.convs.append(SAGEConv(initial_channels, hidden_channels))
        for _ in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden_channels, hidden_channels))

        self.dropout = dropout
        self.lin1 = Linear(hidden_channels, hidden_channels)
        self.lin2 = Linear(hidden_channels, 1)
Example #17
    def __init__(self, dim_node_features, dim_edge_features, dim_target,
                 predictor_class, config):
        """
        Initializes the model.
        :param dim_node_features: arbitrary object holding node feature information
        :param dim_edge_features: arbitrary object holding edge feature information
        :param dim_target: arbitrary object holding target information
        :param predictor_class: the class of the predictor that will classify node/graph embeddings produced by this DGN
        :param config: the configuration dictionary to extract further hyper-parameters
        """
        super().__init__()

        num_layers = config['num_layers']
        dim_embedding = config['dim_embedding']
        self.aggregation = config['aggregation']  # can be mean or max

        if self.aggregation == 'max':
            self.fc_max = nn.Linear(dim_embedding, dim_embedding)

        self.predictor = predictor_class(
            dim_node_features=dim_embedding * num_layers,
            dim_edge_features=dim_edge_features,
            dim_target=dim_target,
            config=config)

        self.layers = nn.ModuleList([])
        for i in range(num_layers):
            dim_input = dim_node_features if i == 0 else dim_embedding

            conv = SAGEConv(dim_input, dim_embedding)
            # Overwrite the aggregation method (the default is mean).
            conv.aggr = self.aggregation

            self.layers.append(conv)
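Since the predictor above is built with `dim_node_features=dim_embedding * num_layers`, the model presumably concatenates every layer's output, jumping-knowledge style. A sketch under that assumption (not the source's actual forward):

    def forward(self, x, edge_index):
        # Sketch only: collect and concatenate all layer outputs to match
        # the dim_embedding * num_layers predictor input.
        xs = []
        for conv in self.layers:
            x = F.relu(conv(x, edge_index))
            xs.append(x)
        return torch.cat(xs, dim=1)  # [num_nodes, dim_embedding * num_layers]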
Example #18
class GlobalAttentionNet(torch.nn.Module):
    def __init__(self, dataset, num_layers, hidden):
        super().__init__()
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.att = GlobalAttention(Linear(hidden, 1))
        self.lin1 = Linear(hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.att.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = self.att(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #19
    def __init__(self,
                 in_feats,
                 hidden_dim,
                 out_feats,
                 num_layers,
                 dropout=0.5,
                 normalize=False,
                 concat=False,
                 use_bn=False):
        super(GraphSAGE, self).__init__()
        self.convlist = nn.ModuleList()
        self.bn_list = nn.ModuleList()
        self.num_layers = num_layers
        self.dropout = dropout
        self.use_bn = use_bn
        if num_layers == 1:
            self.convlist.append(
                SAGEConv(in_feats, out_feats, normalize, concat))
        else:
            self.convlist.append(
                SAGEConv(in_feats, hidden_dim, normalize, concat))
            if use_bn:
                self.bn_list.append(nn.BatchNorm1d(hidden_dim))
            for _ in range(num_layers - 2):
                self.convlist.append(
                    SAGEConv(hidden_dim, hidden_dim, normalize, concat))
                if use_bn:
                    self.bn_list.append(nn.BatchNorm1d(hidden_dim))
            self.convlist.append(
                SAGEConv(hidden_dim, out_feats, normalize, concat))
Example #20
    def __init__(self, nfeat, nhid, nclass, dropout, nlayer=2):
        super(GraphSAGE2, self).__init__()
        self.conv1 = SAGEConv(nfeat, nhid)
        self.conv2 = SAGEConv(nhid, nclass)

        self.dropout_p = dropout
        self.sig = nn.Sigmoid()
Example #21
class GraphSAGEWithJK(torch.nn.Module):
    def __init__(self, dataset, num_layers, hidden):
        super(GraphSAGEWithJK, self).__init__()
        self.conv1 = SAGEConv(dataset.num_features, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear(num_layers * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)

    def reset_parameters(self):
        self.conv1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.jump.reset_parameters()
        self.lin1.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch
        x = F.relu(self.conv1(x, edge_index))
        xs = [x]
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
            xs += [x]
        x = self.jump(xs)
        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #22
class GraphSAGE(torch.nn.Module):
    def __init__(self, num_layers=2, hidden=16, features_num=16, num_class=2):
        super().__init__()

        self.sage1 = SAGEConv(features_num, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers - 1):
            self.convs.append(SAGEConv(hidden, hidden))
        self.lin2 = Linear(hidden, num_class)

    def reset_parameters(self):
        self.sage1.reset_parameters()
        for conv in self.convs:
            conv.reset_parameters()
        self.lin2.reset_parameters()

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        # Standard PyG SAGEConv accepts no edge weights, so edge_weight is dropped.
        x = F.relu(self.sage1(x, edge_index))
        x = F.dropout(x, p=0.5, training=self.training)
        for conv in self.convs:
            x = F.relu(conv(x, edge_index))
        x = F.dropout(x, p=0.5, training=self.training)
        x = self.lin2(x)
        return F.log_softmax(x, dim=-1)

    def __repr__(self):
        return self.__class__.__name__
Example #23
    def __init__(self, hparams: dict):
        super().__init__()
        self.hparams = hparams

        for param_name in [
                "num_node_features",
                "num_conv_layers",
                "conv_size",
                "lin1_size",
                "lin2_size",
                "output_size",
        ]:
            if not isinstance(hparams[param_name], int):
                raise Exception("Wrong hyperparameter type!")

        if hparams["num_conv_layers"] < 1:
            raise Exception("Invalid number of layers!")

        if hparams["activation"] == "relu":
            activation = nn.ReLU
        elif hparams["activation"] == "prelu":
            activation = nn.PReLU
        else:
            raise Exception("Invalid activation function name.")

        if hparams["pool_method"] == "add":
            self.pooling_method = global_add_pool
        elif hparams["pool_method"] == "mean":
            self.pooling_method = global_mean_pool
        elif hparams["pool_method"] == "max":
            self.pooling_method = global_max_pool
        else:
            raise Exception("Invalid pooling method name")

        self.conv_modules = nn.ModuleList()
        self.activ_modules = nn.ModuleList()

        normalize = hparams.get("normalize", False)

        self.conv_modules.append(
            SAGEConv(hparams["num_node_features"],
                     hparams["conv_size"],
                     normalize=normalize))
        self.activ_modules.append(activation())

        for _ in range(hparams["num_conv_layers"] - 1):
            self.conv_modules.append(
                SAGEConv(hparams["conv_size"],
                         hparams["conv_size"],
                         normalize=normalize))
            self.activ_modules.append(activation())

        self.lin1 = nn.Linear(hparams["conv_size"], hparams["lin1_size"])
        self.activ_lin1 = activation()

        self.lin2 = nn.Linear(hparams["lin1_size"], hparams["lin2_size"])
        self.activ_lin2 = activation()

        self.output = nn.Linear(hparams["lin2_size"], hparams["output_size"])
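An illustrative `hparams` dictionary that satisfies every check in the constructor above (the keys come from the source; the values are made up):

hparams = {
    "num_node_features": 32,
    "num_conv_layers": 3,
    "conv_size": 128,
    "lin1_size": 64,
    "lin2_size": 32,
    "output_size": 10,
    "activation": "relu",   # or "prelu"
    "pool_method": "mean",  # "add", "mean", or "max"
    "normalize": False,     # optional; defaults to False
}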
Example #24
    def __init__(self, config):
        super(Relational_GNN, self).__init__()
        self.num_rels = config['num_rels']
        self.in_dim = config['vocab_size']
        self.embed_dim = config['embed_dim']
        self.dropout = config['dropout']
        self.fc_dim = config['fc_dim']
        self.node_drop = config['node_drop']

        if config['model_name'] == 'rsage':
            self.conv1 = torch.nn.ModuleList([
                SAGEConv(self.in_dim,
                         self.embed_dim,
                         normalize=True,
                         concat=True) for _ in range(self.num_rels)
            ])
            self.conv2 = torch.nn.ModuleList([
                SAGEConv(self.embed_dim,
                         self.embed_dim,
                         normalize=True,
                         concat=True) for _ in range(self.num_rels)
            ])

        elif config['model_name'] == 'rgcn':
            self.conv1 = torch.nn.ModuleList([
                GCNConv(self.in_dim, self.embed_dim, improved=True)
                for _ in range(self.num_rels)
            ])
            self.conv2 = torch.nn.ModuleList([
                GCNConv(self.embed_dim, self.embed_dim, improved=True)
                for _ in range(self.num_rels)
            ])

        elif config['model_name'] == 'rgat':
            self.conv1 = torch.nn.ModuleList([
                GATConv(self.in_dim,
                        self.embed_dim,
                        heads=3,
                        concat=True,
                        dropout=0.1) for _ in range(self.num_rels)
            ])
            self.conv2 = torch.nn.ModuleList([  # fixed: this second stack was overwriting conv1
                GATConv(3 * self.embed_dim,
                        self.embed_dim,
                        heads=3,
                        concat=True,
                        dropout=0.1) for _ in range(self.num_rels)
            ])

        if config['model_name'] == 'rgat':
            self.classifier = nn.Sequential(
                nn.Dropout(config["dropout"]),
                nn.Linear(3 * self.embed_dim, self.fc_dim), nn.ReLU(),
                nn.Linear(self.fc_dim, config['n_classes']))
        else:
            self.classifier = nn.Sequential(
                nn.Dropout(config["dropout"]),
                nn.Linear(self.embed_dim, self.fc_dim), nn.ReLU(),
                nn.Linear(self.fc_dim, config['n_classes']))
Example #25
    def __init__(self, in_channels, hidden_channels, out_channels):
        super(SAGE, self).__init__()

        self.num_layers = 2

        self.convs = torch.nn.ModuleList()
        self.convs.append(SAGEConv(in_channels, hidden_channels))
        self.convs.append(SAGEConv(hidden_channels, out_channels))
Example #26
    def __init__(self, args):
        super(GraphSAGE, self).__init__()
        self.feature_dim = args.feature_dim
        self.embedding_dim = args.embedding_dim
        self.SAGE_hidden_dim = args.SAGE_hidden_dim
        self.conv1 = SAGEConv(self.feature_dim, self.SAGE_hidden_dim)
        self.activate_fn = nn.ELU(inplace=True)
        self.conv2 = SAGEConv(self.SAGE_hidden_dim, self.embedding_dim)
Example #27
    def __init__(self, args):
        super(MetaGraphSAGE, self).__init__()
        self.use_cuda = args.use_cuda
        self.meta_num = args.meta_num
        self.SAGE_hidden_dim = args.SAGE_hidden_dim
        self.sageconv1 = nn.ModuleList([
            SAGEConv(args.feature_dim, self.SAGE_hidden_dim)
            for i in range(self.meta_num)
        ])
        self.activate_fn = nn.ELU(inplace=True)
        self.sageconv2 = nn.ModuleList([
            SAGEConv(self.SAGE_hidden_dim, args.embedding_dim)
            for i in range(self.meta_num)
        ])
Example #28
    def __init__(self, hidden_channels):
        super(Net, self).__init__()
        in_channels = 31
        out_channels = 2
        self.conv1 = SAGEConv(in_channels, hidden_channels)
        self.conv2 = SAGEConv(hidden_channels, hidden_channels)
        self.conv3 = SAGEConv(hidden_channels, hidden_channels)
        self.lin = torch.nn.Linear(3 * hidden_channels, out_channels)
Example #29
    def __init__(self, hidden_channels: int, out_channels: int,
                 dropout: float):
        super().__init__()
        self.dropout = dropout

        self.conv1 = SAGEConv((-1, -1), hidden_channels)
        self.conv2 = SAGEConv((-1, -1), hidden_channels)
        self.lin = Linear(-1, out_channels)
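The `-1` sizes rely on PyTorch Geometric's lazy initialization: `SAGEConv((-1, -1), ...)` infers the source and target feature dimensions on the first call (useful for bipartite inputs or models converted with `to_hetero`), and `torch_geometric.nn.Linear(-1, ...)` does the same. A hedged forward sketch for this fragment:

    def forward(self, x, edge_index):
        # Sketch only: all lazy dimensions are resolved on the first call.
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.relu(self.conv2(x, edge_index))
        return self.lin(x)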
Example #30
    def __init__(self, hidden_channels):
        super(Net, self).__init__()
        in_channels = dataset.num_node_features
        out_channels = dataset.num_classes
        self.conv1 = SAGEConv(in_channels, hidden_channels, concat=True)
        self.conv2 = SAGEConv(hidden_channels, hidden_channels, concat=True)
        self.conv3 = SAGEConv(hidden_channels, hidden_channels, concat=True)
        self.lin = torch.nn.Linear(3 * hidden_channels, out_channels)
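As in Example #28, `lin` consumes `3 * hidden_channels`, which suggests the forward pass concatenates the three conv outputs. A sketch under that assumption (hypothetical, not from the source):

    def forward(self, x, edge_index):
        # Hypothetical forward matching the 3 * hidden_channels classifier.
        x1 = F.relu(self.conv1(x, edge_index))
        x2 = F.relu(self.conv2(x1, edge_index))
        x3 = F.relu(self.conv3(x2, edge_index))
        return self.lin(torch.cat([x1, x2, x3], dim=1))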