def __init__(self, hparams: dict):
    super().__init__()
    self.hparams = hparams
    self.aggregation_method = hparams["aggregation_method"]
    if hparams["num_conv_layers"] < 1:
        raise ValueError("Invalid number of layers!")

    self.conv_modules = nn.ModuleList()
    self.conv_modules.append(
        GCNConv(hparams["num_node_features"], hparams["conv_size"]))
    for _ in range(hparams["num_conv_layers"] - 1):
        conv = GCNConv(hparams["conv_size"], hparams["conv_size"])
        self.conv_modules.append(conv)

    if self.aggregation_method == 'lstm':
        self.jk = JumpingKnowledge(self.aggregation_method,
                                   num_layers=hparams["num_conv_layers"],
                                   channels=hparams["conv_size"])
    else:
        self.jk = JumpingKnowledge(self.aggregation_method)

    if self.aggregation_method == 'cat':
        self.lin = nn.Linear(
            int(hparams["conv_size"] * hparams["num_conv_layers"]),
            hparams["lin_size"])
    else:
        self.lin = nn.Linear(int(hparams["conv_size"]), hparams["lin_size"])
    self.output = nn.Linear(hparams["lin_size"], hparams["output_size"])
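# A minimal forward sketch for the module above, assuming PyG-style inputs
# (`x`, `edge_index`), ReLU activations, and `F` = torch.nn.functional; the
# actual forward pass is not part of the snippet, so these names are
# illustrative only.
def forward(self, x, edge_index):
    xs = []
    for conv in self.conv_modules:
        x = F.relu(conv(x, edge_index))
        xs.append(x)                      # keep every layer's output for JK
    x = self.jk(xs)                       # 'cat', 'max', or 'lstm' aggregation
    x = F.relu(self.lin(x))
    return self.output(x)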
def test_jumping_knowledge():
    num_nodes, channels, num_layers = 100, 17, 5
    xs = [torch.randn(num_nodes, channels) for _ in range(num_layers)]

    model = JumpingKnowledge('cat')
    assert model.__repr__() == 'JumpingKnowledge(cat)'

    out = model(xs)
    assert out.size() == (num_nodes, channels * num_layers)

    if is_full_test():
        jit = torch.jit.script(model)
        assert torch.allclose(jit(xs), out)

    model = JumpingKnowledge('max')
    assert model.__repr__() == 'JumpingKnowledge(max)'

    out = model(xs)
    assert out.size() == (num_nodes, channels)

    if is_full_test():
        jit = torch.jit.script(model)
        assert torch.allclose(jit(xs), out)

    model = JumpingKnowledge('lstm', channels, num_layers)
    assert model.__repr__() == 'JumpingKnowledge(lstm)'

    out = model(xs)
    assert out.size() == (num_nodes, channels)

    if is_full_test():
        jit = torch.jit.script(model)
        assert torch.allclose(jit(xs), out)
def test_jumping_knowledge():
    num_nodes, channels, num_layers = 100, 16, 4
    xs = [torch.randn(num_nodes, channels) for _ in range(num_layers)]

    model = JumpingKnowledge('cat')
    assert model.__repr__() == 'JumpingKnowledge(cat)'
    assert model(xs).size() == (num_nodes, channels * num_layers)

    model = JumpingKnowledge('max')
    assert model.__repr__() == 'JumpingKnowledge(max)'
    assert model(xs).size() == (num_nodes, channels)

    model = JumpingKnowledge('lstm', channels, num_layers)
    assert model.__repr__() == 'JumpingKnowledge(lstm)'
    assert model(xs).size() == (num_nodes, channels)
def __init__(self, nfeat, nhid, nclass, dropout=0.5, lr=0.01, weight_decay=5e-4,
             n_edge=1, with_relu=True, drop=False, with_bias=True, device=None):
    super(GIN, self).__init__()

    assert device is not None, "Please specify 'device'!"
    self.device = device
    self.nfeat = nfeat
    self.hidden_sizes = [nhid]
    self.nclass = int(nclass)
    self.dropout = dropout
    self.lr = lr
    self.drop = drop
    if not with_relu:
        self.weight_decay = 0
    else:
        self.weight_decay = weight_decay
    self.with_relu = with_relu
    self.with_bias = with_bias
    self.n_edge = n_edge
    self.output = None
    self.best_model = None
    self.best_output = None
    self.adj_norm = None
    self.features = None
    self.gate = Parameter(torch.rand(1))  # create a gate parameter drawn uniformly from [0, 1]
    nclass = int(nclass)

    """GIN from torch-geometric"""
    num_features = nfeat
    dim = nhid
    nn1 = Sequential(
        Linear(num_features, dim),
        ReLU(),
    )
    self.gc1 = GINConv(nn1)
    # self.bn1 = torch.nn.BatchNorm1d(dim)
    nn2 = Sequential(
        Linear(dim, dim),
        ReLU(),
    )
    self.gc2 = GINConv(nn2)
    nn3 = Sequential(
        Linear(dim, dim),
        ReLU(),
    )
    self.gc3 = GINConv(nn3)
    self.jump = JumpingKnowledge(mode='cat')
    # self.bn2 = torch.nn.BatchNorm1d(dim)
    self.fc1 = Linear(dim, dim)
    self.fc2 = Linear(dim * 1, int(nclass))
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
             dropout, mode='cat'):
    super().__init__()

    self.convs = torch.nn.ModuleList()
    self.convs.append(GCNConv(in_channels, hidden_channels, cached=False))
    self.bns = torch.nn.ModuleList()
    self.bns.append(torch.nn.BatchNorm1d(hidden_channels))
    for _ in range(num_layers - 1):
        self.convs.append(
            GCNConv(hidden_channels, hidden_channels, cached=False))
        self.bns.append(torch.nn.BatchNorm1d(hidden_channels))

    self.jump = JumpingKnowledge(mode)
    if mode == 'cat':
        self.lin1 = Linear(num_layers * hidden_channels, hidden_channels)
    else:
        self.lin1 = Linear(hidden_channels, hidden_channels)
    self.lin2 = Linear(hidden_channels, out_channels)

    self.dropout = dropout
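# A minimal node-classification forward sketch for the JK-GCN above, assuming
# node features `x` and `edge_index` as inputs and `F` = torch.nn.functional;
# the original forward is not shown, so activation and dropout placement here
# are assumptions.
def forward(self, x, edge_index):
    xs = []
    for conv, bn in zip(self.convs, self.bns):
        x = F.relu(bn(conv(x, edge_index)))
        x = F.dropout(x, p=self.dropout, training=self.training)
        xs.append(x)                      # collect layer-wise representations
    x = self.jump(xs)                     # aggregate across layers
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=self.dropout, training=self.training)
    return self.lin2(x)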
def __init__(self, dataset, num_layers, hidden, mode='cat'):
    super(GIN0WithJK, self).__init__()
    self.conv1 = GINConv(Sequential(
        Linear(dataset.num_features, hidden),
        ReLU(),
        Linear(hidden, hidden),
        ReLU(),
        BN(hidden),
    ), train_eps=False)
    self.convs = torch.nn.ModuleList()
    for i in range(num_layers - 1):
        self.convs.append(
            GINConv(Sequential(
                Linear(hidden, hidden),
                ReLU(),
                Linear(hidden, hidden),
                ReLU(),
                BN(hidden),
            ), train_eps=False))
    self.jump = JumpingKnowledge(mode)
    if mode == 'cat':
        self.lin1 = Linear(num_layers * hidden, hidden)
    else:
        self.lin1 = Linear(hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
def __init__(self, dataset, num_layers, hidden, train_eps=False, mode='cat'):
    super().__init__()
    self.conv1 = GINConv(nn.Sequential(
        nn.Linear(dataset.num_features, hidden),
        nn.ReLU(),
        nn.Linear(hidden, hidden),
        nn.ReLU(),
        nn.BatchNorm1d(hidden),
    ), train_eps=train_eps)
    self.convs = nn.ModuleList()
    for i in range(num_layers - 1):
        self.convs.append(
            GINConv(nn.Sequential(
                nn.Linear(hidden, hidden),
                nn.ReLU(),
                nn.Linear(hidden, hidden),
                nn.ReLU(),
                nn.BatchNorm1d(hidden),
            ), train_eps=train_eps))
    self.jump = JumpingKnowledge(mode)
    if mode == 'cat':
        self.lin1 = nn.Linear(num_layers * hidden, hidden)
    else:
        self.lin1 = nn.Linear(hidden, hidden)
    self.lin2 = nn.Linear(hidden, dataset.num_classes)
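# A minimal graph-classification forward sketch for the GIN-with-JK model
# above, in the style of the PyG benchmark models; `data`, the readout
# (global_mean_pool from torch_geometric.nn), and the dropout rate are
# assumptions, since only the constructor is shown.
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = self.conv1(x, edge_index)
    xs = [x]
    for conv in self.convs:
        x = conv(x, edge_index)
        xs.append(x)                      # one entry per GIN layer
    x = self.jump(xs)                     # layer aggregation ('cat' by default)
    x = global_mean_pool(x, batch)        # graph-level readout
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    return F.log_softmax(self.lin2(x), dim=-1)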
def __init__(self, dataset, num_layers, hidden, weight_conv='WeightConv1',
             multi_channel='False'):
    super(SMG_JK, self).__init__()
    self.lin0 = Linear(dataset.num_features, hidden)
    self.convs = torch.nn.ModuleList()
    for i in range(num_layers):
        self.convs.append(SparseConv(hidden, hidden))
    self.masks = torch.nn.ModuleList()
    if multi_channel == 'True':
        out_channel = hidden
    else:
        out_channel = 1
    if weight_conv != 'WeightConv2':
        for i in range(num_layers):
            self.masks.append(WeightConv1(hidden, hidden, out_channel))
    else:
        for i in range(num_layers):
            self.masks.append(
                WeightConv2(
                    Sequential(Linear(hidden * 2, hidden), ReLU(),
                               Linear(hidden, hidden), ReLU(),
                               Linear(hidden, out_channel), Sigmoid())))
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(num_layers * hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
def __init__(self, in_channels, out_channels, depth=3, jk_depth=7,
             base_conv=TAGConv, att_conv=GATConv,
             base_conv_settings={"K": 3}, att_conv_settings={}):
    super(AttentionBlock, self).__init__()
    assert out_channels % in_channels == 0, \
        "out_channels must be a multiple of in_channels"
    ratio = out_channels // in_channels
    att_conv_settings = dict(**att_conv_settings, heads=ratio)

    self.base_conv_list = torch.nn.ModuleList([])
    self.att_conv_list = torch.nn.ModuleList([])
    self.slc_list = torch.nn.ModuleList([])
    self.jk = JumpingKnowledge("lstm", ratio * in_channels, jk_depth)
    for _ in range(depth):
        self.slc_list.append(SublayerConnection(in_channels))
        self.base_conv_list.append(
            base_conv(in_channels, in_channels, **base_conv_settings))
        self.att_conv_list.append(
            att_conv(in_channels, in_channels, **att_conv_settings))
    self.reset_parameters()
def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim, num_layers,
             hidden, ratio=0.8, dropout=0, num_class=0):
    super(ASAP, self).__init__()

    self.num_class = num_class
    self.max_seq_len = max_seq_len
    self.node_encoder = node_encoder

    self.conv1 = GraphConv(emb_dim, hidden, aggr='mean')
    self.convs = torch.nn.ModuleList()
    self.pools = torch.nn.ModuleList()
    self.convs.extend([
        GraphConv(hidden, hidden, aggr='mean')
        for i in range(num_layers - 1)
    ])
    self.pools.extend([
        ASAPooling(hidden, ratio, dropout=dropout)
        for i in range(num_layers // 2)
    ])
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(num_layers * hidden, hidden)
    # self.lin2 = Linear(hidden, dataset.num_classes)

    if self.num_class > 0:  # classification
        self.graph_pred_linear = torch.nn.Linear(hidden, self.num_class)
    else:
        self.graph_pred_linear_list = torch.nn.ModuleList()
        for i in range(max_seq_len):
            self.graph_pred_linear_list.append(
                torch.nn.Linear(hidden, num_vocab))
def __init__(self, args, num_nodes=10, num_layers=4, hidden=16, ratio=0.25):
    super(DiffPool, self).__init__()
    self.args = args
    num_features = self.args.filters_3
    self.att = DenseAttentionModule(self.args)

    num_nodes = ceil(ratio * num_nodes)
    self.embed_block1 = Block(num_features, hidden, hidden)
    self.pool_block1 = Block(num_features, hidden, num_nodes)

    self.embed_blocks = torch.nn.ModuleList()
    self.pool_blocks = torch.nn.ModuleList()
    for i in range((num_layers // 2) - 1):
        num_nodes = ceil(ratio * num_nodes)
        self.embed_blocks.append(Block(hidden, hidden, hidden))
        self.pool_blocks.append(Block(hidden, hidden, num_nodes))

    self.jump = JumpingKnowledge(mode="cat")
    self.lin1 = Linear((len(self.embed_blocks) + 1) * hidden, hidden)
    self.lin2 = Linear(hidden, num_features)
def __init__(self, in_channels, hidden_channels, out_channels, mode="cat"): super(Block, self).__init__() # self.conv1 = DenseSAGEConv(in_channels, hidden_channels) # self.conv2 = DenseSAGEConv(hidden_channels, out_channels) # self.conv1 = DenseGCNConv(in_channels, hidden_channels) # self.conv2 = DenseGCNConv(hidden_channels, out_channels) nn1 = torch.nn.Sequential( Linear(in_channels, hidden_channels), ReLU(), Linear(hidden_channels, hidden_channels), ) nn2 = torch.nn.Sequential( Linear(hidden_channels, out_channels), ReLU(), Linear(out_channels, out_channels), ) self.conv1 = DenseGINConv(nn1, train_eps=True) self.conv2 = DenseGINConv(nn2, train_eps=True) self.jump = JumpingKnowledge(mode) if mode == "cat": self.lin = Linear(hidden_channels + out_channels, out_channels) else: self.lin = Linear(out_channels, out_channels)
def __init__(self, dataset, num_layers, hidden, jpgs=False, jp=False, hop=2,
             num_patches=5, ratio=0.25, plot=False, dropout=False, ge=False):
    super(DiffPool, self).__init__()
    Block = [Block_1hop, Block_2hop, Block_3hop][hop - 1]
    self.num_patches = num_patches
    self.dropout = dropout
    self.plot = plot
    num_nodes = ceil(ratio * dataset[0].num_nodes)
    self.embed_block1 = Block(dataset.num_features, hidden, hidden, jpgs)
    self.pool_block1 = Block(dataset.num_features, hidden, num_nodes, jpgs)
    self.embed_blocks = torch.nn.ModuleList()
    self.pool_blocks = torch.nn.ModuleList()
    for i in range((num_layers // 2) - 1):
        num_nodes = ceil(ratio * num_nodes)
        self.embed_blocks.append(Block(hidden, hidden, hidden, jpgs))
        self.pool_blocks.append(Block(hidden, hidden, num_nodes, jpgs))
    self.pool_block_last = Block(hidden, hidden, 1, jpgs)
    self.jp = jp
    self.ge = ge
    if self.jp:
        self.jump = JumpingKnowledge(mode='cat')
        self.lin1 = Linear((len(self.embed_blocks) + 1) * hidden, hidden)
        self.lin2 = Linear(hidden, dataset.num_classes)
    else:
        # self.lin1 = Linear(num_patches * hidden, hidden)
        # !!! Since we switched to averaging graph embeddings, the dimension
        # needs to change accordingly.
        self.lin1 = Linear(hidden, dataset.num_classes)
def __init__(self, in_channels, hidden_channels, out_channels, edge_index,
             num_nodes, num_layers=2, dropout=0.5, save_mem=False,
             num_mlp_layers=1, use_bn=True, conv_dropout=True):
    super(H2GCN, self).__init__()

    self.feature_embed = MLP(in_channels, hidden_channels, hidden_channels,
                             num_layers=num_mlp_layers, dropout=dropout)

    self.convs = nn.ModuleList()
    self.convs.append(H2GCNConv())

    self.bns = nn.ModuleList()
    self.bns.append(nn.BatchNorm1d(hidden_channels * 2 * len(self.convs)))

    for l in range(num_layers - 1):
        self.convs.append(H2GCNConv())
        if l != num_layers - 2:
            self.bns.append(
                nn.BatchNorm1d(hidden_channels * 2 * len(self.convs)))

    self.dropout = dropout
    self.activation = F.relu
    self.use_bn = use_bn
    self.conv_dropout = conv_dropout  # dropout neighborhood aggregation steps

    self.jump = JumpingKnowledge('cat')
    last_dim = hidden_channels * (2 ** (num_layers + 1) - 1)
    self.final_project = nn.Linear(last_dim, out_channels)

    self.num_nodes = num_nodes
    self.init_adj(edge_index)
def __init__(self, in_channels, hidden_channels, out_channels, num_layers=2,
             dropout=0.5, heads=2, jk_type='max'):
    super(GATJK, self).__init__()

    self.convs = nn.ModuleList()
    self.convs.append(
        GATConv(in_channels, hidden_channels, heads=heads, concat=True))

    self.bns = nn.ModuleList()
    self.bns.append(nn.BatchNorm1d(hidden_channels * heads))
    for _ in range(num_layers - 2):
        self.convs.append(
            GATConv(hidden_channels * heads, hidden_channels, heads=heads,
                    concat=True))
        self.bns.append(nn.BatchNorm1d(hidden_channels * heads))

    self.convs.append(
        GATConv(hidden_channels * heads, hidden_channels, heads=heads))

    self.dropout = dropout
    self.activation = F.elu  # note: uses elu

    self.jump = JumpingKnowledge(jk_type, channels=hidden_channels * heads,
                                 num_layers=1)
    if jk_type == 'cat':
        self.final_project = nn.Linear(hidden_channels * heads * num_layers,
                                       out_channels)
    else:  # max or lstm
        self.final_project = nn.Linear(hidden_channels * heads, out_channels)
def __init__(self, in_channels, hidden_channels, out_channels, num_layers=2,
             dropout=0.5, save_mem=False, jk_type='max'):
    super(GCNJK, self).__init__()

    self.convs = nn.ModuleList()
    self.convs.append(
        GCNConv(in_channels, hidden_channels, cached=not save_mem,
                normalize=not save_mem))

    self.bns = nn.ModuleList()
    self.bns.append(nn.BatchNorm1d(hidden_channels))
    for _ in range(num_layers - 2):
        self.convs.append(
            GCNConv(hidden_channels, hidden_channels, cached=not save_mem,
                    normalize=not save_mem))
        self.bns.append(nn.BatchNorm1d(hidden_channels))

    self.convs.append(
        GCNConv(hidden_channels, hidden_channels, cached=not save_mem,
                normalize=not save_mem))

    self.dropout = dropout
    self.activation = F.relu
    self.jump = JumpingKnowledge(jk_type, channels=hidden_channels,
                                 num_layers=1)
    if jk_type == 'cat':
        self.final_project = nn.Linear(hidden_channels * num_layers,
                                       out_channels)
    else:  # max or lstm
        self.final_project = nn.Linear(hidden_channels, out_channels)
def __init__(self, num_classes, num_layers, feat_dim, embed_dim, jk_layer,
             process_step, dropout):
    super(Net, self).__init__()

    self.dropout = dropout
    self.num_layers = num_layers
    self.convs = torch.nn.ModuleList()

    for i in range(num_layers):
        if i == 0:
            self.convs.append(
                AGGINConv(Sequential(Linear(feat_dim, embed_dim), ReLU(),
                                     Linear(embed_dim, embed_dim), ReLU(),
                                     BN(embed_dim)),
                          train_eps=True, dropout=self.dropout))
        else:
            self.convs.append(
                AGGINConv(Sequential(Linear(embed_dim, embed_dim), ReLU(),
                                     Linear(embed_dim, embed_dim), ReLU(),
                                     BN(embed_dim)),
                          train_eps=True, dropout=self.dropout))

    if jk_layer.isdigit():
        jk_layer = int(jk_layer)
        self.jump = JumpingKnowledge(mode='lstm', channels=embed_dim,
                                     num_layers=jk_layer)
        self.gpl = Set2Set(embed_dim, processing_steps=process_step)
        self.fc1 = Linear(2 * embed_dim, embed_dim)
        # self.fc1 = Linear(embed_dim, embed_dim)
        self.fc2 = Linear(embed_dim, num_classes)
    elif jk_layer == 'cat':
        self.jump = JumpingKnowledge(mode=jk_layer)
        self.gpl = Set2Set(num_layers * embed_dim,
                           processing_steps=process_step)
        self.fc1 = Linear(2 * embed_dim, embed_dim)
        # self.fc1 = Linear(num_layers * embed_dim, embed_dim)
        self.fc2 = Linear(embed_dim, num_classes)
    elif jk_layer == 'max':
        self.jump = JumpingKnowledge(mode=jk_layer)
        self.gpl = Set2Set(embed_dim, processing_steps=process_step)
        self.fc1 = Linear(2 * embed_dim, embed_dim)
        # self.fc1 = Linear(embed_dim, embed_dim)
        self.fc2 = Linear(embed_dim, num_classes)
def __init__(self, num_classes, gnn_layers, embed_dim, hidden_dim, jk_layer,
             process_step, dropout):
    super(Net, self).__init__()

    self.dropout = dropout
    self.convs = torch.nn.ModuleList()
    self.embedding = Embedding(6, embed_dim)

    for i in range(gnn_layers):
        if i == 0:
            self.convs.append(
                AGGINConv(Sequential(Linear(2 * embed_dim + 2, hidden_dim),
                                     ReLU(),
                                     Linear(hidden_dim, hidden_dim), ReLU(),
                                     BN(hidden_dim)),
                          train_eps=True))
        else:
            self.convs.append(
                AGGINConv(Sequential(Linear(hidden_dim, hidden_dim), ReLU(),
                                     Linear(hidden_dim, hidden_dim), ReLU(),
                                     BN(hidden_dim)),
                          train_eps=True))

    if jk_layer.isdigit():
        jk_layer = int(jk_layer)
        # JumpingKnowledge takes `num_layers` for its LSTM aggregator.
        self.jk = JumpingKnowledge(mode='lstm', channels=hidden_dim,
                                   num_layers=jk_layer)
        self.s2s = Set2Set(hidden_dim, processing_steps=process_step)
        self.fc1 = Linear(2 * hidden_dim, hidden_dim)
        self.fc2 = Linear(hidden_dim, int(hidden_dim / 2))
        self.fc3 = Linear(int(hidden_dim / 2), num_classes)
    elif jk_layer == 'cat':
        self.jk = JumpingKnowledge(mode=jk_layer)
        self.s2s = Set2Set(gnn_layers * hidden_dim,
                           processing_steps=process_step)
        self.fc1 = Linear(2 * gnn_layers * hidden_dim, hidden_dim)
        self.fc2 = Linear(hidden_dim, int(hidden_dim / 2))
        self.fc3 = Linear(int(hidden_dim / 2), num_classes)
    elif jk_layer == 'max':
        self.jk = JumpingKnowledge(mode=jk_layer)
        self.s2s = Set2Set(hidden_dim, processing_steps=process_step)
        self.fc1 = Linear(2 * hidden_dim, hidden_dim)
        self.fc2 = Linear(hidden_dim, int(hidden_dim / 2))
        self.fc3 = Linear(int(hidden_dim / 2), num_classes)
def __init__(self, in_channels, hidden_channels, out_channels, jp=False):
    super(Block_2hop, self).__init__()

    self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
    self.conv2 = DenseSAGEConv(hidden_channels, out_channels)
    self.jp = jp
    if self.jp:
        self.jump = JumpingKnowledge('cat')
        self.lin = Linear(hidden_channels + out_channels, out_channels)
def __init__(self, num_features, num_layers):
    super(TAGWithJK, self).__init__()
    self.conv1 = TAGConv(num_features, 8)
    self.convs = torch.nn.ModuleList()
    for i in range(num_layers - 1):
        self.convs.append(TAGConv(8, 8))
    self.jump = JumpingKnowledge('cat')
    self.fc = torch.nn.Linear(2 * num_layers * 8, 2)
def __init__(self, dataset, num_layers, hidden):
    super(Graclus, self).__init__()
    self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')
    self.convs = torch.nn.ModuleList()
    for i in range(num_layers - 1):
        self.convs.append(GraphConv(hidden, hidden, aggr='mean'))
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(num_layers * hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
def __init__(self, in_channels, hidden_channels, out_channels, mode='cat'):
    super(Block, self).__init__()

    self.conv1 = DenseSAGEConv(in_channels, hidden_channels)
    self.conv2 = DenseSAGEConv(hidden_channels, out_channels)
    self.jump = JumpingKnowledge(mode)
    if mode == 'cat':
        self.lin = Linear(hidden_channels + out_channels, out_channels)
    else:
        self.lin = Linear(out_channels, out_channels)
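# A minimal forward sketch for the dense SAGE Block above, following the PyG
# DiffPool benchmark pattern; the `adj`/`mask` arguments and the ReLU
# placement are assumptions, since the snippet only shows the constructor.
def forward(self, x, adj, mask=None):
    x1 = F.relu(self.conv1(x, adj, mask))
    x2 = F.relu(self.conv2(x1, adj, mask))
    return self.lin(self.jump([x1, x2]))  # fuse both layer outputs, then project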
def reset_parameters(self):
    self.embed_block1.reset_parameters()
    self.pool_block1.reset_parameters()
    for embed_block, pool_block in zip(self.embed_blocks, self.pool_blocks):
        embed_block.reset_parameters()
        pool_block.reset_parameters()
    self.jump.reset_parameters()
    self.lin1.reset_parameters()
    self.lin2.reset_parameters()
def __init__(self, dataset, num_layers, hidden, mode='cat'):
    super(GCNWithJK, self).__init__()
    self.conv1 = GCNConv(dataset.num_features, hidden)
    self.convs = torch.nn.ModuleList()
    for i in range(num_layers - 1):
        self.convs.append(GCNConv(hidden, hidden))
    self.jump = JumpingKnowledge(mode)
    if mode == 'cat':
        self.lin1 = Linear(num_layers * hidden, hidden)
    else:
        self.lin1 = Linear(hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
def __init__(self, num_input_features, num_layers, hidden, mode='cat'):
    super(GraphSAGEWithJK, self).__init__()
    self.conv1 = SAGEConv(num_input_features, hidden)
    self.convs = torch.nn.ModuleList()
    for i in range(num_layers - 1):
        self.convs.append(SAGEConv(hidden, hidden))
    self.jump = JumpingKnowledge(mode)
    if mode == 'cat':
        self.lin1 = Linear(3 * num_layers * hidden, hidden)
    else:
        self.lin1 = Linear(3 * hidden, hidden)
    self.lin2 = Linear(hidden, 2)
def __init__(self, dataset, hidden, ratio=0.25):  # we only use 1 layer for coarsening
    super(Coarsening, self).__init__()
    # self.embed_block1 = GNNBlock(dataset.num_features, hidden, hidden)
    self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
    self.coarse_block1 = CoarsenBlock(hidden, ratio)
    self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(hidden + dataset.num_features, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
def __init__(self, inp_dim=3, filters=[16, 16, 16], heads=[2, 2, 2], drop=0.1,
             edge_drop=0.1, bn=True, skip=True, jump=True, jk_mode='lstm'):
    '''
    GAT with edge dropout, skip connections, and jumping knowledge connections.

    --args--
    skip: if True, skip connections are added. The skip connection at layer (i)
        can be the identity if its input and output dims match.
    jump: if True, connections are created from every layer to the output layer.
    jk_mode: 'lstm' or 'max'. Refer to "Representation Learning on Graphs with
        Jumping Knowledge Networks" <https://arxiv.org/abs/1806.03536>.
    '''
    super().__init__()  # all params are added to _modules internally; this makes sure it is initialized.
    assert len(heads) == len(filters)
    self.gat_modules = nn.ModuleList()
    self.bn = bn
    self.skip = skip
    self.jump = jump
    if self.bn:
        self.bn_modules = nn.ModuleList()
    if self.skip:
        self.skip_conns = nn.ModuleList()
    if self.jump:
        self.jump_lyr = JumpingKnowledge(jk_mode, filters[-1], len(filters) + 1)
        # Used to project all layers, including the input, to the final out dim.
        self.proj_lyrs = nn.ModuleList()
        if inp_dim != filters[-1]:
            self.proj_lyrs.append(nn.Linear(inp_dim, filters[-1]))
        else:
            self.proj_lyrs.append(nn.Identity())

    for i, j in enumerate(filters):
        if i == 0:
            self.gat_modules.append(
                GATConv(in_channels=inp_dim, out_channels=filters[i],
                        heads=heads[i], concat=False, dropout=edge_drop))
        else:
            self.gat_modules.append(
                GATConv(in_channels=filters[i - 1], out_channels=filters[i],
                        heads=heads[i], concat=False, dropout=edge_drop))
        if self.bn:
            bn_dim = filters[i]
            self.bn_modules.append(BatchNorm(in_channels=bn_dim))
        if self.skip:
            skip_in_dim = inp_dim if i == 0 else filters[i - 1]
            if skip_in_dim != filters[i]:
                # y = GATConv(x) + W*x. W*x transforms x to the same dimension as GATConv(x).
                self.skip_conns.append(
                    nn.Linear(in_features=skip_in_dim,
                              out_features=filters[i], bias=False))
            else:  # y = GATConv(x) + x
                self.skip_conns.append(nn.Identity())
        if self.jump:
            # If the current layer's out dim != the final layer's out dim, a projection is needed.
            if filters[i] != filters[-1]:
                self.proj_lyrs.append(nn.Linear(filters[i], filters[-1]))
            else:
                self.proj_lyrs.append(nn.Identity())

    self.leaky = nn.LeakyReLU()
    self.drop = nn.Dropout(p=drop)
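# A minimal forward sketch for the GAT above, matching the behaviour its
# docstring describes (per-layer skip connections, projections of every layer
# into the JK aggregator); the forward pass itself is not part of the snippet,
# so the ordering of BatchNorm, activation, and dropout here is an assumption.
def forward(self, x, edge_index):
    outs = [self.proj_lyrs[0](x)] if self.jump else None  # projected input
    for i, gat in enumerate(self.gat_modules):
        h = gat(x, edge_index)
        if self.skip:
            h = h + self.skip_conns[i](x)          # residual / skip connection
        if self.bn:
            h = self.bn_modules[i](h)
        x = self.drop(self.leaky(h))
        if self.jump:
            outs.append(self.proj_lyrs[i + 1](x))  # project to final out dim
    if self.jump:
        x = self.jump_lyr(outs)                    # 'lstm' or 'max' aggregation
    return x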
def __init__(self, num_layers, hidden_list, activation, data):
    super(ModelGCN, self).__init__()
    assert len(hidden_list) == num_layers + 1

    self.linear_1 = Linear(data.num_features, hidden_list[0])
    self.convs = torch.nn.ModuleList()
    for i in range(num_layers):
        self.convs.append(GCNConv(hidden_list[i], hidden_list[i + 1]))
    self.JK = JumpingKnowledge(mode='max')
    self.linear_2 = Linear(hidden_list[-1], data.num_class)

    if activation == "relu":
        self.activation = relu
    elif activation == "leaky_relu":
        self.activation = leaky_relu
def __init__(self, dataset, args):
    in_channels = dataset.num_features
    out_channels = dataset.num_classes

    super(GCN_JKNet, self).__init__()
    self.conv1 = GCNConv(in_channels, 16)
    self.conv2 = GCNConv(16, 16)
    self.lin1 = torch.nn.Linear(16, out_channels)
    self.one_step = APPNP(K=1, alpha=0)
    self.JK = JumpingKnowledge(mode='lstm',
                               channels=16,
                               num_layers=4)
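# A minimal forward sketch for GCN_JKNet above, assuming the usual JK-Net
# pattern (two GCN layers, LSTM aggregation, one APPNP propagation step, then
# the final linear layer); the dropout rate and log_softmax output are
# assumptions, since only the constructor is shown.
def forward(self, x, edge_index):
    x1 = F.relu(self.conv1(x, edge_index))
    x1 = F.dropout(x1, p=0.5, training=self.training)
    x2 = F.relu(self.conv2(x1, edge_index))
    x2 = F.dropout(x2, p=0.5, training=self.training)
    x = self.JK([x1, x2])                 # LSTM-attention over layer outputs
    x = self.one_step(x, edge_index)      # single propagation step (APPNP, K=1)
    x = self.lin1(x)
    return F.log_softmax(x, dim=-1)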
def __init__(self, dataset, hidden, num_layers=2, ratio=0.5):
    super(MultiLayerCoarsening, self).__init__()

    self.embed_block1 = DenseGCNConv(dataset.num_features, hidden)
    self.coarse_block1 = CoarsenBlock(hidden, ratio)
    self.embed_block2 = DenseGCNConv(hidden, dataset.num_features)
    # self.embed_block2 = GNNBlock(hidden, hidden, dataset.num_features)
    self.num_layers = num_layers

    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(hidden + dataset.num_features, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)