def test_sag_pooling():
    in_channels = 16
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    for GNN in [GraphConv, GCNConv, GATConv, SAGEConv]:
        pool = SAGPooling(in_channels, ratio=0.5, GNN=GNN)
        assert pool.__repr__() == (f'SAGPooling({GNN.__name__}, 16, '
                                   f'ratio=0.5, multiplier=1.0)')
        out = pool(x, edge_index)
        assert out[0].size() == (num_nodes // 2, in_channels)
        assert out[1].size() == (2, 2)

        pool = SAGPooling(in_channels, ratio=None, GNN=GNN, min_score=0.1)
        assert pool.__repr__() == (f'SAGPooling({GNN.__name__}, 16, '
                                   f'min_score=0.1, multiplier=1.0)')
        out = pool(x, edge_index)
        assert out[0].size(0) <= x.size(0) and out[0].size(1) == 16
        assert out[1].size(0) == 2 and out[1].size(1) <= edge_index.size(1)

        pool = SAGPooling(in_channels, ratio=2, GNN=GNN)
        assert pool.__repr__() == (f'SAGPooling({GNN.__name__}, 16, '
                                   f'ratio=2, multiplier=1.0)')
        out = pool(x, edge_index)
        assert out[0].size() == (2, in_channels)
        assert out[1].size() == (2, 2)
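# Minimal runnable setup for calling SAGPooling as in the test above; a sketch,
# assuming PyTorch Geometric is installed. The imports are not part of the
# original snippet.
import torch
from torch_geometric.nn import SAGPooling, GraphConv, GCNConv, GATConv, SAGEConv

pool = SAGPooling(16, ratio=0.5)  # default scoring GNN is GraphConv
x = torch.randn(4, 16)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])
# forward returns a 6-tuple: (x, edge_index, edge_attr, batch, perm, score)
x_out, edge_index_out, _, batch, perm, score = pool(x, edge_index)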
def __init__(self, in_channels, out_channels, inner_dim=16, ratio=0.5, pools=2):
    super(GraphEncoder, self).__init__()
    self.in_conv = GCNConv(in_channels, inner_dim)
    self.out_conv = GCNConv(inner_dim, out_channels)
    self.in_pools = torch.nn.ModuleList([])
    self.out_pools = []
    for p in range(pools):
        self.in_pools.append(SAGPooling(inner_dim, ratio=ratio))
        self.out_pools.append(SAGPooling(inner_dim, ratio=1.0 / ratio))
    self.out_pools = torch.nn.ModuleList(list(reversed(self.out_pools)))
def __init__(self, in_feats):
    super(Net, self).__init__()
    hs_1 = in_feats * 2
    self.conv1 = SAGEConv(in_feats, hs_1)
    self.bn1 = BatchNorm(hs_1)
    self.pool1 = SAGPooling(hs_1, ratio=0.5)
    hs_2 = int(hs_1 * 2)
    self.conv2 = SAGEConv(hs_1, hs_2)
    self.bn2 = BatchNorm(hs_2)
    self.pool2 = SAGPooling(hs_2, ratio=0.5)
    num_classes = 2
    self.lin1 = Linear(hs_2, num_classes).cuda()
def __init__(self):
    super(Net, self).__init__()
    self.conv1 = GCNConv(5, 20)
    self.pool = SAGPooling(20, ratio=0.8)  # LAYER IN THE MODEL
    self.conv2 = GCNConv(20, 15)
    # self.pool2 = torch.nn.AdaptiveMaxPool2d((1, 8))
    # self.nn1 = torch.nn.Linear(8, 8)
def __init__(self, dataset, num_layers, hidden, ratio=0.25):
    super(SAGPool, self).__init__()
    self.conv1 = GNN_Block(dataset.num_features, hidden)
    self.pool1 = SAGPooling(hidden, ratio)
    self.convs = torch.nn.ModuleList()
    self.pools = torch.nn.ModuleList()
    self.convs.extend([
        GNN_Block(hidden, hidden)
        for i in range(num_layers - 1)
    ])
    self.pools.extend([
        SAGPooling(hidden, ratio)
        for i in range(num_layers - 1)
    ])
    self.embed_final = GNN_Block(hidden, hidden)
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear((num_layers + 1) * hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
def __init__(self, input_dim, time_step, hidden_dim, inner_edge, inner20_edge,
             outer_edge, input_num, use_gru, device):
    super(CategoricalGraphPool, self).__init__()

    # basic parameters
    self.dim = hidden_dim
    self.input_dim = input_dim
    self.time_step = time_step
    self.inner_edge = inner_edge
    self.inner20_edge = inner20_edge
    self.outer_edge = outer_edge
    self.input_num = input_num
    self.use_gru = use_gru
    self.device = device

    # hidden layers
    self.pool_attention = AttentionBlock(20, hidden_dim)
    if self.use_gru:
        self.weekly_encoder = nn.GRU(hidden_dim, hidden_dim)
    self.encoder_list = nn.ModuleList([
        SequenceEncoder(input_dim, time_step, hidden_dim)
        for _ in range(input_num)
    ])
    self.cat_gat = GATConv(hidden_dim * 2, hidden_dim)
    self.inner_gat = GATConv(hidden_dim, hidden_dim)
    self.pooling_gcn = SAGPooling(hidden_dim, ratio=0.5)
    self.weekly_attention = AttentionBlock(input_num, hidden_dim)
    self.fusion = nn.Linear(hidden_dim * 3, hidden_dim)

    # output layer
    self.reg_layer = nn.Linear(hidden_dim, 1)
    self.cls_layer = nn.Linear(hidden_dim, 1)
def __init__(self, n_heads, in_features, head_out_feats, final_out_feats):
    super().__init__()
    self.n_heads = n_heads
    self.in_features = in_features
    self.out_features = head_out_feats
    self.conv = GATConv(in_features, head_out_feats, n_heads)
    # min_score=-1 keeps every node but still yields attention scores
    self.readout = SAGPooling(n_heads * head_out_feats, min_score=-1)
def __init__(self):
    super(Net, self).__init__()
    self.conv1 = GCNConv(5, 10)
    self.sagpool1 = SAGPooling(10, 0.2, GCNConv)
    self.gatconv = GATConv(10, 20, heads=3)
    self.nn = torch.nn.Sequential(
        torch.nn.Linear(60, 30),
        torch.nn.ReLU(),
        torch.nn.Linear(30, 3),
    )
    self.m = torch.nn.LogSoftmax(dim=1)
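# Hedged sketch of a forward pass consistent with the layers above; the
# original snippet does not include one, so treat this as an assumption
# (F and global_mean_pool from torch_geometric.nn are assumed imported).
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    # SAGPooling returns (x, edge_index, edge_attr, batch, perm, score)
    x, edge_index, _, batch, _, _ = self.sagpool1(x, edge_index, batch=batch)
    x = self.gatconv(x, edge_index)  # 3 heads * 20 channels = 60 features
    x = global_mean_pool(x, batch)   # graph-level readout (assumption)
    return self.m(self.nn(x))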
def __init__(self, num_features, n_hidden, min_score):
    super(GCNNet, self).__init__()
    self.conv1 = GCNConv(num_features, n_hidden)
    self.conv3 = GCNConv(n_hidden, n_hidden // 4)
    self.pool = SAGPooling(n_hidden, min_score=min_score, GNN=GCNConv)
    self.activation = gelu
    self.final_pooling = global_add_pool
def __init__(self, config):
    super(GIN, self).__init__()
    self.config = config
    self.gin_convs = torch.nn.ModuleList()
    self.batch_norms = torch.nn.ModuleList()
    for layer in range(self.config.num_layers - 1):
        if layer == 0:
            nn = Sequential(
                Linear(self.config.num_feature_dim, self.config.hidden_dim),
                ReLU(),
                Linear(self.config.hidden_dim, self.config.hidden_dim))
        else:
            nn = Sequential(
                Linear(self.config.hidden_dim, self.config.hidden_dim),
                ReLU(),
                Linear(self.config.hidden_dim, self.config.hidden_dim))
        self.gin_convs.append(GINConv(nn))
        self.batch_norms.append(
            torch.nn.BatchNorm1d(self.config.hidden_dim))
    if self.config.pooling_type == "sagpool":
        self.pool1 = SAGPooling(self.config.hidden_dim, ratio=self.config.poolratio)
    elif self.config.pooling_type == "topk":
        self.pool1 = TopKPooling(self.config.hidden_dim, ratio=self.config.poolratio)
    elif self.config.pooling_type == "asa":
        self.pool1 = ASAPooling(self.config.hidden_dim, ratio=self.config.poolratio)
    self.fc1 = Linear(self.config.hidden_dim, self.config.hidden_dim)
    self.fc2 = Linear(self.config.hidden_dim, self.config.embed_dim)
def __init__(self, edge_index):
    super(MyNet, self).__init__()
    self.edge_index = edge_index
    self.conv1 = GCNConv(6, 64)
    self.pool1 = SAGPooling(64, ratio=0.70, GNN=GCNConv)
    self.conv2 = GCNConv(64, 32)
    self.fc1 = nn.Linear(32, 1)
    self.sigmoid = nn.Sigmoid()
def __init__(self, dataset, embedding_layer, hidden_dim=cmd_args.hidden_dim):
    super().__init__()
    self.embedding_layer = embedding_layer
    self.edge_offset = dataset.attr_encoder.edge_offset
    self.conv1 = GraphConvE(hidden_dim, hidden_dim)
    self.pool1 = SAGPooling(hidden_dim)
    self.conv2 = GraphConvE(hidden_dim, hidden_dim)
    self.pool2 = SAGPooling(hidden_dim)
    self.conv3 = GraphConvE(hidden_dim, hidden_dim)
    self.pool3 = SAGPooling(hidden_dim)
    self.conv4 = GraphConvE(hidden_dim, hidden_dim)
    self.pool4 = SAGPooling(hidden_dim)
    self.lin1 = torch.nn.Linear(hidden_dim * 2, hidden_dim)
    self.lin2 = torch.nn.Linear(hidden_dim, hidden_dim)
    self.lin3 = torch.nn.Linear(hidden_dim, hidden_dim)
def __init__(self, dataset, num_layers, hidden):
    super(SAGPool, self).__init__()
    self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')
    self.convs = torch.nn.ModuleList()
    self.pools = torch.nn.ModuleList()
    for i in range(num_layers - 1):
        self.convs.append(GraphConv(hidden, hidden, aggr='mean'))
        # SAGPooling takes the scoring GNN as a class via the `GNN` keyword,
        # not as a string (the original `gnn='SAGE'` would raise a TypeError);
        # SAGEConv is assumed imported from torch_geometric.nn.
        self.pools.append(SAGPooling(hidden, ratio=0.8, GNN=SAGEConv))
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(num_layers * hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
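# A hedged sketch of a matching forward pass with JumpingKnowledge readout;
# not from the original source (F and global_mean_pool assumed imported).
# It collects a pooled summary after conv1 and after each conv/pool pair,
# giving num_layers embeddings, which 'cat' mode turns into
# num_layers * hidden features, matching lin1 above.
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    xs = [global_mean_pool(x, batch)]
    for conv, pool in zip(self.convs, self.pools):
        x = F.relu(conv(x, edge_index))
        # SAGPooling returns (x, edge_index, edge_attr, batch, perm, score)
        x, edge_index, _, batch, _, _ = pool(x, edge_index, batch=batch)
        xs.append(global_mean_pool(x, batch))
    x = self.jump(xs)
    x = F.relu(self.lin1(x))
    return F.log_softmax(self.lin2(x), dim=-1)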
def poollayer(self, pooltype):
    self.pooltype = pooltype
    if self.pooltype == 'TopKPool':
        self.pool1 = TopKPooling(1024)
        self.pool2 = TopKPooling(1024)
    elif self.pooltype == 'EdgePool':
        self.pool1 = EdgePooling(1024)
        self.pool2 = EdgePooling(1024)
    elif self.pooltype == 'ASAPool':
        self.pool1 = ASAPooling(1024)
        self.pool2 = ASAPooling(1024)
    elif self.pooltype == 'SAGPool':
        self.pool1 = SAGPooling(1024)
        self.pool2 = SAGPooling(1024)
    else:
        # The original printed a message and fell through to the return,
        # which would raise AttributeError; fail explicitly instead.
        raise NotImplementedError(f'Graph pooling method {pooltype!r} is not implemented')
    return self.pool1, self.pool2
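# Hedged usage note (not from the original source): these four pooling classes
# do not share a return signature, so callers must unpack each accordingly.
# This helper is a sketch of one way to dispatch on the pool type.
def apply_pool(pool, x, edge_index, batch):
    if isinstance(pool, (TopKPooling, SAGPooling)):
        # (x, edge_index, edge_attr, batch, perm, score)
        x, edge_index, _, batch, _, _ = pool(x, edge_index, batch=batch)
    elif isinstance(pool, EdgePooling):
        # (x, edge_index, batch, unpool_info)
        x, edge_index, batch, _ = pool(x, edge_index, batch)
    elif isinstance(pool, ASAPooling):
        # (x, edge_index, edge_weight, batch, perm)
        x, edge_index, _, batch, _ = pool(x, edge_index, batch=batch)
    return x, edge_index, batch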
def build_block(self, in_channels, out_channels, hiddens, ratio=1.0):
    mlp = nn.Sequential(
        nn.Linear(2 * in_channels, hiddens),
        nn.ReLU(),
        nn.Linear(hiddens, out_channels),
    )
    conv = EdgeConv(nn=mlp, aggr=self.aggr)
    # Check the `ratio` argument (the original tested `self.ratio`, which
    # ignores the value passed to this call).
    if ratio < 1.0:
        pool = SAGPooling(out_channels, ratio=ratio)
    else:
        pool = None
    return conv, pool
def __init__(self, num_vocab, max_seq_len, node_encoder, emb_dim,
             pooling_ratio=0.5, dropout_ratio=0.5, num_layers=3, num_class=0):
    super(SAGPoolGNN, self).__init__()
    self.num_class = num_class
    self.emb_dim = emb_dim
    self.num_vocab = num_vocab
    self.max_seq_len = max_seq_len
    self.node_encoder = node_encoder

    if self.num_class > 0:  # classification
        self.graph_pred_linear = torch.nn.Linear(self.emb_dim, self.num_class)
    else:
        self.graph_pred_linear_list = torch.nn.ModuleList()
        for i in range(max_seq_len):
            self.graph_pred_linear_list.append(
                torch.nn.Linear(self.emb_dim, self.num_vocab))

    # SAGPool original part
    self.num_features = emb_dim
    self.nhid = emb_dim
    self.pooling_ratio = pooling_ratio
    self.dropout_ratio = dropout_ratio
    self.num_layers = num_layers

    self.conv1 = GCNConv(self.num_features, self.nhid)
    self.pool1 = SAGPooling(self.nhid, ratio=self.pooling_ratio)
    self.convs = nn.ModuleList([GCNConv(self.nhid, self.nhid)
                                for _ in range(num_layers - 1)])
    self.pools = nn.ModuleList([SAGPooling(self.nhid, ratio=self.pooling_ratio)
                                for _ in range(num_layers - 1)])

    self.lin1 = torch.nn.Linear(self.nhid * 2, self.nhid)
    self.lin2 = torch.nn.Linear(self.nhid, self.nhid)
def __init__(self, features=1036, nhid=128, grph_dim=32, nonlinearity=torch.tanh,
             dropout_rate=0.25, GNN=GCNConv, use_edges=0, pooling_ratio=0.20,
             act=None, label_dim=1, init_max=True):
    super(GraphNet, self).__init__()
    self.dropout_rate = dropout_rate
    self.use_edges = use_edges
    self.act = act

    self.conv1 = SAGEConv(features, nhid)
    # SAGPooling expects the scoring GNN as a class via the `GNN` keyword;
    # the original passed gnn='GCN' (a string), which is not a valid argument,
    # so the default above is changed from the string 'GCN' to GCNConv.
    self.pool1 = SAGPooling(nhid, ratio=pooling_ratio, GNN=GNN)
    self.conv2 = SAGEConv(nhid, nhid)
    self.pool2 = SAGPooling(nhid, ratio=pooling_ratio, GNN=GNN)
    self.conv3 = SAGEConv(nhid, nhid)
    self.pool3 = SAGPooling(nhid, ratio=pooling_ratio, GNN=GNN)

    self.lin1 = torch.nn.Linear(nhid * 2, nhid)
    self.lin2 = torch.nn.Linear(nhid, grph_dim)
    self.lin3 = torch.nn.Linear(grph_dim, label_dim)

    self.output_range = Parameter(torch.FloatTensor([6]), requires_grad=False)
    self.output_shift = Parameter(torch.FloatTensor([-3]), requires_grad=False)

    if init_max:
        init_max_weights(self)
        print("Initializing with Max")
def __init__(self, in_node_feat, in_edge_feat, inplace=True, num_opt=4):
    super(AppearancePoolFusion, self).__init__()
    self.no_features = 128
    self.conv1 = EdgeConvRot(in_node_feat, in_edge_feat, self.no_features)
    self.conv2 = EdgeConvRot(self.no_features, self.no_features, self.no_features)
    self.conv3 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv3_sub_pre = EdgeConvRot(self.no_features, self.no_features, self.no_features * 2)
    self.conv3_sub_pooling = SAGPooling(in_channels=2 * self.no_features, GNN=GATConv)
    self.conv3_sub1 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features * 2)
    self.conv3_sub2 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv3_subsub_pooling = SAGPooling(in_channels=2 * self.no_features, GNN=GATConv)
    self.conv3_subsub = EdgeConvRot(2 * self.no_features, 2 * self.no_features, 2 * self.no_features)
    self.conv3_subsub2 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, 2 * self.no_features)
    self.conv4 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv5 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.lin1 = Linear(self.no_features, num_opt)
    self.inplace = inplace
def __init__(self):
    super(PoolingFineNet, self).__init__()
    self.no_features = 32  # More features for large dataset
    self.conv1 = EdgeConvRot(4, 4, self.no_features)
    self.conv2 = EdgeConvRot(self.no_features, self.no_features + 4, self.no_features)
    self.conv3 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv3_sub_pre = EdgeConvRot(self.no_features, self.no_features, self.no_features * 2)
    self.conv3_sub_pooling = SAGPooling(in_channels=2 * self.no_features, GNN=GATConv)
    self.conv3_sub1 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features * 2)
    self.conv3_sub2 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv3_subsub_pooling = SAGPooling(in_channels=2 * self.no_features, GNN=GATConv)
    self.conv3_subsub = EdgeConvRot(2 * self.no_features, 2 * self.no_features, 2 * self.no_features)
    self.conv3_subsub2 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, 2 * self.no_features)
    self.conv4 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv5 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.lin1 = Linear(self.no_features, 4)
    self.m = torch.nn.Sigmoid()
def __init__(self, dataset, num_layers, hidden, ratio=0.8):
    super().__init__()
    self.conv1 = GraphConv(dataset.num_features, hidden, aggr='mean')
    self.convs = torch.nn.ModuleList()
    self.pools = torch.nn.ModuleList()
    self.convs.extend([
        GraphConv(hidden, hidden, aggr='mean')
        for i in range(num_layers - 1)
    ])
    self.pools.extend([
        SAGPooling(hidden, ratio)
        for i in range(num_layers // 2)
    ])
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(num_layers * hidden, hidden)
    self.lin2 = Linear(hidden, dataset.num_classes)
def __init__(self, num_layers, hidden, num_node_features, num_classes, ratio=0.8):
    super(SAGPool_g, self).__init__()
    self.conv1 = GCNConv(num_node_features, hidden, add_self_loops=False)
    self.convs = torch.nn.ModuleList()
    self.convs.extend([
        GCNConv(hidden, hidden, add_self_loops=False)
        for i in range(num_layers - 1)
    ])
    # a single pooling layer applied to the concatenation of all layer outputs
    self.pool = SAGPooling(hidden * num_layers, ratio)
    self.lin1 = Linear(num_layers * hidden, hidden)
    self.lin2 = Linear(hidden, num_classes)
def __init__(self, in_node_feat, in_edge_feat):
    super(PoolingFineNetWithAppearance, self).__init__()
    self.no_features = 64  # More features for large dataset
    self.conv1 = EdgeConvRot(in_node_feat + 4, in_edge_feat + 4, self.no_features)
    self.conv2 = EdgeConvRot(self.no_features, self.no_features + 4, self.no_features)
    self.conv3 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv3_sub_pre = EdgeConvRot(self.no_features, self.no_features, self.no_features * 2)
    self.conv3_sub_pooling = SAGPooling(in_channels=2 * self.no_features, GNN=GATConv)
    self.conv3_sub1 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features * 2)
    self.conv3_sub2 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv3_subsub_pooling = SAGPooling(in_channels=2 * self.no_features, GNN=GATConv)
    self.conv3_subsub = EdgeConvRot(2 * self.no_features, 2 * self.no_features, 2 * self.no_features)
    self.conv3_subsub2 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, 2 * self.no_features)
    self.conv4 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.conv5 = EdgeConvRot(2 * self.no_features, 2 * self.no_features, self.no_features)
    self.lin1 = Linear(self.no_features, 4)
    self.lin2 = Linear(self.no_features, 1)
    self.m = torch.nn.Sigmoid()
def __init__(self):
    super(Net, self).__init__()
    self.lin0 = torch.nn.Linear(dataset.num_features, dim)
    nn = Sequential(Linear(2, 64), ReLU(), Linear(64, dim * dim))
    self.conv = NNConv(dim, dim, nn, aggr='mean')
    self.gru = GRU(dim, dim)
    self.pool1 = SAGPooling(dim, min_score=0.001, GNN=GCNConv)
    gatt_nn = Sequential(Linear(dim, dim), ReLU(), Linear(dim, 1))
    self.gatt = GlobalAttention(gatt_nn)
    self.lin1 = torch.nn.Linear(dim, dim)
    self.lin2 = torch.nn.Linear(dim, 1)
def __init__(self, neighbor_hop=1, in_channels=100, out_channels=100, heads=10,
             dropout=0.5, negative_slope=0.2, pooling_ratio=1):
    super(GAT_Block, self).__init__()
    self.neighbor_hop = neighbor_hop
    # conv1 and conv2 are the two layers that produce the block's F(x);
    # conv3 projects x so its width matches that of F(x).
    # (Comments translated from Chinese.)
    self.conv1 = GATConv(in_channels, out_channels, heads=heads, dropout=dropout)
    # GATConv defaults to concat=True, i.e. the head outputs are concatenated,
    # so conv1 outputs out_channels * heads features; conv2 takes conv1's
    # output, hence its in_channels = out_channels * heads.
    self.conv2 = GATConv(out_channels * heads, out_channels, heads=heads,
                         dropout=dropout, concat=False)
    self.conv3 = GATConv(in_channels, out_channels, heads=heads,
                         dropout=dropout, concat=False)
    self.fc = torch.nn.Linear(in_channels, out_channels)
    # BatchNorm layers
    self.BN1 = torch.nn.BatchNorm1d(out_channels * heads, eps=1e-05, momentum=0.1, affine=True)
    self.BN2 = torch.nn.BatchNorm1d(out_channels, eps=1e-05, momentum=0.1, affine=True)
    self.BN3 = torch.nn.BatchNorm1d(out_channels, eps=1e-05, momentum=0.1, affine=True)
    # Pooling layer: ratio is the fraction of nodes kept according to the
    # computed attention scores (default 0.5).
    self.pool = SAGPooling(out_channels, ratio=pooling_ratio)
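# Hedged sketch of the residual forward implied by the comments above; this is
# an assumption, not the original author's code (F is assumed imported).
def forward(self, x, edge_index, batch=None):
    fx = F.elu(self.BN1(self.conv1(x, edge_index)))      # first half of F(x)
    fx = self.BN2(self.conv2(fx, edge_index))            # second half of F(x)
    shortcut = self.BN3(self.conv3(x, edge_index))       # widths of x and F(x) match
    out = F.elu(fx + shortcut)
    # SAGPooling returns (x, edge_index, edge_attr, batch, perm, score)
    out, edge_index, _, batch, _, _ = self.pool(out, edge_index, batch=batch)
    return out, edge_index, batch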
def __init__(self, num_layers, hidden, num_node_features, num_classes, ratio=0.8,
             min_score=None, use_weight=False, nonlinearity=None):
    super(SAGPool, self).__init__()
    self.use_weight = use_weight
    if not self.use_weight:
        conv_layer = GraphConv
        conv_param = {'aggr': 'mean'}
    else:
        conv_layer = GCNConv
        conv_param = {'add_self_loops': False}
    self.nonlinearity = nonlinearity
    self.conv1 = conv_layer(num_node_features, hidden, **conv_param)
    self.convs = torch.nn.ModuleList()
    self.pools = torch.nn.ModuleList()
    self.convs.extend([
        conv_layer(hidden, hidden, **conv_param)
        for i in range(num_layers - 1)
    ])
    self.pools.extend([
        SAGPooling(hidden, ratio, min_score=min_score, GNN=conv_layer,
                   nonlinearity=self.nonlinearity, **conv_param)
        for i in range((num_layers - 1) // 2)
    ])
    self.jump = JumpingKnowledge(mode='cat')
    self.lin1 = Linear(num_layers * hidden, hidden)
    self.lin2 = Linear(hidden, num_classes)
def __init__(self, config):
    super(GCN, self).__init__()
    self.config = config
    self.gc1 = GCNConv(self.config.num_feature_dim, self.config.hidden)
    self.gc2 = GCNConv(self.config.hidden, self.config.hidden)
    if self.config.pooling_type == "sagpool":
        self.pool1 = SAGPooling(self.config.hidden, ratio=self.config.poolratio)
    elif self.config.pooling_type == "topk":
        self.pool1 = TopKPooling(self.config.hidden, ratio=self.config.poolratio)
    elif self.config.pooling_type == "asa":
        self.pool1 = ASAPooling(self.config.hidden, ratio=self.config.poolratio)
    self.fc = nn.Linear(self.config.hidden, self.config.embed_dim)
def __init__(self, cfg):
    super(SAGpool, self).__init__()
    model_cfg = cfg['MODEL']
    hyper_cfg = cfg['HYPERPARAMS']
    solver_cfg = cfg['SOLVER']

    if hyper_cfg['GCN_TYPE'] == 'GCN':
        from torch_geometric.nn import GCNConv as GraphConv
    elif hyper_cfg['GCN_TYPE'] == 'SAGE':
        from torch_geometric.nn import SAGEConv as GraphConv
    else:
        raise NotImplementedError

    self.nhid = model_cfg['NUM_HIDDEN']
    self.nfeat = model_cfg['NUM_FEATURES']
    self.min_nodes = model_cfg['MIN_NODES']
    self.conv_ch = model_cfg['CONV_CHANNEL']
    self.ratio = model_cfg['POOL_RATIO']
    self.num_layer = hyper_cfg['NUM_LAYER']
    self.num_class = solver_cfg['NUM_CLASS']

    self.conv1 = GraphConv(self.nfeat, self.nhid)
    self.convs = torch.nn.ModuleList()
    self.convs.extend([
        GraphConv(self.nhid, self.nhid)
        for i in range(self.num_layer - 1)
    ])
    self.att_global_pool = global_pool(self.nhid * self.num_layer, hyper_cfg)
    self.att_lin = torch.nn.Linear(self.nhid * self.num_layer * 2,
                                   self.nhid * self.num_layer * 2)
    self.pool = SAGPooling(self.nhid * self.num_layer, self.ratio)
    self.final_conv = GraphConv(self.nhid * self.num_layer, self.nhid)
    self.final_global_pool = global_pool(self.nhid, hyper_cfg)
    self.lin1 = torch.nn.Linear(self.nhid * 2, self.nhid)
    self.lin2 = torch.nn.Linear(self.nhid, self.nhid // 2)
    self.lin3 = torch.nn.Linear(self.nhid // 2, self.num_class)
def __init__(self, in_channels, hidden_channels, out_channels, depth,
             pool_ratios=0.5, sum_res=True, act=F.relu, dropout_rate=0):
    super(GraphUNet, self).__init__()
    assert depth >= 1
    self.in_channels = in_channels
    self.hidden_channels = hidden_channels
    self.out_channels = out_channels
    self.depth = depth
    self.pool_ratios = repeat(pool_ratios, depth)
    self.act = act
    self.sum_res = sum_res
    channels = hidden_channels

    self.dropout = torch.nn.Dropout(dropout_rate)
    self.down_convs = torch.nn.ModuleList()
    self.pools = torch.nn.ModuleList()
    self.down_convs.append(GCNConv(in_channels, channels, improved=True))
    for i in range(depth):
        self.pools.append(SAGPooling(channels, self.pool_ratios[i]))
        self.down_convs.append(GCNConv(channels, channels, improved=True))

    in_channels = channels if sum_res else 2 * channels

    self.up_convs = torch.nn.ModuleList()
    for i in range(depth - 1):
        self.up_convs.append(GCNConv(in_channels, channels, improved=True))
    self.up_convs.append(GCNConv(in_channels, out_channels, improved=True))

    self.reset_parameters()
class ScoreNetwork(nn.Module):
    def __init__(self, top_k=4):
        super(ScoreNetwork, self).__init__()
        self.no_features = 128
        self.input_node_feat = 4 * top_k + top_k
        self.dropout_ratio = 0.6
        self.conv1 = EdgeConvRot(self.input_node_feat, 4, self.no_features)
        self.conv2 = EdgeConvRot(self.no_features, self.no_features, self.no_features)
        self.conv2_pooling = SAGPooling(in_channels=self.no_features, GNN=GATConv)
        self.conv3 = EdgeConvRot(self.no_features, self.no_features, self.no_features)
        self.conv3_pooling = SAGPooling(in_channels=self.no_features, GNN=GATConv)
        self.conv4 = EdgeConvRot(self.no_features, self.no_features, self.no_features)
        self.conv4_pooling = SAGPooling(in_channels=self.no_features, GNN=GATConv)
        self.lin1 = torch.nn.Linear(self.no_features * 2, self.no_features)
        self.lin2 = torch.nn.Linear(self.no_features, self.no_features // 2)
        self.lin3 = torch.nn.Linear(self.no_features // 2, top_k)

    def forward(self, node_feat, node_level, edge_index, edge_feat):
        N = node_feat.shape[0]
        node_feat = node_feat.view(N, -1)
        node_feat = torch.cat([node_feat, node_level], dim=1)

        x1, edge_x1 = self.conv1(node_feat, edge_index, edge_feat)
        x1, edge_x1 = F.relu(x1), F.relu(edge_x1)

        x2, edge_x2 = self.conv2(x1, edge_index, edge_x1)
        x2, edge_x2 = F.relu(x2), F.relu(edge_x2)
        # SAGPooling returns (x, edge_index, edge_attr, batch, perm, score)
        x2_pool, edge_x2_index_pool, edge_x2_pool, batch, p2_to_x2, _ = \
            self.conv2_pooling(x2, edge_index, edge_attr=edge_x2)
        l1 = torch.cat([gmp(x2_pool, batch), gap(x2_pool, batch)], dim=1)

        x3, edge_x3 = self.conv3(x2_pool, edge_x2_index_pool, edge_x2_pool)
        x3, edge_x3 = F.relu(x3), F.relu(edge_x3)
        x3_pool, edge_x3_index_pool, edge_x3_pool, batch, p3_to_x3, _ = \
            self.conv3_pooling(x3, edge_x2_index_pool, edge_attr=edge_x3)
        l2 = torch.cat([gmp(x3_pool, batch), gap(x3_pool, batch)], dim=1)

        x4, edge_x4 = self.conv4(x3_pool, edge_x3_index_pool, edge_x3_pool)
        x4, edge_x4 = F.relu(x4), F.relu(edge_x4)
        x4_pool, edge_x4_index_pool, edge_x4_pool, batch, p4_to_x4, _ = \
            self.conv4_pooling(x4, edge_x3_index_pool, edge_attr=edge_x4)
        l3 = torch.cat([gmp(x4_pool, batch), gap(x4_pool, batch)], dim=1)

        # sum the max/mean readouts from the three pooling stages
        l = l1 + l2 + l3
        x = F.relu(self.lin1(l))
        x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        x = F.relu(self.lin2(x))
        x = self.lin3(x)
        return x