def test_simple_pool():
    """Validate Sum/Avg/Max/Sort pooling on a single graph and a batched graph."""
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(15))
    sum_pool = nn.SumPooling()
    avg_pool = nn.AvgPooling()
    max_pool = nn.MaxPooling()
    sort_pool = nn.SortPooling(10)  # k = 10
    print(sum_pool, avg_pool, max_pool, sort_pool)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    if F.gpu_ctx():
        sum_pool = sum_pool.to(ctx)
        avg_pool = avg_pool.to(ctx)
        max_pool = max_pool.to(ctx)
        sort_pool = sort_pool.to(ctx)
        h0 = h0.to(ctx)
    assert F.allclose(sum_pool(g, h0), F.sum(h0, 0))
    assert F.allclose(avg_pool(g, h0), F.mean(h0, 0))
    assert F.allclose(max_pool(g, h0), F.max(h0, 0))
    h1 = sort_pool(g, h0)
    assert h1.shape[0] == 10 * 5 and h1.dim() == 1

    # test#2: batched graph
    g_ = dgl.DGLGraph(nx.path_graph(5))
    bg = dgl.batch([g, g_, g, g_, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    if F.gpu_ctx():
        h0 = h0.to(ctx)
    # node-index boundaries of the five component graphs inside the batch
    bounds = [0, 15, 20, 35, 40, 55]
    chunks = [h0[a:b] for a, b in zip(bounds[:-1], bounds[1:])]
    assert F.allclose(sum_pool(bg, h0),
                      th.stack([F.sum(c, 0) for c in chunks], 0))
    assert F.allclose(avg_pool(bg, h0),
                      th.stack([F.mean(c, 0) for c in chunks], 0))
    assert F.allclose(max_pool(bg, h0),
                      th.stack([F.max(c, 0) for c in chunks], 0))
    h1 = sort_pool(bg, h0)
    assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.dim() == 2
def __init__(self, in_feats, out_feats, graph=False):
    """Linear transform, optionally followed by graph-level average pooling.

    graph=False leaves ``self.pool`` as None (node-level model).
    """
    super(Model, self).__init__()
    self.linear = th.nn.Linear(in_feats, out_feats)
    # Only graph-level models need a readout over nodes.
    self.pool = nn.AvgPooling() if graph else None
def test_simple_pool():
    """Validate Sum/Avg/Max/Sort pooling outputs on one graph and a batch."""
    g = dgl.DGLGraph(nx.path_graph(15))
    sum_pool = nn.SumPooling()
    avg_pool = nn.AvgPooling()
    max_pool = nn.MaxPooling()
    sort_pool = nn.SortPooling(10)  # k = 10
    print(sum_pool, avg_pool, max_pool, sort_pool)

    # test#1: basic
    h0 = th.rand(g.number_of_nodes(), 5)
    assert th.allclose(sum_pool(h0, g), th.sum(h0, 0))
    assert th.allclose(avg_pool(h0, g), th.mean(h0, 0))
    assert th.allclose(max_pool(h0, g), th.max(h0, 0)[0])
    h1 = sort_pool(h0, g)
    assert h1.shape[0] == 10 * 5 and h1.dim() == 1

    # test#2: batched graph
    g_ = dgl.DGLGraph(nx.path_graph(5))
    bg = dgl.batch([g, g_, g, g_, g])
    h0 = th.rand(bg.number_of_nodes(), 5)
    # node-index boundaries of the five component graphs inside the batch
    bounds = [0, 15, 20, 35, 40, 55]
    chunks = [h0[a:b] for a, b in zip(bounds[:-1], bounds[1:])]
    assert th.allclose(sum_pool(h0, bg),
                       th.stack([th.sum(c, 0) for c in chunks], 0))
    assert th.allclose(avg_pool(h0, bg),
                       th.stack([th.mean(c, 0) for c in chunks], 0))
    assert th.allclose(max_pool(h0, bg),
                       th.stack([th.max(c, 0)[0] for c in chunks], 0))
    h1 = sort_pool(h0, bg)
    assert h1.shape[0] == 5 and h1.shape[1] == 10 * 5 and h1.dim() == 2
def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, n_steps,
             readout, activation_func, dropout, grid, device):
    """Gated-graph-conv classifier with a configurable graph readout.

    ``readout`` selects the pooling: 'max'/'mean'/'sum'/'gap'/'sort'/'set',
    anything else falls back to SppPooling over a ``grid`` x ``grid`` map.
    """
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.batch_norms = nn.ModuleList()
    self.grid = grid

    # input layer
    self.layers.append(conv.GatedGraphConv(in_dim, hidden_dim, n_steps, 1))
    self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
    # hidden layers
    for _ in range(hidden_layers):
        self.layers.append(conv.GatedGraphConv(hidden_dim, hidden_dim, n_steps, 1))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    # graph readout
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    elif self.readout == 'sort':
        self.readout_fcn = conv.SortPooling(100)
    elif self.readout == 'set':
        self.readout_fcn = conv.Set2Set(hidden_dim, 2, 2)
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    # classifier head, sized to the readout's output width
    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(hidden_dim, n_classes),
        )
    elif self.readout == 'sort':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * 100, n_classes),
        )
    else:
        out_width = hidden_dim
        # 'gap' and 'set' readouts emit 2*hidden_dim features
        if self.readout in ('gap', 'set'):
            out_width *= 2
        self.classify = nn.Linear(out_width, n_classes)
def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, readout,
             activation, feat_drop, edge_drop, alpha, K, grid, device):
    """APPNP-style classifier: MLP feature layers + APPNPConv propagation,
    followed by a configurable graph readout and linear classifier."""
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.grid = grid

    # input layer
    self.layers.append(nn.Linear(in_dim, hidden_dim))
    # hidden layers
    for _ in range(hidden_layers):
        self.layers.append(nn.Linear(hidden_dim, hidden_dim))
    self.activation = activation
    # identity function when no feature dropout is requested
    self.feat_drop = nn.Dropout(feat_drop) if feat_drop else (lambda x: x)
    self.propagate = conv.APPNPConv(K, alpha, edge_drop)

    # graph readout
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    # classifier head
    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, n_classes),
        )
    else:
        out_width = hidden_dim
        # 'gap' emits 2*hidden_dim features
        if self.readout == 'gap':
            out_width *= 2
        self.classify = nn.Linear(out_width, n_classes)
    self.reset_parameters()
def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, aggregate,
             readout, activation, dropout, device, grid=8):
    """GraphSAGE classifier with a configurable graph readout."""
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.grid = grid

    # input layer: raw features get no dropout
    self.layers.append(conv.SAGEConv(in_dim, hidden_dim, aggregate,
                                     feat_drop=0.0, activation=activation))
    # hidden layers
    for _ in range(hidden_layers):
        self.layers.append(conv.SAGEConv(hidden_dim, hidden_dim, aggregate,
                                         feat_drop=dropout, activation=activation))

    # graph readout
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    # classifier head
    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, n_classes),
        )
    else:
        out_width = hidden_dim
        # 'gap' emits 2*hidden_dim features
        if self.readout == 'gap':
            out_width *= 2
        self.classify = nn.Linear(out_width, n_classes)
def __init__(self, in_dim, hidden_dim, embed_dim, hidden_layers, hops, readout,
             activation_func, dropout, local, norm, grid, K, device):
    """TAGConv embedder; when ``local`` is set, the readout/embedding head
    is skipped entirely (node-level mode)."""
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.batch_norms = nn.ModuleList()
    self.grid = grid
    self.K = K
    self.hidden_dim = hidden_dim
    self.local = local
    self.norm = norm

    # input layer
    self.layers.append(
        conv.TAGConv(in_dim, hidden_dim, hops, activation=activation_func))
    # hidden layers
    for _ in range(hidden_layers):
        self.layers.append(
            conv.TAGConv(hidden_dim, hidden_dim, hops, activation=activation_func))
    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    if self.local:
        # local (node-level) mode: no graph readout / embedding head needed
        return

    # readout layer
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    elif self.readout == 'sort':
        self.readout_fcn = conv.SortPooling(self.K)
    elif self.readout == 'set':
        self.readout_fcn = conv.Set2Set(hidden_dim, 2, 1)
    elif self.readout == 'cov':
        self.readout_fcn = CovPooling(hidden_dim)
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    # embedding head, sized to the readout's output width
    if self.readout == 'spp':
        self.embed = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, embed_dim))
    elif self.readout == 'sort':
        self.embed = nn.Sequential(
            nn.Linear(hidden_dim * self.K, embed_dim))
    elif self.readout == 'cov':
        # covariance pooling emits the upper triangle: (d+1)*d/2 entries
        self.embed = nn.Sequential(
            nn.Dropout(),
            nn.Linear(int(((hidden_dim + 1) * hidden_dim) / 2), embed_dim))
    else:
        out_width = hidden_dim
        # 'gap' and 'set' readouts emit 2*hidden_dim features
        if self.readout in ('gap', 'set'):
            out_width *= 2
        self.embed = nn.Linear(out_width, embed_dim)
def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, in_stats,
             out_stats, gfc_layers, readout, activation_func, dropout,
             grid, K, device):
    """GcapsConv classifier with batch norm per layer and a configurable
    graph readout; the default head width is scaled by ``out_stats``."""
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.batch_norms = nn.ModuleList()
    self.grid = grid
    self.K = K
    self.hidden_dim = hidden_dim

    # input layer: a single input statistic
    self.layers.append(GcapsConv(in_dim, hidden_dim, gfc_layers, 1,
                                 out_stats, activation=activation_func))
    self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
    # hidden layers
    for _ in range(hidden_layers):
        self.layers.append(GcapsConv(hidden_dim, hidden_dim, gfc_layers,
                                     in_stats, out_stats,
                                     activation=activation_func))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    # graph readout
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    elif self.readout == 'sort':
        self.readout_fcn = conv.SortPooling(self.K)
    elif self.readout == 'set':
        self.readout_fcn = conv.Set2Set(hidden_dim, 2, 1)
    elif self.readout == 'cov':
        self.readout_fcn = CovPooling(hidden_dim)
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    # classifier head, sized to the readout's output width
    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, n_classes),
        )
    elif self.readout == 'sort':
        self.classify = nn.Sequential(
            nn.Linear(hidden_dim * self.K, n_classes),
        )
    elif self.readout == 'cov':
        # covariance pooling emits the upper triangle: (d+1)*d/2 entries
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(int(((hidden_dim + 1) * hidden_dim) / 2), n_classes),
        )
    else:
        out_width = hidden_dim
        # 'gap' and 'set' readouts emit 2*hidden_dim features
        if self.readout in ('gap', 'set'):
            out_width *= 2
        self.classify = nn.Linear(out_width * out_stats, n_classes)
def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, ctype, hops,
             readout, activation_func, dropout, grid, device):
    """TAGConv/SGConv classifier (selected by ``ctype``) with batch norm
    per layer and a configurable graph readout."""
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.batch_norms = nn.ModuleList()
    self.grid = grid

    # input layer
    if ctype == 'tagconv':
        self.layers.append(
            conv.TAGConv(in_dim, hidden_dim, hops, activation=activation_func))
    else:
        self.layers.append(
            conv.SGConv(in_dim, hidden_dim, hops,
                        cached=False, norm=activation_func))
    self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
    # hidden layers
    for _ in range(hidden_layers):
        if ctype == 'tagconv':
            self.layers.append(
                conv.TAGConv(hidden_dim, hidden_dim, hops,
                             activation=activation_func))
        else:
            self.layers.append(
                conv.SGConv(hidden_dim, hidden_dim, hops,
                            cached=False, norm=activation_func))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    # graph readout
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    # classifier head
    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, n_classes),
        )
    else:
        out_width = hidden_dim
        # 'gap' emits 2*hidden_dim features
        if self.readout == 'gap':
            out_width *= 2
        self.classify = nn.Linear(out_width, n_classes)
def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, ctype, hops,
             readout, activation_func, dropout, grid, K, norm, device):
    """TAGConv/SGConv classifier with a per-layer normalization selected by
    ``norm`` ('batch'/'layer'/'group'/'instance', else GraphNorm) and a
    configurable graph readout."""
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.n_layers = nn.ModuleList()
    self.grid = grid
    self.K = K
    self.hidden_dim = hidden_dim
    self.norm = norm
    self.mish = Mish()

    def make_conv(n_in):
        # one graph-conv layer of the configured type
        if ctype == 'tagconv':
            return conv.TAGConv(n_in, hidden_dim, hops,
                                activation=activation_func)
        return conv.SGConv(n_in, hidden_dim, hops,
                           cached=False, norm=activation_func)

    def make_norm():
        # one normalization layer of the configured kind
        if self.norm == 'batch':
            return nn.BatchNorm1d(hidden_dim)
        if self.norm == 'layer':
            return nn.LayerNorm(hidden_dim, elementwise_affine=False)
        if self.norm == 'group':
            return nn.GroupNorm(16, hidden_dim)
        if self.norm == 'instance':
            return nn.InstanceNorm1d(hidden_dim)
        return GraphNorm(hidden_dim, affine=False)

    # input layer
    self.layers.append(make_conv(in_dim))
    self.n_layers.append(make_norm())
    # hidden layers
    for _ in range(hidden_layers):
        self.layers.append(make_conv(hidden_dim))
        self.n_layers.append(make_norm())
    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    # graph readout
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    elif self.readout == 'sort':
        self.readout_fcn = conv.SortPooling(self.K)
    elif self.readout == 'set':
        self.readout_fcn = conv.Set2Set(hidden_dim, 2, 1)
    elif self.readout == 'cov':
        self.readout_fcn = CovPooling(hidden_dim)
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    # classifier head, sized to the readout's output width
    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, n_classes))
    elif self.readout == 'sort':
        self.classify = nn.Sequential(
            nn.Linear(hidden_dim * self.K, n_classes))
    elif self.readout == 'cov':
        # covariance pooling emits the upper triangle: (d+1)*d/2 entries
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(int(((hidden_dim + 1) * hidden_dim) / 2), n_classes))
    else:
        out_width = hidden_dim
        # 'gap' and 'set' readouts emit 2*hidden_dim features
        if self.readout in ('gap', 'set'):
            out_width *= 2
        self.classify = nn.Linear(out_width, n_classes)