def test_tagconv():
    g = dgl.DGLGraph(nx.path_graph(3))
    ctx = F.ctx()
    adj = g.adjacency_matrix(ctx=ctx)
    norm = th.pow(g.in_degrees().float(), -0.5)
    conv = nn.TAGConv(5, 2, bias=True)
    conv = conv.to(ctx)
    print(conv)

    # test#1: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert len(g.ndata) == 0
    assert len(g.edata) == 0
    shp = norm.shape + (1,) * (h0.dim() - 1)
    norm = th.reshape(norm, shp).to(ctx)

    assert F.allclose(h1, _S2AXWb(adj, norm, h0, conv.lin.weight, conv.lin.bias))

    conv = nn.TAGConv(5, 2)
    conv = conv.to(ctx)

    # test#2: basic
    h0 = F.ones((3, 5))
    h1 = conv(g, h0)
    assert h1.shape[-1] == 2

    # test reset_parameters
    old_weight = deepcopy(conv.lin.weight.data)
    conv.reset_parameters()
    new_weight = conv.lin.weight.data
    assert not F.allclose(old_weight, new_weight)
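# The reference helper _S2AXWb is defined elsewhere in the test module. Below is
# a minimal sketch of what such a helper might compute, assuming it mirrors
# TAGConv's definition for K=2: concatenate the 0-, 1- and 2-hop symmetrically
# normalized propagations of X and apply the layer's single linear transform.
# The name _s2axwb_reference and the exact weight layout are assumptions, not
# the repository's actual implementation; torch is assumed imported as th.
def _s2axwb_reference(A, N, X, W, b):
    X1 = N * th.mm(A, N * X)            # \hat{A} X, with \hat{A} = D^{-1/2} A D^{-1/2}
    X2 = N * th.mm(A, N * X1)           # \hat{A}^2 X
    H = th.cat([X, X1, X2], dim=-1)     # stack the 0-, 1- and 2-hop features
    return th.mm(H, W.t()) + b          # one linear transform, as in nn.TAGConv's lin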
def test_tagconv_e_weight(g, idtype):
    ctx = F.ctx()
    g = g.astype(idtype).to(ctx)
    conv = nn.TAGConv(5, 5, bias=True)
    conv = conv.to(ctx)
    feat = F.randn((g.number_of_nodes(), 5))
    eweight = F.ones((g.num_edges(),))
    h = conv(g, feat, edge_weight=eweight)
    assert h.shape[-1] == 5
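# In the DGL test suite the g and idtype arguments are normally injected by
# pytest parametrization. A hedged sketch of one way to drive the test above;
# the wrapper name, the example cycle graph and the dtype list are
# illustrative assumptions, not the repository's actual fixtures.
import pytest

@pytest.mark.parametrize('idtype', [th.int32, th.int64])
@pytest.mark.parametrize('g', [dgl.graph(([0, 1, 2], [1, 2, 0]))])
def test_tagconv_e_weight_small_cycle(g, idtype):
    test_tagconv_e_weight(g, idtype)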
def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, ctype, hops,
             readout, activation_func, dropout, grid, device):
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.batch_norms = nn.ModuleList()
    self.grid = grid

    # input layer
    if ctype == 'tagconv':
        self.layers.append(
            conv.TAGConv(in_dim, hidden_dim, hops, activation=activation_func))
    else:
        self.layers.append(
            conv.SGConv(in_dim, hidden_dim, hops, cached=False,
                        norm=activation_func))
    self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

    # hidden layers
    for k in range(0, hidden_layers):
        if ctype == 'tagconv':
            self.layers.append(
                conv.TAGConv(hidden_dim, hidden_dim, hops,
                             activation=activation_func))
        else:
            self.layers.append(
                conv.SGConv(hidden_dim, hidden_dim, hops, cached=False,
                            norm=activation_func))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    # last layer
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, n_classes),
        )
    else:
        var = hidden_dim
        if self.readout == 'gap':
            var *= 2
        self.classify = nn.Linear(var, n_classes)
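# Only the constructor is shown above. Below is a hypothetical forward pass that
# wires the modules built there together: graph conv -> batch norm -> dropout per
# layer, then a graph-level readout followed by the classification head. The
# flattening of the SPP output is an assumption about SppPooling's output shape;
# this is a sketch, not the repository's actual forward implementation.
def forward(self, g, h):
    for layer, bn in zip(self.layers, self.batch_norms):
        h = layer(g, h)                    # graph convolution (TAGConv or SGConv)
        h = bn(h)                          # per-feature batch normalization
        h = self.dropout(h)
    hg = self.readout_fcn(g, h)            # pool node features to one vector per graph
    if self.readout == 'spp':
        hg = hg.view(hg.shape[0], -1)      # flatten the spatial pyramid grid
    return self.classify(hg)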
def __init__(self, in_dim, hidden_dim, embed_dim, hidden_layers, hops,
             readout, activation_func, dropout, local, norm, grid, K, device):
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.batch_norms = nn.ModuleList()
    self.grid = grid
    self.K = K
    self.hidden_dim = hidden_dim
    self.local = local
    self.norm = norm

    self.layers.append(
        conv.TAGConv(in_dim, hidden_dim, hops, activation=activation_func))

    # hidden layers
    for k in range(0, hidden_layers):
        self.layers.append(
            conv.TAGConv(hidden_dim, hidden_dim, hops,
                         activation=activation_func))

    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    if self.local:
        return

    # readout layer
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    elif self.readout == 'sort':
        self.readout_fcn = conv.SortPooling(self.K)
    elif self.readout == 'set':
        self.readout_fcn = conv.Set2Set(hidden_dim, 2, 1)
    elif self.readout == 'cov':
        self.readout_fcn = CovPooling(hidden_dim)
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    if self.readout == 'spp':
        self.embed = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, embed_dim))
    elif self.readout == 'sort':
        self.embed = nn.Sequential(
            #nn.Dropout(),
            nn.Linear(hidden_dim * self.K, embed_dim))
    elif self.readout == 'cov':
        self.embed = nn.Sequential(
            nn.Dropout(),
            nn.Linear(int(((hidden_dim + 1) * hidden_dim) / 2), embed_dim))
    else:
        var = hidden_dim
        if self.readout == 'gap' or self.readout == 'set':
            var *= 2
        self.embed = nn.Linear(var, embed_dim)
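# CovPooling is a project-local readout defined elsewhere in the repository.
# Judging by the int(((hidden_dim + 1) * hidden_dim) / 2) input size of the
# linear layer that follows it, it appears to return the upper triangle of the
# per-graph node-feature covariance matrix. A standalone, hedged sketch under
# that assumption; the class name and details are illustrative, not the
# repository's actual module.
import torch as th
import torch.nn as nn

class CovPoolingSketch(nn.Module):
    def __init__(self, in_dim):
        super().__init__()
        # indices of the upper triangle (including the diagonal): d*(d+1)/2 entries
        self.register_buffer('triu_idx', th.triu_indices(in_dim, in_dim))

    def forward(self, g, feat):
        # split the batched node features back into per-graph chunks
        chunks = th.split(feat, g.batch_num_nodes().tolist())
        pooled = []
        for x in chunks:
            x = x - x.mean(dim=0, keepdim=True)         # center node features
            cov = x.t() @ x / max(x.shape[0] - 1, 1)    # d x d covariance
            pooled.append(cov[self.triu_idx[0], self.triu_idx[1]])
        return th.stack(pooled)                         # (batch, d*(d+1)/2)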
def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, ctype, hops,
             readout, activation_func, dropout, grid, K, norm, device):
    super(Classifier, self).__init__()
    self.device = device
    self.readout = readout
    self.layers = nn.ModuleList()
    self.n_layers = nn.ModuleList()
    self.grid = grid
    self.K = K
    self.hidden_dim = hidden_dim
    self.norm = norm
    self.mish = Mish()

    # input layer
    if ctype == 'tagconv':
        self.layers.append(
            conv.TAGConv(in_dim, hidden_dim, hops, activation=activation_func))
    else:
        self.layers.append(
            conv.SGConv(in_dim, hidden_dim, hops, cached=False,
                        norm=activation_func))
    if self.norm == 'batch':
        self.n_layers.append(nn.BatchNorm1d(hidden_dim))
    elif self.norm == 'layer':
        self.n_layers.append(
            nn.LayerNorm(hidden_dim, elementwise_affine=False))
    elif self.norm == 'group':
        self.n_layers.append(nn.GroupNorm(16, hidden_dim))
    elif self.norm == 'instance':
        self.n_layers.append(nn.InstanceNorm1d(hidden_dim))
    else:
        self.n_layers.append(GraphNorm(hidden_dim, affine=False))

    # hidden layers
    for k in range(0, hidden_layers):
        if ctype == 'tagconv':
            self.layers.append(
                conv.TAGConv(hidden_dim, hidden_dim, hops,
                             activation=activation_func))
        else:
            self.layers.append(
                conv.SGConv(hidden_dim, hidden_dim, hops, cached=False,
                            norm=activation_func))
        if self.norm == 'batch':
            self.n_layers.append(nn.BatchNorm1d(hidden_dim))
        elif self.norm == 'layer':
            self.n_layers.append(
                nn.LayerNorm(hidden_dim, elementwise_affine=False))
        elif self.norm == 'group':
            self.n_layers.append(nn.GroupNorm(16, hidden_dim))
        elif self.norm == 'instance':
            self.n_layers.append(nn.InstanceNorm1d(hidden_dim))
        else:
            self.n_layers.append(GraphNorm(hidden_dim, affine=False))

    # dropout layer
    self.dropout = nn.Dropout(p=dropout)

    # last layer
    if self.readout == 'max':
        self.readout_fcn = conv.MaxPooling()
    elif self.readout == 'mean':
        self.readout_fcn = conv.AvgPooling()
    elif self.readout == 'sum':
        self.readout_fcn = conv.SumPooling()
    elif self.readout == 'gap':
        self.readout_fcn = conv.GlobalAttentionPooling(
            nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
    elif self.readout == 'sort':
        self.readout_fcn = conv.SortPooling(self.K)
    elif self.readout == 'set':
        self.readout_fcn = conv.Set2Set(hidden_dim, 2, 1)
    elif self.readout == 'cov':
        self.readout_fcn = CovPooling(hidden_dim)
    else:
        self.readout_fcn = SppPooling(hidden_dim, self.grid)

    if self.readout == 'spp':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.ReLU(inplace=True),
            nn.Linear(2 * hidden_dim, n_classes))
    elif self.readout == 'sort':
        self.classify = nn.Sequential(
            #nn.Dropout(),
            nn.Linear(hidden_dim * self.K, n_classes))
    elif self.readout == 'cov':
        self.classify = nn.Sequential(
            nn.Dropout(),
            nn.Linear(int(((hidden_dim + 1) * hidden_dim) / 2), n_classes))
    else:
        var = hidden_dim
        if self.readout == 'gap' or self.readout == 'set':
            var *= 2
        self.classify = nn.Linear(var, n_classes)
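# GraphNorm above is a project-local module, not part of torch.nn. The other
# norm branches are node-wise layers that take only the feature tensor, so how
# this GraphNorm receives the batch structure is an assumption. A standalone
# sketch of per-graph feature standardization consistent with affine=False as
# used above; the class name, signature and exact formulation are illustrative.
import torch as th
import torch.nn as nn

class GraphNormSketch(nn.Module):
    def __init__(self, num_features, affine=True, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.affine = affine
        if affine:
            self.weight = nn.Parameter(th.ones(num_features))
            self.bias = nn.Parameter(th.zeros(num_features))

    def forward(self, g, feat):
        # standardize node features with the mean/variance of their own graph
        chunks = th.split(feat, g.batch_num_nodes().tolist())
        normed = []
        for x in chunks:
            mean = x.mean(dim=0, keepdim=True)
            var = x.var(dim=0, unbiased=False, keepdim=True)
            normed.append((x - mean) / th.sqrt(var + self.eps))
        out = th.cat(normed, dim=0)
        if self.affine:
            out = out * self.weight + self.bias
        return out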