Example #1
    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, n_steps,
                 readout, activation_func, dropout, grid, device):
        super(Classifier, self).__init__()
        self.device      = device
        self.readout     = readout
        self.layers      = nn.ModuleList()
        self.batch_norms = nn.ModuleList() 
        self.grid        = grid

        # input layer
        self.layers.append(conv.GatedGraphConv(in_dim,hidden_dim,n_steps,1))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
                
        # hidden layers
        for k in range(0,hidden_layers):
            self.layers.append(conv.GatedGraphConv(hidden_dim,hidden_dim,n_steps,1))
            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
            
        # dropout layer
        self.dropout=nn.Dropout(p=dropout)
                
        # last layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(
                nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
        elif self.readout == 'sort':
            self.readout_fcn = conv.SortPooling(100)
        elif self.readout == 'set':
            self.readout_fcn = conv.Set2Set(hidden_dim,2,2)
        else:
            self.readout_fcn = SppPooling(hidden_dim,self.grid)
        
        if self.readout == 'spp':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim),
                nn.ReLU(inplace=True),
                nn.Linear(hidden_dim, n_classes),
            )
        elif self.readout == 'sort':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim*100, n_classes),
            )
        else:
            var=hidden_dim
            if self.readout == 'gap' or self.readout == 'set':
                var*=2
            self.classify = nn.Linear(var, n_classes)
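    # Hypothetical companion forward() for the constructor above; this is an
    # assumption for illustration, not part of the original example. It assumes
    # a DGL graph `g` with node features `h` and the graph-first call convention
    # of recent DGL releases; the SPP branch may need an extra flatten depending
    # on what the custom SppPooling returns.
    def forward(self, g, h):
        for layer, bn in zip(self.layers, self.batch_norms):
            h = layer(g, h)              # GatedGraphConv step (single edge type)
            h = bn(h)
            h = self.dropout(h)
        hg = self.readout_fcn(g, h)      # graph-level readout
        return self.classify(hg)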
Example #2
    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, readout,
                 activation, feat_drop, edge_drop, alpha, K, grid, device):
        super(Classifier, self).__init__()
        self.device = device
        self.readout = readout
        self.layers = nn.ModuleList()
        self.grid = grid
        # input layer
        self.layers.append(nn.Linear(in_dim, hidden_dim))
        # hidden layers
        for i in range(hidden_layers):
            self.layers.append(nn.Linear(hidden_dim, hidden_dim))

        self.activation = activation

        if feat_drop:
            self.feat_drop = nn.Dropout(feat_drop)
        else:
            self.feat_drop = lambda x: x

        self.propagate = conv.APPNPConv(K, alpha, edge_drop)

        # last layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(
                nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
        else:
            self.readout_fcn = SppPooling(hidden_dim,self.grid)
        
        if self.readout == 'spp':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim*2),
                nn.ReLU(inplace=True),
                nn.Linear(2*hidden_dim, n_classes),
            )
        else:
            var=hidden_dim
            if self.readout == 'gap':
                var*=2
            self.classify = nn.Linear(var, n_classes)


        self.reset_parameters()
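    # Hypothetical forward() matching the APPNP-style constructor above; an
    # assumption for illustration, not part of the original example. Node
    # features go through the linear stack, are smoothed by APPNPConv over the
    # graph, then pooled and classified.
    def forward(self, g, h):
        h = self.feat_drop(h)
        for layer in self.layers:
            h = self.activation(layer(h))
        h = self.propagate(g, h)         # APPNPConv takes (graph, node_feats)
        hg = self.readout_fcn(g, h)      # graph-level readout
        return self.classify(hg)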
Example #3
def test_glob_att_pool():
    g = dgl.DGLGraph(nx.path_graph(10))

    gap = nn.GlobalAttentionPooling(th.nn.Linear(5, 1), th.nn.Linear(5, 10))
    print(gap)

    # test#1: basic
    h0 = th.rand(g.number_of_nodes(), 5)
    h1 = gap(h0, g)
    assert h1.shape[0] == 10 and h1.dim() == 1

    # test#2: batched graph
    bg = dgl.batch([g, g, g, g])
    h0 = th.rand(bg.number_of_nodes(), 5)
    h1 = gap(h0, bg)
    assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.dim() == 2
Example #4
    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, aggregate,
                 readout, activation, dropout, device, grid=8):
        super(Classifier, self).__init__()
        self.device  = device
        self.readout = readout
        self.layers  = nn.ModuleList()
        self.grid    = grid
        
        # input layer
        self.layers.append(conv.SAGEConv(in_dim,hidden_dim,aggregate,feat_drop=0.0,
                                    activation=activation))

        # hidden layers
        for k in range(0,hidden_layers):
            self.layers.append(conv.SAGEConv(hidden_dim,hidden_dim,aggregate,feat_drop=dropout,
                                        activation=activation))

        # last layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(
                nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
        else:
            self.readout_fcn = SppPooling(hidden_dim,self.grid)

        if self.readout == 'spp':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim*2),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(2*hidden_dim, 2*hidden_dim),
                nn.ReLU(inplace=True),
                nn.Linear(2*hidden_dim, n_classes),
            )
        else:
            var=hidden_dim
            if self.readout=='gap':
                var*=2
                
            self.classify = nn.Linear(var, n_classes)
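# Illustrative instantiation of the GraphSAGE-based classifier above; every
# argument value here is an assumption chosen for demonstration, not taken
# from the source.
model = Classifier(in_dim=16, hidden_dim=64, n_classes=2, hidden_layers=2,
                   aggregate='mean', readout='mean', activation=torch.relu,
                   dropout=0.1, device='cpu')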
Example #5
def test_glob_att_pool():
    ctx = F.ctx()
    g = dgl.DGLGraph(nx.path_graph(10))

    gap = nn.GlobalAttentionPooling(th.nn.Linear(5, 1), th.nn.Linear(5, 10))
    gap = gap.to(ctx)
    print(gap)

    # test#1: basic
    h0 = F.randn((g.number_of_nodes(), 5))
    h1 = gap(g, h0)
    assert h1.shape[0] == 1 and h1.shape[1] == 10 and h1.dim() == 2

    # test#2: batched graph
    bg = dgl.batch([g, g, g, g])
    h0 = F.randn((bg.number_of_nodes(), 5))
    h1 = gap(bg, h0)
    assert h1.shape[0] == 4 and h1.shape[1] == 10 and h1.dim() == 2
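Note: this test and the one in Example #3 exercise the same GlobalAttentionPooling module against two generations of the DGL nn API: the older feature-first call gap(h0, g), which returns a 1-D vector for a single graph, and the newer graph-first call gap(g, h0), which returns a batched 2-D tensor of shape (num_graphs, out_dim) even for a single graph.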
Example #6
    def __init__(self, in_dim, hidden_dim, embed_dim, hidden_layers, hops,
                 readout, activation_func, dropout, local, norm, grid, K,
                 device):
        super(Classifier, self).__init__()
        self.device = device
        self.readout = readout
        self.layers = nn.ModuleList()
        self.batch_norms = nn.ModuleList()
        self.grid = grid
        self.K = K
        self.hidden_dim = hidden_dim
        self.local = local
        self.norm = norm

        self.layers.append(
            conv.TAGConv(in_dim, hidden_dim, hops, activation=activation_func))

        # hidden layers
        for k in range(0, hidden_layers):
            self.layers.append(
                conv.TAGConv(hidden_dim,
                             hidden_dim,
                             hops,
                             activation=activation_func))

        # dropout layer
        self.dropout = nn.Dropout(p=dropout)

        if self.local:
            return

        # readout layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(
                nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim,
                                                    hidden_dim * 2))
        elif self.readout == 'sort':
            self.readout_fcn = conv.SortPooling(self.K)
        elif self.readout == 'set':
            self.readout_fcn = conv.Set2Set(hidden_dim, 2, 1)
        elif self.readout == 'cov':
            self.readout_fcn = CovPooling(hidden_dim)
        else:
            self.readout_fcn = SppPooling(hidden_dim, self.grid)

        if self.readout == 'spp':
            self.embed = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
                nn.ReLU(inplace=True), nn.Dropout(),
                nn.Linear(2 * hidden_dim, 2 * hidden_dim),
                nn.ReLU(inplace=True), nn.Linear(2 * hidden_dim, embed_dim))
        elif self.readout == 'sort':
            self.embed = nn.Sequential(
                #nn.Dropout(),
                nn.Linear(hidden_dim * self.K, embed_dim))
        elif self.readout == 'cov':
            self.embed = nn.Sequential(
                nn.Dropout(),
                nn.Linear(int(((hidden_dim + 1) * hidden_dim) / 2), embed_dim))
        else:
            var = hidden_dim
            if self.readout == 'gap' or self.readout == 'set':
                var *= 2
            self.embed = nn.Linear(var, embed_dim)
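The 'cov' branch sizes its Linear input as int(((hidden_dim + 1) * hidden_dim) / 2), which is the number of upper-triangular entries (diagonal included) of a hidden_dim x hidden_dim matrix; this suggests the custom CovPooling, not shown in these snippets, emits a half-vectorized covariance of the node features.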
Example #7
    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, in_stats,
                 out_stats, gfc_layers, readout, activation_func, dropout,
                 grid, K, device):
        super(Classifier, self).__init__()
        self.device      = device
        self.readout     = readout
        self.layers      = nn.ModuleList()
        self.batch_norms = nn.ModuleList() 
        self.grid        = grid
        self.K           = K
        self.hidden_dim  = hidden_dim

        self.layers.append(GcapsConv(in_dim,hidden_dim,gfc_layers,1,out_stats,activation=activation_func))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
                
        # hidden layers
        for k in range(0,hidden_layers):
            self.layers.append(GcapsConv(hidden_dim,hidden_dim,gfc_layers,in_stats,out_stats,activation=activation_func))
            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
            
        # dropout layer
        self.dropout=nn.Dropout(p=dropout)
                
        # last layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(
                nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim, hidden_dim * 2))
        elif self.readout == 'sort':
            self.readout_fcn = conv.SortPooling(self.K)
        elif self.readout == 'set':
            self.readout_fcn = conv.Set2Set(hidden_dim,2,1)
        elif self.readout == 'cov':
            self.readout_fcn = CovPooling(hidden_dim)
        else:
            self.readout_fcn = SppPooling(hidden_dim,self.grid)
        
        if self.readout == 'spp':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, n_classes),
            )
        elif self.readout == 'sort':
            self.classify = nn.Sequential(
                #nn.Dropout(),
                nn.Linear(hidden_dim*self.K, n_classes)
            )
        elif self.readout == 'cov':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear( int(((hidden_dim+1)*hidden_dim)/2), n_classes)
            )            
        else:
            var=hidden_dim
            if self.readout == 'gap' or self.readout == 'set':
                var*=2
            self.classify = nn.Linear(var*out_stats, n_classes)
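In the final else branch the classifier input is widened to var * out_stats, which suggests the custom GcapsConv, not shown here, stacks out_stats statistics per hidden channel; the 'spp', 'sort' and 'cov' heads above size their inputs from hidden_dim alone.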
Example #8
    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, ctype,
                 hops, readout, activation_func, dropout, grid, device):
        super(Classifier, self).__init__()
        self.device = device
        self.readout = readout
        self.layers = nn.ModuleList()
        self.batch_norms = nn.ModuleList()
        self.grid = grid

        # input layer
        if ctype == 'tagconv':
            self.layers.append(
                conv.TAGConv(in_dim,
                             hidden_dim,
                             hops,
                             activation=activation_func))
        else:
            # SGConv has no activation argument; its `norm` parameter is a
            # callable applied to the updated node features, so the activation
            # function is routed through it here.
            self.layers.append(
                conv.SGConv(in_dim,
                            hidden_dim,
                            hops,
                            cached=False,
                            norm=activation_func))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        # hidden layers
        for k in range(0, hidden_layers):
            if ctype == 'tagconv':
                self.layers.append(
                    conv.TAGConv(hidden_dim,
                                 hidden_dim,
                                 hops,
                                 activation=activation_func))
            else:
                self.layers.append(
                    conv.SGConv(hidden_dim,
                                hidden_dim,
                                hops,
                                cached=False,
                                norm=activation_func))
            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        # dropout layer
        self.dropout = nn.Dropout(p=dropout)

        # last layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(
                nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim,
                                                    hidden_dim * 2))
        else:
            self.readout_fcn = SppPooling(hidden_dim, self.grid)

        if self.readout == 'spp':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
                nn.ReLU(inplace=True),
                nn.Dropout(),
                nn.Linear(2 * hidden_dim, 2 * hidden_dim),
                nn.ReLU(inplace=True),
                nn.Linear(2 * hidden_dim, n_classes),
            )
        else:
            var = hidden_dim
            if self.readout == 'gap':
                var *= 2
            self.classify = nn.Linear(var, n_classes)
Example #9
    def __init__(self, in_dim, hidden_dim, n_classes, hidden_layers, ctype,
                 hops, readout, activation_func, dropout, grid, K, norm,
                 device):
        super(Classifier, self).__init__()
        self.device = device
        self.readout = readout
        self.layers = nn.ModuleList()
        self.n_layers = nn.ModuleList()
        self.grid = grid
        self.K = K
        self.hidden_dim = hidden_dim
        self.norm = norm

        self.mish = Mish()

        # input layer
        if ctype == 'tagconv':
            self.layers.append(
                conv.TAGConv(in_dim,
                             hidden_dim,
                             hops,
                             activation=activation_func))
        else:
            self.layers.append(
                conv.SGConv(in_dim,
                            hidden_dim,
                            hops,
                            cached=False,
                            norm=activation_func))

        if self.norm == 'batch':
            self.n_layers.append(nn.BatchNorm1d(hidden_dim))
        elif self.norm == 'layer':
            self.n_layers.append(
                nn.LayerNorm(hidden_dim, elementwise_affine=False))
        elif self.norm == 'group':
            self.n_layers.append(nn.GroupNorm(16, hidden_dim))
        elif self.norm == 'instance':
            self.n_layers.append(nn.InstanceNorm1d(hidden_dim))
        else:
            self.n_layers.append(GraphNorm(hidden_dim, affine=False))

        # hidden layers
        for k in range(0, hidden_layers):
            if ctype == 'tagconv':
                self.layers.append(
                    conv.TAGConv(hidden_dim,
                                 hidden_dim,
                                 hops,
                                 activation=activation_func))
            else:
                self.layers.append(
                    conv.SGConv(hidden_dim,
                                hidden_dim,
                                hops,
                                cached=False,
                                norm=activation_func))

            if self.norm == 'batch':
                self.n_layers.append(nn.BatchNorm1d(hidden_dim))
            elif self.norm == 'layer':
                self.n_layers.append(
                    nn.LayerNorm(hidden_dim, elementwise_affine=False))
            elif self.norm == 'group':
                self.n_layers.append(nn.GroupNorm(16, hidden_dim))
            elif self.norm == 'instance':
                self.n_layers.append(nn.InstanceNorm1d(hidden_dim))
            else:
                self.n_layers.append(GraphNorm(hidden_dim, affine=False))

        # dropout layer
        self.dropout = nn.Dropout(p=dropout)

        # last layer
        if self.readout == 'max':
            self.readout_fcn = conv.MaxPooling()
        elif self.readout == 'mean':
            self.readout_fcn = conv.AvgPooling()
        elif self.readout == 'sum':
            self.readout_fcn = conv.SumPooling()
        elif self.readout == 'gap':
            self.readout_fcn = conv.GlobalAttentionPooling(
                nn.Linear(hidden_dim, 1), nn.Linear(hidden_dim,
                                                    hidden_dim * 2))
        elif self.readout == 'sort':
            self.readout_fcn = conv.SortPooling(self.K)
        elif self.readout == 'set':
            self.readout_fcn = conv.Set2Set(hidden_dim, 2, 1)
        elif self.readout == 'cov':
            self.readout_fcn = CovPooling(hidden_dim)
        else:
            self.readout_fcn = SppPooling(hidden_dim, self.grid)

        if self.readout == 'spp':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(hidden_dim * self.grid * self.grid, hidden_dim * 2),
                nn.ReLU(inplace=True), nn.Dropout(),
                nn.Linear(2 * hidden_dim, 2 * hidden_dim),
                nn.ReLU(inplace=True), nn.Linear(2 * hidden_dim, n_classes))
        elif self.readout == 'sort':
            self.classify = nn.Sequential(
                #nn.Dropout(),
                nn.Linear(hidden_dim * self.K, n_classes))
        elif self.readout == 'cov':
            self.classify = nn.Sequential(
                nn.Dropout(),
                nn.Linear(int(((hidden_dim + 1) * hidden_dim) / 2), n_classes))
        else:
            var = hidden_dim
            if self.readout == 'gap' or self.readout == 'set':
                var *= 2
            self.classify = nn.Linear(var, n_classes)
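    # Hypothetical forward() for the constructor above; an assumption for
    # illustration, not part of the original example. It pairs each conv layer
    # with its normalization layer, applies Mish and dropout, then the chosen
    # readout and classifier head; the custom GraphNorm may need the graph as
    # an extra argument.
    def forward(self, g, h):
        for layer, norm in zip(self.layers, self.n_layers):
            h = layer(g, h)              # TAGConv or SGConv step
            h = norm(h)
            h = self.mish(h)
            h = self.dropout(h)
        hg = self.readout_fcn(g, h)      # graph-level readout
        return self.classify(hg)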