def __init__(
        self,
        num_nodes: int,
        in_channels: int,
        hidden_channels: int,
        out_channels: int,
        kernel_size: int,
        K: int,
        normalization: str = "sym",
        bias: bool = True,
    ):
        super(STConv, self).__init__()
        self.num_nodes = num_nodes
        self.in_channels = in_channels
        self.hidden_channels = hidden_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.K = K
        self.normalization = normalization
        self.bias = bias

        self._temporal_conv1 = TemporalConv(
            in_channels=in_channels,
            out_channels=hidden_channels,
            kernel_size=kernel_size,
        )

        self._graph_conv = ChebConv(
            in_channels=hidden_channels,
            out_channels=hidden_channels,
            K=K,
            normalization=normalization,
            bias=bias,
        )

        self._temporal_conv2 = TemporalConv(
            in_channels=hidden_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
        )

        self._batch_norm = nn.BatchNorm2d(num_nodes)
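
A minimal usage sketch for this constructor, assuming the STConv.forward from
torch_geometric_temporal (inputs shaped (batch, time steps, nodes, channels);
each TemporalConv shrinks the time axis by kernel_size - 1):

import torch

model = STConv(num_nodes=20, in_channels=4, hidden_channels=16,
               out_channels=8, kernel_size=3, K=3)
x = torch.randn(2, 12, 20, 4)                # (batch, time, nodes, features)
edge_index = torch.randint(0, 20, (2, 60))   # random graph, illustration only
out = model(x, edge_index)                   # -> (2, 8, 20, 8)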
Example 2
 def __init__(self, hist_len, pred_len, in_dim, city_num, batch_size,
              device, edge_index):
     super(GC_LSTM, self).__init__()
     self.edge_index = torch.LongTensor(edge_index)
     # tile the edge list once per graph in the batch and offset node ids by
     # the per-graph node count so the batch forms one disconnected graph
     self.edge_index = self.edge_index.view(2, 1, -1).repeat(
         1, batch_size,
         1) + torch.arange(batch_size).view(1, -1, 1) * city_num
     self.edge_index = self.edge_index.view(2, -1)
     self.device = device
     self.hist_len = hist_len
     self.pred_len = pred_len
     self.city_num = city_num
     self.batch_size = batch_size
     self.in_dim = in_dim
     self.hid_dim = 32
     self.out_dim = 1
     self.gcn_out = 1
     self.conv = ChebConv(self.in_dim, self.gcn_out, K=2)
     self.lstm_cell = LSTMCell(self.in_dim + self.gcn_out, self.hid_dim)
     self.fc_out = nn.Linear(self.hid_dim, self.out_dim)
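
The view/repeat/arange dance above is the standard trick for batching copies
of one fixed graph: every copy's node ids get shifted by the per-graph node
count. The same idea in isolation, with toy sizes (not the model's real ones):

import torch

edge_index = torch.LongTensor([[0, 1], [1, 2]])  # one graph: 3 nodes, 2 edges
num_nodes, batch_size = 3, 2
batched = edge_index.view(2, 1, -1).repeat(1, batch_size, 1) \
    + torch.arange(batch_size).view(1, -1, 1) * num_nodes
print(batched.view(2, -1))  # tensor([[0, 1, 3, 4], [1, 2, 4, 5]])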
Example 3
    def __init__(self, num_node_features, num_edge_features,
                 node_hidden_channels, edge_hidden_channels, num_layers,
                 num_classes):
        super(MyDeeperGCN, self).__init__()

        # NOTE: `T` (the Chebyshev filter size K), `hidden_channels3`,
        # `dropout1` and `dropout2` are module-level hyperparameters of the
        # original script, not attributes of this class.
        self.node_encoder = ChebConv(num_node_features, node_hidden_channels,
                                     T)
        self.edge_encoder = Linear(num_edge_features, edge_hidden_channels)

        self.layers = torch.nn.ModuleList()
        for i in range(1, num_layers + 1):
            conv = NNConv(
                node_hidden_channels, node_hidden_channels,
                MapE2NxN(edge_hidden_channels,
                         node_hidden_channels * node_hidden_channels,
                         hidden_channels3))
            norm = LayerNorm(node_hidden_channels, elementwise_affine=True)
            act = ReLU(inplace=True)

            layer = DeepGCNLayer(conv,
                                 norm,
                                 act,
                                 block='res+',
                                 dropout=dropout1,
                                 ckpt_grad=i % 3)
            self.layers.append(layer)

        self.postition = PositionEncode(node_hidden_channels)
        self.transformer = torch.nn.TransformerEncoder(
            torch.nn.TransformerEncoderLayer(node_hidden_channels,
                                             8,
                                             256,
                                             dropout=0.2,
                                             activation='relu'), 2)

        self.lin = Linear(node_hidden_channels, num_classes)
        self.dropout = Dropout(dropout2)
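
Because `T`, `hidden_channels3`, `dropout1` and `dropout2` live outside the
class, instantiating it requires defining them first. A hypothetical setup
(every value below is a placeholder, not taken from the original script):

T = 3                  # Chebyshev filter size K for the node encoder
hidden_channels3 = 64  # hidden width of the MapE2NxN edge network
dropout1 = 0.1         # dropout inside each DeepGCNLayer
dropout2 = 0.3         # dropout before the final linear layer

model = MyDeeperGCN(num_node_features=9, num_edge_features=3,
                    node_hidden_channels=64, edge_hidden_channels=16,
                    num_layers=7, num_classes=1)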
Example 4
class ChebNet(nn.Module):
    """ 2 Layer ChebNet based on pytorch geometric.

    Parameters
    ----------
    nfeat : int
        size of input feature dimension
    nhid : int
        number of hidden units
    nclass : int
        size of output dimension
    num_hops: int
        number of hops in ChebConv
    dropout : float
        dropout rate for ChebNet
    lr : float
        learning rate for ChebNet
    weight_decay : float
        weight decay coefficient (L2 regularization) for ChebNet.
    with_bias: bool
        whether to include bias term in ChebNet weights.
    device: str
        'cpu' or 'cuda'.

    Examples
    --------
    We can first load the dataset and then train ChebNet.

    >>> from deeprobust.graph.data import Dataset, Dpr2Pyg
    >>> from deeprobust.graph.defense import ChebNet
    >>> data = Dataset(root='/tmp/', name='cora')
    >>> adj, features, labels = data.adj, data.features, data.labels
    >>> idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
    >>> cheby = ChebNet(nfeat=features.shape[1],
              nhid=16, num_hops=3,
              nclass=labels.max().item() + 1,
              dropout=0.5, device='cpu')
    >>> cheby = cheby.to('cpu')
    >>> pyg_data = Dpr2Pyg(data) # convert deeprobust dataset to pyg dataset
    >>> cheby.fit(pyg_data, patience=10, verbose=True) # train with earlystopping
    """
    def __init__(self,
                 nfeat,
                 nhid,
                 nclass,
                 num_hops=3,
                 dropout=0.5,
                 lr=0.01,
                 weight_decay=5e-4,
                 with_bias=True,
                 device=None):

        super(ChebNet, self).__init__()

        assert device is not None, "Please specify 'device'!"
        self.device = device

        self.conv1 = ChebConv(nfeat, nhid, K=num_hops, bias=with_bias)

        self.conv2 = ChebConv(nhid, nclass, K=num_hops, bias=with_bias)

        self.dropout = dropout
        self.weight_decay = weight_decay
        self.lr = lr
        self.output = None
        self.best_model = None
        self.best_output = None

    def forward(self, data):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)

    def initialize(self):
        """Initialize parameters of ChebNet.
        """
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def fit(self,
            pyg_data,
            train_iters=200,
            initialize=True,
            verbose=False,
            patience=500,
            **kwargs):
        """Train the ChebNet model, when idx_val is not None, pick the best model
        according to the validation loss.

        Parameters
        ----------
        pyg_data :
            pytorch geometric dataset object
        train_iters : int
            number of training epochs
        initialize : bool
            whether to initialize parameters before training
        verbose : bool
            whether to show verbose logs
        patience : int
            patience for early stopping on the validation loss
        """

        # infer the device from the model parameters
        self.device = next(self.parameters()).device
        if initialize:
            self.initialize()

        self.data = pyg_data[0].to(self.device)
        # By default, it is trained with early stopping on validation
        self.train_with_early_stopping(train_iters, patience, verbose)

    def train_with_early_stopping(self, train_iters, patience, verbose):
        """early stopping based on the validation loss
        """
        if verbose:
            print('=== training ChebNet model ===')
        optimizer = optim.Adam(self.parameters(),
                               lr=self.lr,
                               weight_decay=self.weight_decay)

        labels = self.data.y
        train_mask, val_mask = self.data.train_mask, self.data.val_mask

        early_stopping = patience
        best_loss_val = float('inf')

        for i in range(train_iters):
            self.train()
            optimizer.zero_grad()
            output = self.forward(self.data)

            loss_train = F.nll_loss(output[train_mask], labels[train_mask])
            loss_train.backward()
            optimizer.step()

            if verbose and i % 10 == 0:
                print('Epoch {}, training loss: {}'.format(
                    i, loss_train.item()))

            self.eval()
            output = self.forward(self.data)
            loss_val = F.nll_loss(output[val_mask], labels[val_mask])

            if best_loss_val > loss_val:
                best_loss_val = loss_val
                self.output = output
                weights = deepcopy(self.state_dict())
                patience = early_stopping
            else:
                patience -= 1
            if i > early_stopping and patience <= 0:
                break

        if verbose:
            print('=== early stopping at {0}, loss_val = {1} ==='.format(
                i, best_loss_val))
        self.load_state_dict(weights)

    def test(self):
        """Evaluate ChebNet performance on test set.

        Parameters
        ----------
        idx_test :
            node testing indices
        """
        self.eval()
        test_mask = self.data.test_mask
        labels = self.data.y
        output = self.forward(self.data)
        # output = self.output
        loss_test = F.nll_loss(output[test_mask], labels[test_mask])
        acc_test = utils.accuracy(output[test_mask], labels[test_mask])
        print("Test set results:", "loss= {:.4f}".format(loss_test.item()),
              "accuracy= {:.4f}".format(acc_test.item()))
        return acc_test.item()
Example 5
    def __init__(self, num_features, num_classes, nh1=64, K=8):
        super(KipfNet_old, self).__init__()
#        self.conv1 = GCNConv(n_features, 60, cached=True)
#        self.conv2 = GCNConv(60, n_classes, cached=True)
        self.conv1 = ChebConv(num_features, nh1, K=K)
        self.conv2 = ChebConv(nh1, num_classes, K=K)
Example 6
 def __init__(self):
     super(Net, self).__init__()
     # self.conv1 = GCNConv(dataset.num_features, 16, cached=True)
     # self.conv2 = GCNConv(16, dataset.num_classes, cached=True)
     self.conv1 = ChebConv(data.num_features, 16, K=2)
     self.conv2 = ChebConv(16, data.num_classes, K=2)
Example 7
 def __init__(self, dataset):
     super(Net, self).__init__()
     self.conv1 = ChebConv(dataset.num_features, args.hidden, args.num_hops)
     self.conv2 = ChebConv(args.hidden, dataset.num_classes, args.num_hops)
Example 8
 def __init__(self, features_num, num_class, hidden, num_hops, dropout):
     super(Cheb_Net, self).__init__()
     self.dropout = dropout
     self.conv1 = ChebConv(features_num, hidden, num_hops)
     self.conv2 = ChebConv(hidden, num_class, num_hops)
Example 9
 def __init__(self, num_features, num_nodes):
     super(ChebNet, self).__init__()
     self.conv1 = ChebConv(num_features, 32, 2)
     self.conv2 = ChebConv(32, 64, 2)
     self.fc1 = torch.nn.Linear(64, 128)
     self.fc2 = torch.nn.Linear(128, num_nodes)
Example 10
    def init_layers(self, batch):


        self.layer_lst = [] ##[in_channel, out_channel, in_pn, out_pn, max_neighbor_num, neighbor_num_lst,neighbor_id_lstlst,conv_layer, residual_layer]
        
        self.layer_num = len(self.channel_lst)
        
        in_point_num = self.point_num
        in_channel = 3
        
        for l in range(self.layer_num):
            out_channel = self.channel_lst[l]
            weight_num = self.weight_num_lst[l]
            residual_rate = self.residual_rate_lst[l]
            

            connection_info  = np.load(self.connection_layer_fn_lst[l])
            print ("##Layer",self.connection_layer_fn_lst[l])
            out_point_num = connection_info.shape[0]

            neighbor_num_lst = torch.FloatTensor(connection_info[:,0].astype(float)).cuda() #out_point_num*1
            neighbor_id_dist_lstlst = connection_info[:, 1:] #out_point_num*(max_neighbor_num*2)
            neighbor_id_lstlst = neighbor_id_dist_lstlst.reshape((out_point_num, -1,2))[:,:,0] #out_point_num*max_neighbor_num
            #neighbor_id_lstlst = torch.LongTensor(neighbor_id_lstlst)
            avg_neighbor_num = neighbor_num_lst.mean().item()
            max_neighbor_num = neighbor_id_lstlst.shape[1]

            pc_mask  = torch.ones(in_point_num+1).cuda()
            pc_mask[in_point_num]=0
            neighbor_mask_lst = pc_mask[ torch.LongTensor(neighbor_id_lstlst)].contiguous() #out_pn*max_neighbor_num neighbor is 1 otherwise 0
            
            

            if((residual_rate<0) or (residual_rate>1)):
                print ("Invalid residual rate", residual_rate)
            ####parameters for conv###############
            conv_layer = ""
            if(residual_rate<1):
                edge_index_lst = self.get_edge_index_lst_from_neighbor_id_lstlst(neighbor_id_lstlst, neighbor_num_lst)
                print ("edge_index_lst", edge_index_lst.shape)
                if(self.conv_method=="Cheb"):
                    conv_layer = edge_index_lst, ChebConv(in_channel, out_channel, K=6, normalization='sym', bias=True)
                    # move the parameters to the GPU, scale them down, and
                    # register them under a "Cheb" prefix
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()/10
                        self.register_parameter("Cheb"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="GAT"):
                    #conv_layer = edge_index_lst,GATConv(in_channel, out_channel, heads=1)
                    if (l != (self.layer_num // 2 - 2)) and (l != (self.layer_num - 1)):  # not the middle or last layer
                        conv_layer = edge_index_lst, GATConv(in_channel, out_channel, heads=8, concat=True)
                        out_channel = out_channel*8
                    else:
                        conv_layer = edge_index_lst, GATConv(in_channel, out_channel, heads=8, concat=False)
                    # move the parameters to the GPU and register them
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data = p.data.cuda()
                        self.register_parameter("GAT"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="GMM"):
                    pseudo_coordinates = self.get_GMM_pseudo_coordinates( edge_index_lst, neighbor_num_lst) #edge_num*2
                    #print ("pseudo_coordinates", pseudo_coordinates.shape)
                    conv_layer = edge_index_lst,pseudo_coordinates, GMMConv(in_channel, out_channel, dim=2 , kernel_size=25)
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                        self.register_parameter("GMM"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="FeaST"):
                    
                    conv_layer = edge_index_lst,FeaStConv(in_channel, out_channel, heads=32)
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                        self.register_parameter("FeaST"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="vw"):
                    weights = torch.randn(out_point_num, max_neighbor_num, out_channel,in_channel)/(avg_neighbor_num*weight_num)
                    
                    weights = nn.Parameter(weights.cuda())
                    
                    self.register_parameter("weights"+str(l),weights)
                    
                    bias = nn.Parameter(torch.zeros(out_channel).cuda())
                    if(self.perpoint_bias == 1):
                        bias= nn.Parameter(torch.zeros(out_point_num, out_channel).cuda())
                    self.register_parameter("bias"+str(l),bias)
                    
                    conv_layer = (weights, bias)
                else:
                    raise ValueError("Unknown convolution layer: %s" % self.conv_method)
                

            zeros_batch_outpn_outchannel = torch.zeros((batch, out_point_num, out_channel)).cuda()
            ####parameters for residual###############
            residual_layer = ""

            if(residual_rate>0):
                p_neighbors = ""
                weight_res = ""

                if((out_point_num != in_point_num) and (self.use_vanilla_pool == 0)):
                    p_neighbors = nn.Parameter((torch.randn(out_point_num, max_neighbor_num)/(avg_neighbor_num)).cuda())
                    self.register_parameter("p_neighbors"+str(l),p_neighbors)
                
                if(out_channel != in_channel):
                    weight_res = torch.randn(out_channel,in_channel)
                    #self.normalize_weights(weight_res)
                    weight_res = weight_res/out_channel
                    weight_res = nn.Parameter(weight_res.cuda())
                    self.register_parameter("weight_res"+str(l),weight_res)
                
                residual_layer = (weight_res, p_neighbors)
            
            ##### put everything together
            
            layer = (in_channel, out_channel, in_point_num, out_point_num, weight_num, max_neighbor_num, neighbor_num_lst,neighbor_id_lstlst, conv_layer, residual_layer, residual_rate, neighbor_mask_lst, zeros_batch_outpn_outchannel)
            
            self.layer_lst +=[layer]
            
            print ("in_channel", in_channel,"out_channel",out_channel, "in_point_num", in_point_num, "out_point_num", out_point_num, "weight_num", weight_num,
                   "max_neighbor_num", max_neighbor_num, "avg_neighbor_num", avg_neighbor_num)
            
            in_point_num=out_point_num
            in_channel = out_channel
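
The helper get_edge_index_lst_from_neighbor_id_lstlst is not shown in this
snippet. A plausible minimal sketch of the conversion it performs, assuming
(as the mask above does) that padding entries equal to in_point_num mean
"no neighbor":

import torch

def neighbor_lists_to_edge_index(neighbor_id_lstlst, in_point_num):
    # neighbor_id_lstlst: (out_point_num, max_neighbor_num) ids of the source
    # points feeding each output point, padded with in_point_num
    src, dst = [], []
    for out_id, neighbors in enumerate(neighbor_id_lstlst):
        for n_id in neighbors:
            if int(n_id) != in_point_num:  # skip padding entries
                src.append(int(n_id))
                dst.append(out_id)
    return torch.LongTensor([src, dst])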
Example 11
    def __init__(self,
                 clusters=None,
                 knn=4,
                 maxCluster=0,
                 clustering='None',
                 categories=None,
                 coords=False,
                 n_hidden=64,
                 n_hidden2=32,
                 n_hidden3=16,
                 K_block=6,
                 K_mix=1,
                 skipconv=False,
                 do_bn=True,
                 conv='Cheb',
                 layers=1,
                 midSkip=False,
                 p=0.5,
                 includeHeading=False):
        super(KipfNet, self).__init__()

        self.clusters = clusters
        self.knn = knn
        self.maxCluster = maxCluster
        self.clustering = clustering
        self.categories = categories - 1 if categories is not None else None
        self.skipconv = skipconv
        self.coords = coords
        self.midSkip = midSkip
        self.layers = layers
        self.bn = BatchNorm1d(n_hidden)
        self.bn2 = BatchNorm1d(n_hidden2)
        self.bn3 = BatchNorm1d(n_hidden3)
        self.p = p

        # Input size depends on heading channel
        if includeHeading:
            n_input = 36
        else:
            n_input = 24

        # Add coordinates and categories to input
        if coords:
            n_input = n_input + 2
            if categories is not None:
                n_input = n_input + 1
        if layers == 1:
            midSkip = False

        # Build model (selected number of convolution blocks)
        self.moduleList1 = torch.nn.ModuleList()
        self.skipList1 = torch.nn.ModuleList()
        for i in range(layers):
            if i == 0:
                self.moduleList1.append(
                    Kipfblock(n_input=n_input,
                              n_hidden=n_hidden,
                              K=K_block,
                              bn=do_bn,
                              conv=conv))
                n_mix = n_hidden + n_input
            elif i == 1:
                self.moduleList1.append(
                    Kipfblock(n_input=n_hidden,
                              n_hidden=n_hidden2,
                              K=K_block,
                              bn=do_bn,
                              conv=conv))
                n_mix = n_hidden2 + n_hidden
            else:
                self.moduleList1.append(
                    Kipfblock(n_input=n_hidden2,
                              n_hidden=n_hidden3,
                              K=K_block,
                              bn=do_bn,
                              conv=conv))
                n_mix = n_hidden3 + n_hidden2
            if midSkip:
                if i == 0:
                    if conv == 'Cheb':
                        self.skipList1.append(
                            ChebConv(n_mix, n_hidden, K=K_mix))
                    elif conv == 'GCN':
                        self.skipList1.append(GCNConv(n_mix, n_hidden))
                    elif conv == 'SAGE':
                        self.skipList1.append(SAGEConv(n_mix, n_hidden))
                    elif conv == 'Graph':
                        self.skipList1.append(GraphConv(n_mix, n_hidden))
                elif i == 1:
                    if conv == 'Cheb':
                        self.skipList1.append(
                            ChebConv(n_mix, n_hidden2, K=K_mix))
                    elif conv == 'GCN':
                        self.skipList1.append(GCNConv(n_mix, n_hidden2))
                    elif conv == 'SAGE':
                        self.skipList1.append(SAGEConv(n_mix, n_hidden2))
                    elif conv == 'Graph':
                        self.skipList1.append(GraphConv(n_mix, n_hidden2))
                else:
                    if conv == 'Cheb':
                        self.skipList1.append(
                            ChebConv(n_mix, n_hidden3, K=K_mix))
                    elif conv == 'GCN':
                        self.skipList1.append(GCNConv(n_mix, n_hidden3))
                    elif conv == 'SAGE':
                        self.skipList1.append(SAGEConv(n_mix, n_hidden3))
                    elif conv == 'Graph':
                        self.skipList1.append(GraphConv(n_mix, n_hidden3))

        #  Pooled Branch (selected number of convolution blocks)
        if clustering != 'None':
            self.moduleList2 = torch.nn.ModuleList()
            self.skipList2 = torch.nn.ModuleList()
            for i in range(layers):
                if i == 0:
                    self.moduleList2.append(
                        Kipfblock(n_input=n_input,
                                  n_hidden=n_hidden,
                                  K=K_block,
                                  bn=do_bn,
                                  conv=conv))
                    n_mix = n_hidden + n_input
                elif i == 1:
                    self.moduleList2.append(
                        Kipfblock(n_input=n_hidden,
                                  n_hidden=n_hidden2,
                                  K=K_block,
                                  bn=do_bn,
                                  conv=conv))
                    n_mix = n_hidden2 + n_hidden
                else:
                    self.moduleList2.append(
                        Kipfblock(n_input=n_hidden2,
                                  n_hidden=n_hidden3,
                                  K=K_block,
                                  bn=do_bn,
                                  conv=conv))
                    n_mix = n_hidden3 + n_hidden2
                if midSkip:
                    if i == 0:
                        if conv == 'Cheb':
                            self.skipList2.append(
                                ChebConv(n_mix, n_hidden, K=K_mix))
                        elif conv == 'GCN':
                            self.skipList2.append(GCNConv(n_mix, n_hidden))
                        elif conv == 'SAGE':
                            self.skipList2.append(SAGEConv(n_mix, n_hidden))
                        elif conv == 'Graph':
                            self.skipList2.append(GraphConv(n_mix, n_hidden))
                    elif i == 1:
                        if conv == 'Cheb':
                            self.skipList2.append(
                                ChebConv(n_mix, n_hidden2, K=K_mix))
                        elif conv == 'GCN':
                            self.skipList2.append(GCNConv(n_mix, n_hidden2))
                        elif conv == 'SAGE':
                            self.skipList2.append(SAGEConv(n_mix, n_hidden2))
                        elif conv == 'Graph':
                            self.skipList2.append(GraphConv(n_mix, n_hidden2))
                    else:
                        if conv == 'Cheb':
                            self.skipList2.append(
                                ChebConv(n_mix, n_hidden3, K=K_mix))
                        elif conv == 'GCN':
                            self.skipList2.append(GCNConv(n_mix, n_hidden3))
                        elif conv == 'SAGE':
                            self.skipList2.append(SAGEConv(n_mix, n_hidden3))
                        elif conv == 'Graph':
                            self.skipList2.append(GraphConv(n_mix, n_hidden3))

        # Input size for final convolution
        if layers == 1:
            n_mix = n_hidden
        elif layers == 2:
            n_mix = n_hidden2
        else:
            n_mix = n_hidden3

        if clustering != 'None':
            n_mix = n_mix * 2

        if skipconv:
            n_mix = n_mix + n_input

        # Output size depends on heading channel
        if includeHeading:
            n_output = 9
        else:
            n_output = 6

        # Select convolution type
        if conv == 'Cheb':
            self.conv_mix = ChebConv(n_mix, n_output, K=K_mix)
        elif conv == 'GCN':
            self.conv_mix = GCNConv(n_mix, n_output)
        elif conv == 'SAGE':
            self.conv_mix = SAGEConv(n_mix, n_output)
        elif conv == 'Graph':
            self.conv_mix = GraphConv(n_mix, n_output)
Example 12
 def __init__(self, inChannels, outChannels):
     super(GraphConvRNN, self).__init__()
     # self.A = GCNConv(inChannels, outChannels)
     # self.B = GCNConv(outChannels, outChannels)
     self.A = ChebConv(inChannels, outChannels, K=4)
     self.B = ChebConv(outChannels, outChannels, K=4)
Example 13
 def __init__(self, input_dim, out_dim, filter_num, K, dropout=False):
     super(Cheb_Link, self).__init__()
     self.dropout = dropout
     self.conv1 = ChebConv(input_dim, filter_num, K)
     self.conv2 = ChebConv(filter_num, filter_num, K)
     self.linear = nn.Linear(filter_num * 2, out_dim)
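
The filter_num * 2 input of the final linear layer suggests this link
predictor concatenates the embeddings of the two endpoint nodes. A hedged
sketch of such a forward pass (the original forward is not shown):

import torch
import torch.nn.functional as F

def forward(self, x, edge_index, query_edges):
    # query_edges: (num_queries, 2) pairs of node ids to score
    x = F.relu(self.conv1(x, edge_index))
    x = F.relu(self.conv2(x, edge_index))
    pair = torch.cat([x[query_edges[:, 0]], x[query_edges[:, 1]]], dim=-1)
    return self.linear(pair)  # (num_queries, out_dim)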
Example 14
 def __init__(self, input_dim, hidden_dim, num_classes, num_hops, dropout):
     super(ChebNet, self).__init__()
     self.dropout = dropout
     self.layer_1 = ChebConv(input_dim, hidden_dim, num_hops)
     self.layer_2 = ChebConv(hidden_dim, num_classes, num_hops)
Example 15
 def __init__(self, in_dim, hidden_dim, out_dim):
     super(ChebNet, self).__init__()
     self.conv1 = ChebConv(in_dim, hidden_dim, K=2)
     self.conv2 = ChebConv(hidden_dim, out_dim, K=2)
Example 16
 def __init__(self, num_channels):
     super(Block, self).__init__()
     self.conv1 = ChebConv(num_channels, num_channels, 6)
     self.conv2 = ChebConv(num_channels, num_channels, 6)
     self.conv3 = ChebConv(num_channels, num_channels, 6)
Example 17
    def __init__(
            self,
            in_channels=1,
            hidden_channels=1,
            out_channels=1,
            normalize=False,
            add_loop=False,
            gnn_k=1,
            gnn_type=1,
            jump=None,  # None, 'max', or 'lstm'
            res=False,
            activation='leaky'):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        self.in_channels = in_channels
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)
        self.k = gnn_k  # number of repetitions of the gnn
        self.gnn_type = gnn_type

        self.jump = jump
        if not (jump is None):
            if jump != 'lstm':
                self.jk = JumpingKnowledge(jump)
            else:
                self.jk = JumpingKnowledge(jump, out_channels, gnn_k)
        if activation == 'leaky':
            self.activ = F.leaky_relu
        elif activation == 'elu':
            self.activ = F.elu
        elif activation == 'relu':
            self.activ = F.relu
        self.res = res
        if self.gnn_type in [10, 12] and self.res == True:
            raise Exception('res must be false when gnn_type==10 or 12!')
        if self.k == 1 and self.res == True:
            raise Exception('res must be false when gnn_k==1!')
        if self.k == 1 and not (self.jump is None):
            raise Exception(
                'jumping knowledge only serves for the case where k>1!')
        if gnn_type == 0:
            self.conv1 = DenseSAGEConv(in_channels=self.in_channels,
                                       out_channels=out_channels,
                                       normalize=False)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels,
                                       out_channels=out_channels,
                                       normalize=False)
        if gnn_type == 1:
            self.conv1 = DenseSAGEConv(in_channels=self.in_channels,
                                       out_channels=out_channels,
                                       normalize=True)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels,
                                       out_channels=out_channels,
                                       normalize=True)

        if gnn_type == 2:
            self.conv1 = GCNConv(in_channels=1,
                                 out_channels=out_channels,
                                 cached=False)
            self.conv2 = GCNConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 cached=False)
        if gnn_type == 3:
            self.conv1 = GCNConv(in_channels=1,
                                 out_channels=out_channels,
                                 improved=True,
                                 cached=False)
            self.conv2 = GCNConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 improved=True,
                                 cached=False)
        if gnn_type == 4:
            self.conv1 = ChebConv(in_channels=1,
                                  out_channels=out_channels,
                                  K=2)
            self.conv2 = ChebConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  K=2)
        if gnn_type == 5:
            self.conv1 = ChebConv(in_channels=1,
                                  out_channels=out_channels,
                                  K=4)
            self.conv2 = ChebConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  K=4)
        if gnn_type == 6:
            self.conv1 = GraphConv(in_channels=1,
                                   out_channels=out_channels,
                                   aggr='add')
            self.conv2 = GraphConv(in_channels=hidden_channels,
                                   out_channels=out_channels,
                                   aggr='add')
        if gnn_type == 7:
            self.conv1 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=3,
                                        aggr='add',
                                        bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=3,
                                        aggr='add',
                                        bias=True)
        if gnn_type == 8:
            self.conv1 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=7,
                                        aggr='add',
                                        bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=7,
                                        aggr='add',
                                        bias=True)
        if gnn_type == 9:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=1,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=1,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0.6)
        if gnn_type == 10:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=6,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=6,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 11:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 12:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 13:
            self.conv1 = AGNNConv(requires_grad=True)
            self.conv2 = AGNNConv(requires_grad=True)
        if gnn_type == 14:
            self.conv1 = ARMAConv(in_channels=1,
                                  out_channels=hidden_channels,
                                  num_stacks=1,
                                  num_layers=1,
                                  shared_weights=False,
                                  act=F.relu,
                                  dropout=0.5,
                                  bias=True)
            self.conv2 = ARMAConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  num_stacks=1,
                                  num_layers=1,
                                  shared_weights=False,
                                  act=F.relu,
                                  dropout=0.5,
                                  bias=True)
        if gnn_type == 15:
            self.conv1 = SGConv(in_channels=1,
                                out_channels=out_channels,
                                K=1,
                                cached=True,
                                bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels,
                                out_channels=out_channels,
                                K=1,
                                cached=True,
                                bias=True)
        if gnn_type == 16:
            self.conv1 = SGConv(in_channels=1,
                                out_channels=out_channels,
                                K=3,
                                cached=True,
                                bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels,
                                out_channels=out_channels,
                                K=3,
                                cached=True,
                                bias=True)
        if gnn_type == 17:
            self.conv1 = APPNP(K=1, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=1, alpha=0.2, bias=True)
        if gnn_type == 18:
            self.conv1 = APPNP(K=3, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=3, alpha=0.2, bias=True)
        if gnn_type == 19:
            self.conv1 = RGCNConv(in_channels=1,
                                  out_channels=out_channels,
                                  num_relations=3,
                                  num_bases=2,
                                  bias=True)
            self.conv2 = RGCNConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  num_relations=3,
                                  num_bases=2,
                                  bias=True)
# =============================================================================
#         if gnn_type==20:
#             self.conv1 = SignedConv(in_channels=1, out_channels=out_channels, first_aggr=True, bias=True)
#             self.conv2 = SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=True, bias=True)
#         if gnn_type==21:
#             self.conv1 =SignedConv(in_channels=1, out_channels=out_channels, first_aggr=False, bias=True)
#             self.conv2 =SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=False, bias=True)
#         if gnn_type==22:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#         if gnn_type==23:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#         if gnn_type==24:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
# =============================================================================
        if gnn_type == 25:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=2,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=2,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 26:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=False,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=False,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 27:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 28:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 29:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
Example 18
 def __init__(self):
     super(ChebNet, self).__init__()
     self.conv1 = ChebConv(num_dim, 16, K=2)
     self.conv2 = ChebConv(16, num_class, K=2)
Example 19
 def __init__(self, K=2):
     super(ChebNet, self).__init__()
     self.conv1 = ChebConv(dataset.num_features, 16, K)
     self.conv2 = ChebConv(16, dataset.num_classes, K)
Example 20
    def __init__(self,
                 in_dim,
                 hidden_dim,
                 out_dim,
                 dropout=0.5,
                 name='gat',
                 heads=8,
                 residual=True):
        super(GNNModelPYG, self).__init__()
        self.dropout = dropout
        self.name = name
        self.residual = None
        if residual:
            if in_dim == out_dim:
                self.residual = Identity()
            else:
                self.residual = Linear(in_dim, out_dim)

        if name == 'gat':
            self.conv1 = GATConv(in_dim,
                                 hidden_dim,
                                 heads=heads,
                                 dropout=dropout)
            self.conv2 = GATConv(hidden_dim * heads,
                                 out_dim,
                                 heads=1,
                                 concat=False,
                                 dropout=dropout)
        elif name == 'gcn':
            self.conv1 = GCNConv(in_dim,
                                 hidden_dim,
                                 cached=True,
                                 normalize=True,
                                 add_self_loops=False)
            self.conv2 = GCNConv(hidden_dim,
                                 out_dim,
                                 cached=True,
                                 normalize=True,
                                 add_self_loops=False)
        elif name == 'cheb':
            self.conv1 = ChebConv(in_dim, hidden_dim, K=2)
            self.conv2 = ChebConv(hidden_dim, out_dim, K=2)
        elif name == 'spline':
            self.conv1 = SplineConv(in_dim, hidden_dim, dim=1, kernel_size=2)
            self.conv2 = SplineConv(hidden_dim, out_dim, dim=1, kernel_size=2)
        elif name == 'gin':
            self.conv1 = GINConv(
                Sequential(Linear(in_dim, hidden_dim), ReLU(),
                           Linear(hidden_dim, hidden_dim)))
            self.conv2 = GINConv(
                Sequential(Linear(hidden_dim, hidden_dim), ReLU(),
                           Linear(hidden_dim, out_dim)))
        elif name == 'unet':
            self.conv1 = GraphUNet(in_dim, hidden_dim, out_dim, depth=3)
        elif name == 'agnn':
            self.lin1 = Linear(in_dim, hidden_dim)
            self.conv1 = AGNNConv(requires_grad=False)
            self.conv2 = AGNNConv(requires_grad=True)
            self.lin2 = Linear(hidden_dim, out_dim)
        else:
            raise NotImplementedError(
                "Unknown model name. Choose from gat, gcn, cheb, spline, "
                "gin, unet, agnn.")
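
The constructor prepares an optional self.residual projection; a sketch of
how the two-conv branches plausibly use it (assumed, since the original
forward is not shown, and the 'unet'/'agnn' branches would differ):

import torch.nn.functional as F

def forward(self, x, edge_index):
    h = F.relu(self.conv1(x, edge_index))
    h = F.dropout(h, p=self.dropout, training=self.training)
    h = self.conv2(h, edge_index)
    if self.residual is not None:
        h = h + self.residual(x)  # identity or a linear input projection
    return h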
Example 21
 def __init__(self, nfeat, nhid, nclass, dropout, nlayer=1):
     super(ChebGCN1, self).__init__()
     chebgcn_para1 = 2  # Chebyshev filter size K
     self.conv1 = ChebConv(nfeat, nclass, chebgcn_para1)
     self.dropout_p = dropout
Example 22
    def __init__(self,
                 num_features,
                 num_classes,
                 nh=38,
                 K=6,
                 K_mix=2,
                 inout_skipconn=True,
                 depth=3,
                 p=0.5,
                 bn=False):
        super(Graph_ensnet, self).__init__()
        self.inout_skipconn = inout_skipconn
        self.depth = depth

        self.Kipfblock_list = nn.ModuleList()
        self.Sage_list = nn.ModuleList()
        self.Sg_list = nn.ModuleList()
        #self.skipproject_list = nn.ModuleList()
        #self.norms_list = nn.ModuleList()

        if isinstance(nh, list):
            # if you give every layer a different number of channels,
            # you need one channel count per layer!
            assert len(nh) == depth

        else:
            channels = nh
            nh = []
            for i in range(depth):
                nh.append(channels)

        for i in range(depth):
            if i == 0:
                self.Kipfblock_list.append(
                    Kipfblock(n_input=num_features,
                              n_hidden=nh[0],
                              K=K,
                              p=p,
                              bn=bn))
                self.Sage_list.append(Sageblock(num_features, nh[0]))
                self.Sg_list.append(Sgblock(num_features, nh[0], K=5))
                #self.skipproject_list.append(ChebConv(num_features, nh[0], K=1))
            else:
                self.Kipfblock_list.append(
                    Kipfblock(n_input=nh[i - 1],
                              n_hidden=nh[i],
                              K=K,
                              p=p,
                              bn=bn))
                self.Sage_list.append(Sageblock(nh[i - 1], nh[i]))
                self.Sg_list.append(Sgblock(nh[i - 1], nh[i], K=5))
                #self.skipproject_list.append(ChebConv(nh[i - 1], nh[i], K=1))

        if inout_skipconn:
            self.conv_mix = ChebConv(nh[-1] + num_features,
                                     num_classes,
                                     K=K_mix)
            self.sage_mix = Sageblock(nh[-1] + num_features, num_classes)
            self.sg_mix = Sgblock(nh[-1] + num_features, num_classes, K=5)
        else:
            self.conv_mix = ChebConv(nh[-1], num_classes, K=K_mix)
Example 23
    def __init__(self,
                 in_channels=1,
                 hidden_channels=1,
                 out_channels=1,
                 normalize=False,
                 add_loop=False,
                 gnn_k=1,
                 gnn_type=1):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)
        self.k = gnn_k  # number of repetitions of the gnn
        self.gnn_type = gnn_type
        if gnn_type==0:
            self.conv1 = DenseSAGEConv(in_channels=1, out_channels=hidden_channels, normalize=False)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels, out_channels=out_channels, normalize=False)
        if gnn_type==1:
            self.conv1 = DenseSAGEConv(in_channels=1, out_channels=hidden_channels, normalize=True)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels, out_channels=out_channels, normalize=True)
        
        if gnn_type==2:
            self.conv1 = GCNConv(in_channels=1, out_channels=hidden_channels, cached=True)
            self.conv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels, cached=True)
        if gnn_type==3:
            self.conv1 = GCNConv(in_channels=1, out_channels=hidden_channels,improved=True, cached=True)
            self.conv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels,improved=True, cached=True)
        if gnn_type==4:
            self.conv1 = ChebConv(in_channels=1, out_channels=hidden_channels,K=2)
            self.conv2 = ChebConv(in_channels=hidden_channels, out_channels=out_channels,K=2)
        if gnn_type==5:
            self.conv1 = ChebConv(in_channels=1, out_channels=hidden_channels,K=4)
            self.conv2 = ChebConv(in_channels=hidden_channels, out_channels=out_channels,K=4)
        if gnn_type==6:
            self.conv1 = GraphConv(in_channels=1, out_channels=hidden_channels,aggr='add')
            self.conv2 = GraphConv(in_channels=hidden_channels, out_channels=out_channels,aggr='add')
        if gnn_type==7:
            # GatedGraphConv takes no in_channels argument
            self.conv1 = GatedGraphConv(out_channels=hidden_channels, num_layers=3, aggr='add', bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels, num_layers=3, aggr='add', bias=True)
        if gnn_type==8:
            self.conv1 = GatedGraphConv(out_channels=hidden_channels, num_layers=7, aggr='add', bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels, num_layers=7, aggr='add', bias=True)
        if gnn_type==9:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=1, concat=True, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=1, concat=True, negative_slope=0.2,dropout=0.6)
        if gnn_type==10:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=6, concat=False, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=6, concat=False, negative_slope=0.2,dropout=0.6)
            
        if gnn_type==11:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=4, concat=True, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=4, concat=True, negative_slope=0.2,dropout=0.6)
        
        if gnn_type==12:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=4, concat=False, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=4, concat=False, negative_slope=0.2,dropout=0.6)
            
        if gnn_type==13:
            self.conv1 = AGNNConv(requires_grad=True)
            self.conv2 = AGNNConv(requires_grad=True)
        if gnn_type==14:
            self.conv1 = ARMAConv(in_channels=1, out_channels=hidden_channels, num_stacks=1, num_layers=1, \
                                  shared_weights=False, act=F.relu, dropout=0.5, bias=True)
            self.conv2 = ARMAConv(in_channels=hidden_channels, out_channels=out_channels, num_stacks=1, num_layers=1, \
                                  shared_weights=False, act=F.relu, dropout=0.5, bias=True)
        if gnn_type==15:
            self.conv1 = SGConv(in_channels=1, out_channels=hidden_channels, K=1, cached=True, bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels, out_channels=out_channels, K=1, cached=True, bias=True)
        if gnn_type==16:
            self.conv1 = SGConv(in_channels=1, out_channels=hidden_channels, K=3, cached=True, bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels, out_channels=out_channels, K=3, cached=True, bias=True)
        if gnn_type==17:
            self.conv1 = APPNP(K=1, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=1, alpha=0.2, bias=True)
        if gnn_type==18:
            self.conv1 = APPNP(K=3, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=3, alpha=0.2, bias=True)
        if gnn_type==19:
            self.conv1 =RGCNConv(in_channels=1, out_channels=hidden_channels, num_relations=3, num_bases=2, bias=True)
            self.conv2 =RGCNConv(in_channels=hidden_channels, out_channels=out_channels, num_relations=3, num_bases=2, bias=True)
# =============================================================================
#         if gnn_type==20:
#             self.conv1 = SignedConv(in_channels=1, out_channels=hidden_channels, first_aggr=True, bias=True)
#             self.conv2 = SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=True, bias=True)
#         if gnn_type==21:
#             self.conv1 =SignedConv(in_channels=1, out_channels=hidden_channels, first_aggr=False, bias=True)
#             self.conv2 =SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=False, bias=True)
#         if gnn_type==22:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#         if gnn_type==23:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=5, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#         if gnn_type==24:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
# =============================================================================
        if gnn_type==25:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==26:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=3, is_open_spline=False, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=3, is_open_spline=False, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==27:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==28:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=3, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=3, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
        if gnn_type==29:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
Example 24
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = ChebConv(80, 40, K=2)
     self.conv2 = ChebConv(40, 20, K=2)
     self.fc = nn.Linear(20, 5)
Example 25
 def __init__(self, n_features, embed_dim=128, out_features=1):
     super(Net2, self).__init__()
     # ChebConv has no `cached` argument (that belongs to GCNConv)
     self.conv1 = ChebConv(n_features, embed_dim, K=2)
     self.conv2 = ChebConv(embed_dim, embed_dim, K=2)
Example 26
 def __init__(self, dataset, args):
     super(ChebNet, self).__init__()
     self.conv1 = ChebConv(dataset.num_features, 32, K=2)
     self.conv2 = ChebConv(32, dataset.num_classes, K=2)
     self.dropout = args.dropout
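
All of the snippets above share the same two-layer ChebConv pattern. A
self-contained training sketch on random data, assuming only torch and
torch_geometric are installed:

import torch
import torch.nn.functional as F
from torch_geometric.nn import ChebConv

class TwoLayerCheb(torch.nn.Module):
    def __init__(self, num_features, num_classes, hidden=16, K=2):
        super().__init__()
        self.conv1 = ChebConv(num_features, hidden, K=K)
        self.conv2 = ChebConv(hidden, num_classes, K=K)

    def forward(self, x, edge_index):
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, p=0.5, training=self.training)
        return self.conv2(x, edge_index)

x = torch.randn(100, 8)                        # 100 nodes, 8 features each
edge_index = torch.randint(0, 100, (2, 400))   # random edges, illustration only
y = torch.randint(0, 3, (100,))                # 3 node classes

model = TwoLayerCheb(num_features=8, num_classes=3)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for epoch in range(10):
    optimizer.zero_grad()
    loss = F.cross_entropy(model(x, edge_index), y)
    loss.backward()
    optimizer.step()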