Example #1
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(
            dataset.num_features,
            16,
            cached=True,
        )
        self.conv2 = GCNConv(
            16,
            dataset.num_classes,
            cached=True,
        )
        # self.conv1 = ChebConv(data.num_features, 16, K=2)
        # self.conv2 = ChebConv(16, data.num_classes, K=2)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(x, dim=1)
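A minimal training sketch for the class above, assuming the module-level `dataset` and `data` globals come from a Planetoid benchmark (the setup below is illustrative, not part of the original snippet):

import torch
import torch.nn.functional as F
from torch_geometric.datasets import Planetoid

# Hypothetical setup for the globals the class reads.
dataset = Planetoid(root='/tmp/Cora', name='Cora')
data = dataset[0]

model = Net()
# reg_params/non_reg_params exist so that weight decay hits the first
# layer only, as in the reference PyG example.
optimizer = torch.optim.Adam([
    dict(params=model.reg_params, weight_decay=5e-4),
    dict(params=model.non_reg_params, weight_decay=0),
], lr=0.01)

model.train()
for epoch in range(200):
    optimizer.zero_grad()
    out = model()
    F.nll_loss(out[data.train_mask], data.y[data.train_mask]).backward()
    optimizer.step()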
Example #2
class NetGCN(torch.nn.Module):
    def __init__(self, num_features, num_classes, dim=10):
        super(NetGCN, self).__init__()

        self.conv1 = GCNConv(num_features,
                             dim,
                             normalize=False,
                             cached=False,
                             bias=False)
        self.conv2 = GCNConv(dim,
                             dim,
                             normalize=False,
                             cached=False,
                             bias=False)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

        self.fc1 = Linear(dim, 1, bias=False)

    def forward(self, x, edge_index, batch):
        x = F.relu(self.conv1(x, edge_index))
        x = self.conv2(x, edge_index)

        x = global_mean_pool(x, batch)
        x = self.fc1(x)
        return torch.sigmoid(x)
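forward returns a sigmoid over a single output unit, so this model is wired for graph-level binary classification; a training-step sketch under that assumption (the loader and labels are illustrative):

import torch
from torch_geometric.loader import DataLoader  # torch_geometric.data.DataLoader in older versions

model = NetGCN(num_features=dataset.num_features, num_classes=2)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
criterion = torch.nn.BCELoss()  # forward() already applies the sigmoid

for batch in DataLoader(dataset, batch_size=32, shuffle=True):
    optimizer.zero_grad()
    out = model(batch.x, batch.edge_index, batch.batch)
    loss = criterion(out.view(-1), batch.y.float())
    loss.backward()
    optimizer.step()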
Example #3
class Net(torch.nn.Module):
    def __init__(self, dataset, device):
        super(Net, self).__init__()
        self.device = device
        self.dataset = dataset  # the loaded dataset (one graph per element)
        # linear feature extraction
        self.fc1 = nn.Linear(dataset.num_features, 128)  # MLP over node features
        # convolution layers
        self.conv1 = GCNConv(128, 128)
        self.conv2 = GCNConv(128, 128)
        # "deconvolution" layers (same operator, applied to the mixed embeddings)
        self.dconv1 = GCNConv(128, 128)
        self.dconv2 = GCNConv(128, 128)
        # linear fusion: project node embeddings back to the input dimension
        self.fc2 = nn.Linear(128, dataset.num_features)

        # parameter groups updated by backpropagation
        self.w1 = self.fc1.parameters()
        self.conv1_reg_params1 = self.conv1.parameters()
        self.conv2_reg_params1 = self.conv2.parameters()
        self.dconv1_reg_params2 = self.dconv1.parameters()
        self.dconv2_reg_params2 = self.dconv2.parameters()
        self.w2 = self.fc2.parameters()

    def forward(self):
        pre_x = []  # extracted key attributes
        normal_x = []  # per-graph node embeddings
        # convolve each graph in the dataset separately
        for data in self.dataset:
            x, edge_index = data.x, data.edge_index
            x = self.fc1(x)
            pre_x.append(x)
            x = self.conv1(x, edge_index)
            x = self.conv2(x, edge_index)
            normal_x.append(x)

        # initialize variables
        graph_number = len(normal_x)
        graph_id = list(range(graph_number))
        # mix node embeddings across the different graphs
        xs = []
        for i in graph_id:
            edge_index = self.dataset[i].edge_index
            # zeros_like keeps dtype and device consistent with the embeddings
            sum_x = torch.zeros_like(normal_x[i])
            for j in graph_id:
                if i != j:
                    sum_x += normal_x[j]
            # "deconvolution": the formula is in fact identical to a regular convolution
            normal_x[i] = self.dconv1(sum_x, edge_index)
            normal_x[i] = self.dconv2(normal_x[i], edge_index)
            xs.append(normal_x[i])

        # The multi-feature fusion below is provisional; a better-justified
        # combination method is still needed.
        fin_feat = xs[0]
        for feat_index in range(1, len(xs)):  # was len(xs) - 1, which skipped the last graph
            fin_feat = fin_feat * xs[feat_index]
        # fin_feat = F.log_softmax(fin_feat)
        fin_feat = torch.sigmoid(fin_feat)  # F.sigmoid is deprecated
        Loss_embedding = self.fc2(fin_feat)
        return pre_x, xs, fin_feat, Loss_embedding
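The fc2 projection back to num_features and the name Loss_embedding suggest a reconstruction-style objective; the snippet does not show the loss, so the following training loop is a speculative sketch only:

# Speculative training loop; the real objective is not part of the snippet.
model = Net(dataset, device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
for epoch in range(100):
    optimizer.zero_grad()
    pre_x, xs, fin_feat, loss_embedding = model()
    # One plausible reconstruction loss: match the projected fused
    # embedding against each graph's input node features.
    loss = sum(F.mse_loss(loss_embedding, d.x) for d in dataset) / len(dataset)
    loss.backward()
    optimizer.step()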
Example #4
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(
            num_features,
            16,
            cached=True,
            #  normalize=not args.use_gdc
        )
        self.conv2 = GCNConv(
            16,
            dataset.num_classes,
            cached=True,
            #  normalize=not args.use_gdc
        )
        # self.conv1 = ChebConv(data.num_features, 16, K=2)
        # self.conv2 = ChebConv(16, data.num_classes, K=2)
        self.mask, self.mask_bias = construct_edge_mask(data.x.shape[0],
                                                        init_strategy='const')
        self.feat_mask = construct_feat_mask(data.x.shape[0],
                                             num_features,
                                             init_strategy="normal")
        # mask, feat_mask = mask.to(device), feat_mask.to(device)
        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self, x, edge_index, edge_weight):
        # x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        # Re-wrapping the mask in nn.Parameter on every forward pass detaches
        # it from the optimizer and re-applies the sigmoid repeatedly; use a
        # local squashed copy, and avoid the in-place multiply on the input.
        feat_mask = torch.sigmoid(self.feat_mask)
        x = x * feat_mask
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(x, dim=1), feat_mask
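The helpers construct_edge_mask and construct_feat_mask are not included in the snippet; a plausible minimal sketch in the spirit of GNNExplainer-style mask construction (an assumption, not the original implementation):

import math
import torch
import torch.nn as nn

def construct_feat_mask(num_nodes, feat_dim, init_strategy="normal"):
    # Learnable feature-mask logits; forward() squashes them with a sigmoid.
    mask = nn.Parameter(torch.empty(num_nodes, feat_dim))
    if init_strategy == "normal":
        nn.init.normal_(mask, mean=0.0, std=0.1)
    else:  # "const"
        nn.init.constant_(mask, 0.0)
    return mask

def construct_edge_mask(num_nodes, init_strategy="const", const_val=1.0):
    # Learnable dense edge-mask logits over node pairs, plus a bias term.
    mask = nn.Parameter(torch.empty(num_nodes, num_nodes))
    if init_strategy == "normal":
        std = nn.init.calculate_gain("relu") * math.sqrt(2.0 / (2 * num_nodes))
        nn.init.normal_(mask, mean=0.0, std=std)
    else:  # "const"
        nn.init.constant_(mask, const_val)
    bias = nn.Parameter(torch.zeros(num_nodes, num_nodes))
    return mask, bias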
Example #5
class GNN_deep(nn.Module):
    def __init__(self, args):
        # what to do with use_gdc

        super(GNN_deep, self).__init__()
        self.args = args
        self.d = args.n_node_features
        num_nodes = args.n_nodes

        self.conv1 = GCNConv(self.d, 32, normalize=True)
        self.conv2 = GCNConv(32, 32, normalize=True)
        self.conv3 = GCNConv(32, 16, normalize=True)
        self.conv4 = GCNConv(16, 1, normalize=True)
        # then, we have to return a results for pi and v, respectively
        self.activ1 = nn.Linear(16, num_nodes)  # note: unused in forward below
        self.activ2 = nn.Linear(16, 1)
        # define parameter groups (note: conv3/conv4 end up in neither group)
        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self, data):
        # print("analyzing data")
        # print(data.x)
        # print(data.edge_index)
        # print(data.weight)
        # input()

        x, edges, weights = data.x, data.edge_index, data.weight
        weights = weights.float()

        x = self.conv1(x, edges, weights)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edges, weights)
        x = F.relu(x)
        x = F.dropout(x, training=self.training)
        x = self.conv3(x, edges, weights)
        x = F.relu(x)

        c = self.conv4(x, edges, weights)

        # choice = torch.masked_select(c.squeeze(), choices)
        choice = F.softmax(c, dim=0).view(self.args.n_nodes)

        v = global_mean_pool(x, torch.zeros(data.num_nodes, dtype=torch.long).to(self.args.device))
        value = self.activ2(v)

        # print("value: ", value.squeeze())

        return choice, value.squeeze()
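A quick inference sketch for the policy/value pair this network returns (construction of args and data is assumed):

# Hypothetical inference call; `data` is a torch_geometric Data object with
# x, edge_index, and a per-edge `weight` attribute, as forward() expects.
model = GNN_deep(args).to(args.device)
model.eval()
with torch.no_grad():
    pi, v = model(data)          # pi: distribution over n_nodes, v: scalar
action = torch.argmax(pi).item()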
Example #6
class Attacker(torch.nn.Module):
    def __init__(self):
        super(Attacker, self).__init__()  # initialize the parent class
        self.attack = False
        self.conv1 = GCNConv(CONFIG.FeatureLen(), 180)
        self.conv2 = GCNConv(180, 120)
        self.lin1 = torch.nn.Linear(120, CONFIG.ClassNum())
        # this time it is the inputs, not the weights, that get optimized
        self.X = torch.tensor([1], dtype=torch.float)
        self.edges = torch.tensor([[], []], dtype=torch.long)

    def loadX(self, X: torch.Tensor):
        self.X = X
        if self.X.requires_grad is False:
            self.X.requires_grad = True
        return

    def loadEdges(self, edges: torch.Tensor):
        self.edges = edges
        self.attack = True
        return

    def forward(self, data: Data):
        """
        :param data: the input graph data
        :return:
        """
        if self.attack is False:
            edges = data.edge_index
        else:
            edges = torch.cat((data.edge_index, self.edges), 1)  # append injected edges along dim 1
        edges, _ = torch_geometric.utils.add_remaining_self_loops(edges)
        if self.attack is False:
            x = data.x
        else:
            x = torch.cat((data.x, self.X), 0)  # dim 0: append injected node features as new rows
        x = self.conv1(x, edges)
        x = F.leaky_relu(x)
        x = self.conv2(x, edges)
        x = F.leaky_relu(x)
        x = self.lin1(x)
        x = F.log_softmax(x, dim=1)
        return x

    def freeze(self):
        for parameter in self.conv1.parameters():
            parameter.requires_grad = False
        for parameter in self.conv2.parameters():
            parameter.requires_grad = False
        for parameter in self.lin1.parameters():
            parameter.requires_grad = False

    def get_x_grad(self) -> torch.Tensor:
        return self.X.grad
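A sketch of the intended attack loop: load a trained model's weights, freeze them, inject a node, and climb the loss gradient with respect to the injected features (everything below except the class's own methods is an assumption):

# Hypothetical gradient-based node-injection attack using the class above.
attacker = Attacker()
attacker.freeze()  # model weights fixed; only the injected input can move

fake_x = torch.zeros(1, CONFIG.FeatureLen(), requires_grad=True)
fake_edges = torch.tensor([[data.num_nodes], [0]], dtype=torch.long)  # link the new node to node 0

attacker.loadX(fake_x)
attacker.loadEdges(fake_edges)

out = attacker(data)
loss = F.nll_loss(out[data.train_mask], data.y[data.train_mask])
loss.backward()
# One FGSM-style ascent step on the injected features:
fake_x = (fake_x + 0.1 * attacker.get_x_grad().sign()).detach().requires_grad_(True)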
Example #7
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(data.in_feats, 16)
        self.conv2 = GCNConv(16, data.n_classes)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self):
        x, adj = data.x, data.adj  # data.adj is expected to hold the edge_index tensor
        x = F.relu(self.conv1(x, adj))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, adj)
        return F.log_softmax(x, dim=1)
Example #8
    def __init__(self,
                 in_channels,
                 out_channels,
                 hiddens=[16],
                 activations=['relu'],
                 dropout=0.5,
                 weight_decay=5e-4,
                 lr=0.01,
                 use_bias=True):

        super().__init__()

        paras = []
        acts = []
        layers = ModuleList()

        # use ModuleList to create layers with different size
        inc = in_channels
        for hidden, activation in zip(hiddens, activations):
            layer = GCNConv(inc,
                            hidden,
                            cached=True,
                            bias=use_bias,
                            normalize=False)
            layers.append(layer)
            paras.append(
                dict(params=layer.parameters(), weight_decay=weight_decay))
            acts.append(get_activation(activation))
            inc = hidden

        layer = GCNConv(inc,
                        out_channels,
                        cached=True,
                        bias=use_bias,
                        normalize=False)
        layers.append(layer)
        # do not use weight_decay in the final layer
        paras.append(dict(params=layer.parameters(), weight_decay=0.))

        self.acts = acts
        self.layers = layers
        self.dropout = Dropout(dropout)
        self.compile(loss=torch.nn.CrossEntropyLoss(),
                     optimizer=optim.Adam(paras, lr=lr),
                     metrics=[Accuracy()])
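The snippet shows only the __init__ of a Keras-style wrapper class (note the self.compile call); its forward pass is omitted. A minimal forward consistent with the layers, acts, and dropout fields might look like the following (an assumption, not the original code):

    # Hypothetical forward pass matching the fields built in __init__ above.
    def forward(self, x, edge_index, edge_weight=None):
        for layer, act in zip(self.layers[:-1], self.acts):
            x = self.dropout(act(layer(x, edge_index, edge_weight)))
        return self.layers[-1](x, edge_index, edge_weight)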
Example #9
class GCN(torch.nn.Module):
    def __init__(self, dataset, hidden):
        super(GCN, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, hidden)
        self.conv2 = GCNConv(hidden, dataset.num_classes)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()


    def reset_parameters(self):
        self.conv1.reset_parameters()
        self.conv2.reset_parameters()

    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(x, dim=1)
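reset_parameters makes the model reusable across repeated runs; a sketch of the usual multi-run evaluation pattern (the optimizer settings mirror Example #1 and are assumptions here):

# Hypothetical multi-run loop exploiting reset_parameters().
model = GCN(dataset, hidden=16)
for run in range(10):
    model.reset_parameters()  # fresh initialization per run
    optimizer = torch.optim.Adam([
        dict(params=model.reg_params, weight_decay=5e-4),
        dict(params=model.non_reg_params, weight_decay=0),
    ], lr=0.01)
    # ...train and evaluate as in Example #1...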
Example #10
class GraphNet(torch.nn.Module):
    def __init__(self, num_features, num_classes):
        super(GraphNet, self).__init__()
        self.conv1 = GCNConv(num_features, 16, cached=True)
        self.conv2 = GCNConv(16, num_classes, cached=True)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(x, dim=1)

    def get_hidden_embeddings(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        return x
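get_hidden_embeddings exposes the 16-dimensional hidden layer, e.g. for visualization or downstream classifiers; typical usage (a sketch, given a trained GraphNet `model` and a data object):

# Extract hidden node embeddings without tracking gradients.
model.eval()
with torch.no_grad():
    emb = model.get_hidden_embeddings(data)  # shape: (num_nodes, 16)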
Example #11
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(dataset.num_features,
                             16,
                             cached=True,
                             normalize=not args.use_gdc)
        self.conv2 = GCNConv(16,
                             dataset.num_classes,
                             cached=True,
                             normalize=not args.use_gdc)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self):
        x, edge_index = data.x, data.edge_index
        x = F.relu(self.conv1(x, edge_index))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index)
        return F.log_softmax(x, dim=1)
Example #12
class GCNNet(torch.nn.Module):
    def __init__(self, output_shape, args):
        super(GCNNet, self).__init__()
        self.conv1 = GCNConv(dataset.num_features,
                             16,
                             cached=True,
                             normalize=not args.use_gdc)
        self.conv2 = GCNConv(16,
                             dataset.num_classes,
                             cached=True,
                             normalize=not args.use_gdc)
        # self.conv1 = ChebConv(data.num_features, 16, K=2)
        # self.conv2 = ChebConv(16, data.num_classes, K=2)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = torch.nn.functional.relu(self.conv1(x, edge_index, edge_weight))
        x = torch.nn.functional.dropout(x, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return torch.nn.functional.log_softmax(x, dim=1)
Example #13
class DFAGraphNet(torch.nn.Module):
    def __init__(self, num_features, num_classes, training_method='dfa'):
        super(DFAGraphNet, self).__init__()
        self.conv1 = GCNConv(num_features, 16, cached=True)
        self.dfa_1 = DFALayer()
        self.conv2 = GCNConv(16, num_classes, cached=True)

        self.dfa = DFA(dfa_layers=[self.dfa_1], no_training=training_method != 'dfa')

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()

    def forward(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, p=0.1, training=self.training)
        x = self.dfa_1(x)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(self.dfa(x), dim=1)

    def get_hidden_embeddings(self, data):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        return x
Example #14
class Net(torch.nn.Module):
    def __init__(self, lrate):
        super(Net, self).__init__()
        self.conv1 = GCNConv(dataset.num_features,
                             16,
                             cached=True,
                             normalize=not args.use_gdc)
        self.conv2 = GCNConv(16,
                             dataset.num_classes,
                             cached=True,
                             normalize=not args.use_gdc)
        #self.optimizer = torch.optim.Adam(self.parameters(), lr=lrate, weight_decay=0.0005)
        self.optimizer = torch.optim.Adam([
            dict(params=self.conv1.parameters(), weight_decay=5e-4),
            dict(params=self.conv2.parameters(), weight_decay=0)
        ],
                                          lr=lrate)
        #self.optimizer = torch.optim.SGD(self.parameters(), lr=lrate)
        # self.conv1 = ChebConv(data.num_features, 16, K=2)
        # self.conv2 = ChebConv(16, data.num_classes, K=2)

    def forward(self):
        x, edge_index, edge_weight = graph_data.x, graph_data.edge_index, graph_data.edge_attr
        x = F.relu(self.conv1(x, edge_index, edge_weight))
        x = F.dropout(x, training=self.training)
        x = self.conv2(x, edge_index, edge_weight)
        return F.log_softmax(x, dim=1)

    def evaluate_proposal(self, data, w=None):
        self.los = 0
        if w is not None:
            self.loadparameters(w)
        if data == 'train':
            prob = copy.deepcopy(self.forward().detach())
            for _, mask in graph_data('train_mask'):
                y_pred = prob[mask].max(1)[1]
            loss = F.nll_loss(self.forward()[graph_data.train_mask],
                              graph_data.y[graph_data.train_mask])
            self.los += loss
            prob = prob[mask]
        else:
            prob = copy.deepcopy(self.forward().detach())
            for _, mask in graph_data('test_mask'):
                y_pred = prob[mask].max(1)[1]
            loss = F.nll_loss(self.forward()[graph_data.test_mask],
                              graph_data.y[graph_data.test_mask])
            self.los += loss
            prob = prob[mask]
        return y_pred, prob

    #  One-Step Gradient

    def langevin_gradient(self, w=None):
        if w is not None:
            self.loadparameters(w)
        self.los = 0
        self.optimizer.zero_grad()
        loss = F.nll_loss(self.forward()[graph_data.train_mask],
                          graph_data.y[graph_data.train_mask])
        loss.backward()
        self.optimizer.step()
        self.los += copy.deepcopy(loss.item())
        return copy.deepcopy(self.state_dict())

    # Retrieve weights and biases from the model as a flat numpy vector
    def getparameters(self, w=None):
        l = np.array([1, 2])  # dummy head so np.concatenate has a start; stripped below
        dic = {}
        if w is None:
            dic = self.state_dict()
        else:
            dic = copy.deepcopy(w)
        for name in sorted(dic.keys()):
            l = np.concatenate(
                (l, np.array(copy.deepcopy(dic[name])).reshape(-1)), axis=None)
        l = l[2:]
        return l

    # Convert a flat numpy array to an ordered dict of model parameters
    def dictfromlist(self, param):
        dic = {}
        i = 0
        for name in sorted(self.state_dict().keys()):
            dic[name] = torch.FloatTensor(
                param[i:i + (self.state_dict()[name]).view(-1).shape[0]]).view(
                    self.state_dict()[name].shape)
            i += (self.state_dict()[name]).view(-1).shape[0]
        # self.loadparameters(dic)
        return dic

    def loadparameters(self, param):
        self.load_state_dict(param)

    # Add Gaussian noise to every parameter tensor and load the result
    def addnoiseandcopy(self, mea, std_dev):
        dic = {}
        w = self.state_dict()
        for name in w.keys():
            dic[name] = copy.deepcopy(w[name]) + torch.zeros(
                w[name].size()).normal_(mean=mea, std=std_dev)
        self.loadparameters(dic)
        return dic
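These helpers are the building blocks of a Metropolis-Hastings sampler with Langevin (one-step-gradient) proposals; a schematic of how they compose, with the accept/reject test elided since it lives in the surrounding sampler (this usage is an assumption):

# Hypothetical outer sampling loop around the helpers above.
model = Net(lrate=0.01)
w = copy.deepcopy(model.state_dict())
for i in range(5000):                          # chain length assumed
    w_gd = model.langevin_gradient(w)          # one optimizer step from w
    w_prop = model.addnoiseandcopy(0.0, 0.02)  # Gaussian-perturbed proposal
    y_pred, prob = model.evaluate_proposal('train', w_prop)
    # ...Metropolis-Hastings accept/reject of w_prop against w goes here...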
Example #15
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GCNConv(dataset.num_features, 64, cached=True)
        self.conv2 = GCNConv(64, dataset.num_classes, cached=True)

        self.linear = torch.nn.Linear(64, dataset.num_classes)

        self.conv1_deepwalk = GCNConv(64, 32, cached=True)
        self.conv2_deepwalk = GCNConv(32, dataset.num_classes, cached=True)

        self.reg_params = self.conv1.parameters()
        self.non_reg_params = self.conv2.parameters()
        
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        
        # Dump the graph as an adjacency list for DeepWalk; this loop assumes
        # edge_index is sorted by source node.
        index = 0
        with open('gcn.adjlist', 'w') as f:
            for i in range(x.shape[0]):
                f.write(str(i))
                for j in range(index, edge_index.shape[1]):
                    if edge_index[0][j].item() == i:
                        f.write(' '+ str(edge_index[1][j].item()))
                    else:
                        index = j
                        break
                f.write('\n')
        
        self.dat = get_embedding(edge_index, x.shape[0])
        self.dat = torch.from_numpy(self.dat).float().cuda()
 
        # pairwise cosine similarity is computed in chunks of 5000 rows to bound memory
        tensor_index = torch.Tensor(5000, x.shape[0], 64)
        tensor_neighbor_index = torch.Tensor(5000, x.shape[0], 64)

        sim_list = []
        for index, row in enumerate(self.dat):
            row = torch.squeeze(row, 0)
            row = row.repeat(x.shape[0], 1)
   
            if index < 5000:
                tensor_index[index]=row
                tensor_neighbor_index[index]=self.dat
            else:
                if index%5000 == 0:
                    sim = torch.cosine_similarity(tensor_index, tensor_neighbor_index, dim=-1)
                    sim_list.append(sim)
                tensor_index[index-5000*int(index/5000)]=row
                tensor_neighbor_index[index-5000*int(index/5000)]=self.dat
                
        if len(sim_list) <= 0:
            sim_ = torch.cosine_similarity(tensor_index, tensor_neighbor_index, dim=-1)
            sim = sim_[:x.shape[0]]
        else:
            sim = torch.cosine_similarity(tensor_index, tensor_neighbor_index, dim=-1)
            sim_list.append(sim)
            sim_ =  torch.cat(sim_list, dim=0)
            sim = sim_[:x.shape[0]]

        # rebuild the adjacency list (same sorted-edge_index assumption)
        index = 0
        adlist = []

        for i in range(x.shape[0]):
            lists = []
            for j in range(index, edge_index.shape[1]):
                if edge_index[0][j].item() == i:
                    lists.append(edge_index[1][j].item())
                else:
                    index = j
                    break
            adlist.append(lists)
        # mask out self-similarity on the diagonal
        mask = torch.ones(sim.size()[0])
        mask = 1 - mask.diag()
        # similarity threshold per dataset: Cora 0.86, CiteSeer 0.9, PubMed 1.0
        sim_vec = torch.nonzero((sim > 0.9).float() * mask)
        for k in sim_vec:
            node_index = k[0].item()
            node_neighbor_index = k[1].item()
            if node_neighbor_index not in adlist[node_index]:
                adlist[node_index].append(node_neighbor_index)
        node_total = []
        neighbor_total = []
        for i in range(len(adlist)):
            for j in range(len(adlist[i])):
                node_total.append(i)
                neighbor_total.append(adlist[i][j])

                
        self.edge_index_new = torch.Tensor(2, len(node_total)).long()
        
        self.edge_index_new[0] = torch.from_numpy(np.array(node_total))
        self.edge_index_new[1] = torch.from_numpy(np.array(neighbor_total))
        self.edge_index_new = self.edge_index_new.cuda()
        
        
    def forward(self):
        x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
        x_deepwalk = self.dat.float().cuda()
        x = F.relu(self.conv1(x, self.edge_index_new, edge_weight))
        x_deepwalk = F.relu(self.conv1_deepwalk(x_deepwalk, self.edge_index_new, edge_weight))
        x = F.dropout(x, training=self.training)
        x_deepwalk = F.dropout(x_deepwalk, training=self.training)
        x = self.conv2(x, self.edge_index_new, edge_weight)
        x_deepwalk = self.conv2_deepwalk(x_deepwalk, self.edge_index_new, edge_weight)
        x = 0.1 * x_deepwalk + 0.2 * x  # weighted blend of the deepwalk and feature branches
        return F.log_softmax(x, dim=1)
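get_embedding is not defined in the snippet; it is evidently expected to run DeepWalk over the edge list and return a (num_nodes, 64) numpy array. One possible stand-in using the karateclub library (an assumption, not the original helper):

# Hypothetical implementation of the undefined get_embedding() helper.
import networkx as nx
from karateclub import DeepWalk

def get_embedding(edge_index, num_nodes):
    g = nx.Graph()
    g.add_nodes_from(range(num_nodes))
    g.add_edges_from(edge_index.t().tolist())
    dw = DeepWalk(dimensions=64)
    dw.fit(g)
    return dw.get_embedding()  # (num_nodes, 64) numpy array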
Example #16
class GCNResnet(nn.Module):
    def __init__(self, model, num_AU, num_classes, t=0, adj_file=None):

        super(GCNResnet, self).__init__()
        # For r3d (a 3D CNN), the output shape is (512, 1):
        # self.features = nn.Sequential(
        #     model.stem,
        #     model.layer1,
        #     model.layer2,
        #     model.layer3,
        #     model.layer4,
        #     model.avgpool,
        # )

        # For p3d
        self.features = P3D199(pretrained=False, num_classes=18)

        #self.num_classes = num_classes
        self.num_AU = num_AU
        #self.pooling = nn.MaxPool2d(14, 14)

        # Earlier GraphConvolution-based variant, superseded by the
        # GCNConv + SAGPool stack below:
        #self.gc1 = GraphConvolution(num_AU, 512)  # 1024
        #self.gc1_ = GraphConvolution(1, 512)
        #self.gc2 = GraphConvolution(1024, 512)  # origin
        #self.gc2 = GraphConvolution(512, 19)  # for p3d
        #self.gc2_ = GraphConvolution(512, 1)
        #self.relu = nn.LeakyReLU(0.2)
        #self.dropout = nn.Dropout(0.3)
        #self.fc1 = nn.Linear(19, 15)
        #self.fc2 = nn.Linear(15, num_classes)
        #_adj = gen_A(num_AU, t, adj_file)
        import pickle  # the adjacency matrix is stored pickled on disk
        self.adj_file = pickle.load(open(adj_file, 'rb'), encoding='utf-8')
        #self.A = Parameter(torch.from_numpy(adj_file).float())  # (19, 19)
        #self.graph_pool = SAGPooling(in_channels=1, ratio=0.5, GNN=GraphConv)

        ##### SAGPool configuration #####
        self.in_channels = 1  # x: (19, 1)
        self.nhid = 32
        self.num_classes = 7  # args.num_classes
        self.pooling_ratio = 0.5
        self.dropout_ratio = 0.5

        self.conv1 = GCNConv(in_channels=self.in_channels,
                             out_channels=self.nhid)
        self.score_layer = GCNConv(self.nhid, 1)
        self.pool1 = SAGPool(self.nhid * 3, ratio=self.pooling_ratio)
        self.conv2 = GCNConv(self.nhid, self.nhid)
        #self.pool2 = SAGPool(self.nhid, ratio=self.pooling_ratio)
        self.conv3 = GCNConv(self.nhid, self.nhid)
        #self.pool3 = SAGPool(self.nhid, ratio=self.pooling_ratio)

        self.lin1 = nn.Linear(self.nhid * 3 * 2 * 2, self.nhid)
        self.lin2 = nn.Linear(self.nhid, self.nhid // 2)
        self.lin3 = nn.Linear(self.nhid // 2, 3)  # hard-coded 3 outputs rather than self.num_classes

    def forward(self, feature, inp):
        # (the `inp` argument is unused)
        feature = self.features(feature)  # backbone output, used as GCN input
        # p3d output size: torch.Size([1, 19])
        feature_ = feature.transpose(0, 1)
        # after transpose: torch.Size([19, 1])

        # Build edge_index in COO format from the stored adjacency matrix.
        #adj = gen_adj(self.A).detach()  # (19, 19)
        A_coo = coo_matrix(self.adj_file)
        adj = np.asarray([A_coo.row, A_coo.col])
        edge_index = torch.from_numpy(adj).long().cuda()

        # 3 GCN layers
        gcn1 = F.relu(self.conv1(feature_, edge_index))
        gcn2 = F.relu(self.conv2(gcn1, edge_index))
        gcn3 = F.relu(self.conv3(gcn2, edge_index))
        gcn_feature = torch.cat((gcn1, gcn2, gcn3), dim=1)  # multi-scale node features
        # batch assignment for pooling
        #batch = np.arange(19)
        batch = torch.from_numpy(np.ones(18)).long().cuda()
        # SAGPooling layer
        x, edge_index, _, batch, _ = self.pool1(gcn_feature, edge_index, None,
                                                batch)

        # readout layer
        readout = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)
        x = readout.view(1, -1)  # flatten the pooled readout to a single row
        fc1 = F.relu(self.lin1(x))
        #x = F.dropout(x, p=self.dropout_ratio, training=self.training)
        fc2 = F.relu(self.lin2(fc1))

        x = F.log_softmax(self.lin3(fc2), dim=-1)

        return x, feature

    def get_config_optim(self, lr, lrp):
        return [
            {
                'params': self.features.parameters(),
                'lr': lr * lrp
            },
            {
                'params': self.conv1.parameters(),
                'lr': lr
            },
            {
                'params': self.conv2.parameters(),
                'lr': lr
            },
            {
                'params': self.conv3.parameters(),
                'lr': lr
            },
            {
                'params': self.pool1.parameters(),
                'lr': lr
            },
            {
                'params': self.score_layer.parameters(),
                'lr': lr
            },
            {
                'params': self.lin1.parameters(),
                'lr': lr
            },
            {
                'params': self.lin2.parameters(),
                'lr': lr
            },
            {
                'params': self.lin3.parameters(),
                'lr': lr
            },
        ]
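get_config_optim returns per-module parameter groups so the backbone can train with a smaller learning rate than the GCN head; typical usage (a sketch with hypothetical arguments):

model = GCNResnet(model=None, num_AU=18, num_classes=7, adj_file='adj.pkl')  # args assumed
optimizer = torch.optim.SGD(model.get_config_optim(lr=0.01, lrp=0.1),
                            momentum=0.9, weight_decay=1e-4)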