Example #1
    def __init__(self, input_dim, hidden_dim, output_dim, dropout):
        super(GCN, self).__init__()

        self.gc1 = GraphConvolution(input_dim, hidden_dim)
        self.activate = nn.LeakyReLU(0.2, inplace=True)
        self.gc2 = GraphConvolution(hidden_dim, output_dim)
        self.dropout = dropout
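
Only the constructor is excerpted above. As a rough, self-contained sketch of how such a two-layer GCN is typically wired end to end (the GraphConvolution below is a hypothetical dense stand-in, not the project's own layer):

import torch
import torch.nn as nn
import torch.nn.functional as F


class GraphConvolution(nn.Module):
    """Hypothetical dense stand-in: adj @ X @ W."""
    def __init__(self, in_features, out_features):
        super().__init__()
        self.linear = nn.Linear(in_features, out_features)

    def forward(self, x, adj):
        # Transform node features, then propagate along the normalized adjacency.
        return adj @ self.linear(x)


class GCN(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, dropout):
        super().__init__()
        self.gc1 = GraphConvolution(input_dim, hidden_dim)
        self.activate = nn.LeakyReLU(0.2, inplace=True)
        self.gc2 = GraphConvolution(hidden_dim, output_dim)
        self.dropout = dropout

    def forward(self, x, adj):
        x = self.activate(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        return self.gc2(x, adj)


# Toy usage: 5 nodes, 8 input features, 3 classes, self-loop-only adjacency.
model = GCN(input_dim=8, hidden_dim=16, output_dim=3, dropout=0.5)
logits = model(torch.rand(5, 8), torch.eye(5))   # shape (5, 3)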
Example #2
class GNNp(nn.Module):
    def __init__(self, opt, adj):
        super(GNNp, self).__init__()
        self.opt = opt
        self.adj = adj

        opt_ = dict([('in', opt['num_class']), ('out', opt['hidden_dim'])])
        self.m1 = GraphConvolution(opt_, adj)

        opt_ = dict([('in', opt['hidden_dim']), ('out', opt['num_class'])])
        self.m2 = GraphConvolution(opt_, adj)

        if opt['cuda']:
            self.cuda()

    def reset(self):
        self.m1.reset_parameters()
        self.m2.reset_parameters()

    def forward(self, x):
        x = F.dropout(x, self.opt['input_dropout'], training=self.training)
        x = self.m1(x)
        x = F.relu(x)
        x = F.dropout(x, self.opt['dropout'], training=self.training)
        x = self.m2(x)
        return x
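
To see how the dict-plus-adjacency constructor above is consumed, here is a toy usage with a stand-in GraphConvolution matching that calling convention (both the stand-in layer and the option values are assumptions for illustration only):

import torch
import torch.nn as nn
import torch.nn.functional as F


class GraphConvolution(nn.Module):
    """Stand-in layer with the dict-style config and baked-in adjacency used by GNNp."""
    def __init__(self, opt, adj):
        super().__init__()
        self.adj = adj
        self.linear = nn.Linear(opt['in'], opt['out'])

    def reset_parameters(self):
        self.linear.reset_parameters()

    def forward(self, x):
        return self.adj @ self.linear(x)


# Toy graph: 4 nodes, 3 classes; GNNp propagates class beliefs back through the graph.
opt = {'num_class': 3, 'hidden_dim': 8,
       'input_dropout': 0.5, 'dropout': 0.5, 'cuda': False}
adj = torch.eye(4)
gnn_p = GNNp(opt, adj)                                # GNNp as defined in the snippet above
probs = F.softmax(gnn_p(torch.rand(4, 3)), dim=-1)    # shape (4, 3)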
Example #3
    def __init__(self, input_dim, output_dim):
        super(GCN, self).__init__()

        self.input_dim = input_dim  # 1433
        self.output_dim = output_dim

        print('input dim:', input_dim)
        print('output dim:', output_dim)
        # print('num_features_nonzero:', num_features_nonzero)

        # self.layers = nn.Sequential(GraphConvolution(self.input_dim, args.hidden, num_features_nonzero,
        self.layers = nn.Sequential(
            GraphConvolution(self.input_dim,
                             args.hidden,
                             activation=F.relu,
                             dropout=False,
                             is_sparse_inputs=True),

            # GraphConvolution(args.hidden, output_dim, num_features_nonzero,
            GraphConvolution(args.hidden,
                             args.hidden1,
                             activation=F.relu,
                             dropout=False,
                             is_sparse_inputs=False),
            GraphConvolution(args.hidden1,
                             self.output_dim,
                             activation=lambda x: x,
                             dropout=args.dropout,
                             is_sparse_inputs=False),
        )
Example #4
    def __init__(self, input_dim, output_dim, num_features_nonzero):
        super(GCN, self).__init__()

        self.input_dim = input_dim  # 1433
        self.output_dim = output_dim

        print('input dim:', input_dim)
        print('output dim:', output_dim)
        print('num_features_nonzero:', num_features_nonzero)

        self.layers = nn.Sequential(
            GraphConvolution(self.input_dim,
                             args.hidden,
                             num_features_nonzero,
                             activation=F.relu,
                             dropout=0.5,
                             is_sparse_inputs=True),
            GraphConvolution(args.hidden,
                             32,
                             num_features_nonzero,
                             activation=F.relu,
                             dropout=0.5,
                             is_sparse_inputs=False),
        )

        self.out = nn.Linear(32, output_dim)
        self.dropout = nn.Dropout(0.5)
Example #5
    def __init__(self, nfeat, nhid, nclass, dropout, nembed):
        super(GCN, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nembed)
        self.lstm = nn.LSTM(nfeat, nembed, batch_first=True)
        self.flayer1 = nn.Sequential(nn.Linear(nembed, 8), nn.ReLU(True))
        self.flayer2 = nn.Sequential(nn.Linear(8, nclass))
        self.dropout = dropout
Example #6
    def __init__(self, nfeat, nnode, nhid, dropout):
        super(GCN, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nhid)
        self.dropout = dropout
        self.rd = Readout(nhid)
        self.fc1 = nn.Linear(nhid, 1)
        nn.init.kaiming_normal_(self.fc1.weight)
Example #7
    def __init__(self, sequence_length, num_feature, seed, activation='sigmoid', hidden_layer=10, hidden_feature=10, output_normalize='softmax'):
        super().__init__()
        self.save_hyperparameters()

        num_input_feature = sequence_length * num_feature
        self.adjacency = nn.Parameter(torch.rand(200, 200) + torch.eye(200), True)
        layers = [GraphConvolution(num_input_feature, hidden_feature)]
        for i in range(hidden_layer):
            layers.append(GraphConvolution(hidden_feature, hidden_feature))
        layers.append(GraphConvolution(hidden_feature, 1))
        self.layers = nn.ModuleList(layers)
        self.batch_norms = nn.ModuleList([nn.BatchNorm1d(200) for i in range(hidden_layer - 1)])
Example #8
    def __init__(self, opt, adj):
        super(GNNp, self).__init__()
        self.opt = opt
        self.adj = adj

        opt_ = dict([('in', opt['num_class']), ('out', opt['hidden_dim'])])
        self.m1 = GraphConvolution(opt_, adj)

        opt_ = dict([('in', opt['hidden_dim']), ('out', opt['num_class'])])
        self.m2 = GraphConvolution(opt_, adj)

        if opt['cuda']:
            self.cuda()
Example #9
    def __init__(self, opt, adj):
        super(GNNq, self).__init__()
        self.opt = opt
        self.adj = adj

        opt_ = dict([('in', opt['num_feature']), ('out', opt['hidden_dim'])])
        self.m1 = GraphConvolution(opt_, adj)

        opt_ = dict([('in', opt['hidden_dim']), ('out', opt['num_class'])])
        self.m2 = GraphConvolution(opt_, adj)
        
        #opt_ = dict([('in', opt['hidden_dim']), ('out', opt['num_class'])])
        #self.m3 = GraphConvolution(opt_, adj)  # used for the auxiliary network; conceptually a fully-connected layer, implemented as a GCN layer for convenience

        if opt['cuda']:
            self.cuda()
Example #10
    def __init__(self):
        super(GCN, self).__init__()

        # self.input_dim = input_dim # 1433
        # self.output_dim = output_dim

        # self.weight = torch.nn.Linear(249, 249).cuda()
        # #self.adj = torch.nn.Linear(249,249).cuda()
        # self.dropout = 0
        # self.activation = F.relu
        # self.adj = nn.Parameter(torch.randn(6, 6)).cuda()

        self.layers = nn.Sequential(
            GraphConvolution(),
            #GraphConvolution(),
            GraphConvolution(),
        )
Example #11
    def _build(self):
        self.layers.append(
            GraphConvolution(input_dim=self.input_dim,
                             output_dim=FLAGS.hidden1,
                             placeholders=self.placeholders,
                             act=tf.nn.relu,
                             dropout=True,
                             sparse_inputs=True,
                             logging=self.logging))

        self.layers.append(
            GraphConvolution(input_dim=FLAGS.hidden1,
                             output_dim=self.output_dim,
                             placeholders=self.placeholders,
                             act=lambda x: x,
                             dropout=True,
                             logging=self.logging))
Example #12
    def __init__(self, input_dim, hiddens_dim, output_dim, dropout):
        super(GCN, self).__init__()
        self.dropout = dropout
        self.feature_size = [input_dim] + hiddens_dim + [output_dim]

        self.layers = nn.ModuleList()
        for i in range(len(self.feature_size)-1):
            self.layers.append(GraphConvolution(input_dim=self.feature_size[i],
                                                output_dim=self.feature_size[i+1]))
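
The constructor above only builds the layer list; a minimal sketch of the matching forward pass (ReLU and dropout between layers, nothing after the output layer), again with a hypothetical dense GraphConvolution standing in for the project's layer:

import torch
import torch.nn as nn
import torch.nn.functional as F


class GraphConvolution(nn.Module):
    """Hypothetical dense stand-in layer used only for this sketch."""
    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    def forward(self, x, adj):
        return adj @ self.linear(x)


class GCN(nn.Module):
    def __init__(self, input_dim, hiddens_dim, output_dim, dropout):
        super().__init__()
        self.dropout = dropout
        self.feature_size = [input_dim] + hiddens_dim + [output_dim]
        self.layers = nn.ModuleList(
            GraphConvolution(input_dim=self.feature_size[i],
                             output_dim=self.feature_size[i + 1])
            for i in range(len(self.feature_size) - 1))

    def forward(self, x, adj):
        for i, layer in enumerate(self.layers):
            x = layer(x, adj)
            if i < len(self.layers) - 1:   # no activation/dropout after the last layer
                x = F.relu(x)
                x = F.dropout(x, self.dropout, training=self.training)
        return x


model = GCN(16, [32, 32], 7, dropout=0.5)
out = model(torch.rand(10, 16), torch.eye(10))   # shape (10, 7)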
Example #13
    def __init__(self, opt, adj):
        super(GNN_mix, self).__init__()
        self.opt = opt
        self.adj = adj

        opt_ = dict([('in', opt['num_feature']), ('out', 1000)])
        self.m1 = GraphConvolution(opt_, adj)

        #self.linear_m1_1 = nn.Linear(1000,500)
        #self.linear_m1_2 = nn.Linear(50,opt['num_class'] )

        opt_ = dict([('in', 1000), ('out', 500)])
        self.m2 = GraphConvolution(opt_, adj)

        #self.linear_m2_1 = nn.Linear(50,20)
        #self.linear_m2_2 = nn.Linear(20,opt['num_class'] )

        opt_ = dict([('in', 500), ('out', 100)])
        self.m3 = GraphConvolution(opt_, adj)

        #self.linear_m3_1 = nn.Linear(10,5 )
        #self.linear_m3_2 = nn.Linear(5,opt['num_class'] )

        opt_ = dict([('in', 100), ('out', opt['num_class'])])
        self.m4 = GraphConvolution(opt_, adj)
        """
        opt_ = dict([('in', opt['num_feature']), ('out', opt['hidden_dim'])])
        self.m1 = GraphConvolution(opt_, adj)

        opt_ = dict([('in', opt['hidden_dim']), ('out', opt['num_class'])])
        self.m2 = GraphConvolution(opt_, adj)
        """
        if opt['cuda']:
            self.cuda()
Example #14
    def __init__(self, embedding_dim, hidden_dim, vocab_size, output_dim):
        super(LSTM_GCN, self).__init__()
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        self.hidden_dim = hidden_dim  # number of LSTM hidden units
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)

        # self.input_dim = input_dim # 1433
        self.output_dim = output_dim

        # print('input dim:', input_dim)
        print('output dim:', output_dim)
        # print('num_features_nonzero:', num_features_nonzero)

        # self.layers = nn.Sequential(GraphConvolution(self.input_dim, args.hidden, num_features_nonzero,
        self.layers = nn.Sequential(
            GraphConvolution(
                self.hidden_dim,
                args.hidden,
                activation=F.relu,
                dropout=False,
                # is_sparse_inputs=True),
                is_sparse_inputs=False),

            # # GraphConvolution(args.hidden, output_dim, num_features_nonzero,
            # GraphConvolution(args.hidden, args.hidden1,
            #                  activation=F.relu,
            #                  dropout=False,
            #                  is_sparse_inputs=False),
            GraphConvolution(args.hidden,
                             self.output_dim,
                             activation=lambda x: x,
                             dropout=args.dropout,
                             is_sparse_inputs=False),
        )
Example #15
	def __init__(self, adj, num_v, num_classes, gc_dims, sc_dims, feat_dims, dropout=0.5):
		super(GGCN, self).__init__()

		terminal_cnt = 5
		actor_cnt = 1
		adj = adj + torch.eye(adj.size(0)).to(adj).detach()
		ident = torch.eye(adj.size(0)).to(adj)
		zeros = torch.zeros(adj.size(0), adj.size(1)).to(adj)
		self.adj = torch.cat([torch.cat([adj, ident, zeros], 1),
							  torch.cat([ident, adj, ident], 1),
							  torch.cat([zeros, ident, adj], 1)], 0).float()
		self.terminal = nn.Parameter(torch.randn(terminal_cnt, actor_cnt, feat_dims))

		self.gcl = GraphConvolution(gc_dims[0]+feat_dims, gc_dims[1], num_v, dropout=dropout)
		self.conv= StandConvolution(sc_dims, num_classes, dropout=dropout)

		nn.init.xavier_normal_(self.terminal)
Example #16
class GNN(nn.Module):
    def __init__(self, opt, adj):
        super(GNN, self).__init__()
        self.opt = opt
        self.adj = adj

        opt_ = dict([('in', opt['num_feature']), ('out', opt['hidden_dim'])])
        self.m1 = GraphConvolution(opt_, adj)

        opt_ = dict([('in', opt['hidden_dim']), ('out', opt['num_class'])])
        self.m2 = GraphConvolution(opt_, adj)

        if opt['cuda']:
            self.cuda()

    def reset(self):
        self.m1.reset_parameters()
        self.m2.reset_parameters()

    def forward(self, x):
        x = F.dropout(x, self.opt['input_dropout'], training=self.training)
        x = self.m1(x)
        x = F.relu(x)
        x = F.dropout(x, self.opt['dropout'], training=self.training)
        x = self.m2(x)
        return x

    def forward_partition(self, x, adj_ss):
        x = F.dropout(x, self.opt['input_dropout'], training=self.training)
        x = self.m1(x, adj_ss)
        x = F.relu(x)
        x = F.dropout(x, self.opt['dropout'], training=self.training)
        x = self.m2.forward_partition(x, adj_ss)
        return x

    def forward_mix(self, x, target, target_discrete, idx, opt, mixup_layer):
        layer = random.choice(mixup_layer)
        if layer == 0:
            x, target, idx = get_augmented_network_input(
                self, x, target, target_discrete, idx, opt)
        x = F.dropout(x, self.opt['input_dropout'], training=self.training)
        x = self.m1(x)
        x = F.relu(x)
        if layer == 1:
            x, target, idx = get_augmented_network_input(
                self, x, target, target_discrete, idx, opt)
        x = F.dropout(x, self.opt['dropout'], training=self.training)
        x = self.m2(x)
        return x, target, idx

    def forward_aux(self,
                    x,
                    target=None,
                    train_idx=None,
                    mixup_input=False,
                    mixup_hidden=False,
                    mixup_alpha=0.0,
                    layer_mix=None):

        if mixup_hidden == True or mixup_input == True:
            if mixup_hidden == True:
                layer_mix = random.choice(layer_mix)
            elif mixup_input == True:
                layer_mix = 0

            if layer_mix == 0:
                x, target_a, target_b, lam = mixup_gnn_hidden(
                    x, target, train_idx, mixup_alpha)

            x = F.dropout(x, self.opt['input_dropout'], training=self.training)

            x = self.m1.forward_aux(x)
            x = F.relu(x)
            if layer_mix == 1:
                x, target_a, target_b, lam = mixup_gnn_hidden(
                    x, target, train_idx, mixup_alpha)

            x = F.dropout(x, self.opt['dropout'], training=self.training)
            x = self.m2.forward_aux(x)

            return x, target_a, target_b, lam

        else:

            x = F.dropout(x, self.opt['input_dropout'], training=self.training)
            x = self.m1.forward_aux(x)
            x = F.relu(x)
            x = F.dropout(x, self.opt['dropout'], training=self.training)
            x = self.m2.forward_aux(x)
            return x
Example #17
import torch
from utils import load_data
from layer import GraphConvolution
import torch.optim as optim
import torch.nn.functional as f

device = 'cuda:0'
dataset = 'cora'
F, A, y, C, train_mask, val_mask, test_mask = load_data(dataset, device)
A = A.float()

N = A.shape[0]
H = 16
D = F.shape[1]

model = GraphConvolution(N, C, H, D, 2, A).to(device)
opt = optim.Adam(model.parameters(), lr=0.01)

loss_window = [0, 0]
loss_inc = 0

for epoch in range(200):
    model.train()
    output = model(F)
    loss = f.nll_loss(torch.log(output[train_mask] + 1e-10), y[train_mask])

    opt.zero_grad()
    loss.backward()
    opt.step()

    train_acc = (output[train_mask].argmax(dim=1) == y[train_mask]).float().mean()
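
The excerpt stops inside the training loop; one plausible way to finish the loop body, using only names the script already defines (this continuation is an assumption, not part of the original):

    # Validation accuracy per epoch, computed without tracking gradients.
    model.eval()
    with torch.no_grad():
        val_out = model(F)
        val_acc = (val_out[val_mask].argmax(dim=1) == y[val_mask]).float().mean()
    print(f'epoch {epoch:03d}  loss {loss.item():.4f}  '
          f'train acc {train_acc.item():.4f}  val acc {val_acc.item():.4f}')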
Example #18
    def __init__(self, nfeat, nhid, nclass, dropout):  # nfeat: number of input features per node; nhid: number of hidden units; nclass: number of output classes
        super(GCN, self).__init__()  # super().__init__() invokes the parent class constructor

        self.gc1 = GraphConvolution(nfeat, nhid)  # gc1: input size nfeat, output size nhid
        self.gc2 = GraphConvolution(nhid, nclass)  # gc2: input size nhid, output size nclass
        self.dropout = dropout
Example #19
class GNN_mix(nn.Module):
    def __init__(self, opt, adj):
        super(GNN_mix, self).__init__()
        self.opt = opt
        self.adj = adj

        opt_ = dict([('in', opt['num_feature']), ('out', 1000)])
        self.m1 = GraphConvolution(opt_, adj)

        #self.linear_m1_1 = nn.Linear(1000,500)
        #self.linear_m1_2 = nn.Linear(50,opt['num_class'] )

        opt_ = dict([('in', 1000), ('out', 500)])
        self.m2 = GraphConvolution(opt_, adj)

        #self.linear_m2_1 = nn.Linear(50,20)
        #self.linear_m2_2 = nn.Linear(20,opt['num_class'] )

        opt_ = dict([('in', 500), ('out', 100)])
        self.m3 = GraphConvolution(opt_, adj)

        #self.linear_m3_1 = nn.Linear(10,5 )
        #self.linear_m3_2 = nn.Linear(5,opt['num_class'] )

        opt_ = dict([('in', 100), ('out', opt['num_class'])])
        self.m4 = GraphConvolution(opt_, adj)
        """
        opt_ = dict([('in', opt['num_feature']), ('out', opt['hidden_dim'])])
        self.m1 = GraphConvolution(opt_, adj)

        opt_ = dict([('in', opt['hidden_dim']), ('out', opt['num_class'])])
        self.m2 = GraphConvolution(opt_, adj)
        """
        if opt['cuda']:
            self.cuda()

    def reset(self):
        self.m1.reset_parameters()
        self.m2.reset_parameters()

    def forward(self,
                x,
                target=None,
                train_idx=None,
                mixup_input=False,
                mixup_hidden=False,
                mixup_alpha=0.0,
                layer_mix=None):
        """    
        #import pdb; pdb.set_trace()
        if target is not None: 
            x, target_a, target_b, lam = mixup_gnn_hidden(x, target, train_idx, mixup_alpha)
        x = F.dropout(x, self.opt['input_dropout'], training=self.training)
        x = self.m1(x)
        x = F.relu(x)
        if target is not None: 
            x, target_a, target_b, lam = mixup_gnn_hidden(x, target, train_idx, mixup_alpha)
        x = F.dropout(x, self.opt['dropout'], training=self.training)
        x = self.m2(x)
        if target is not None:
            return x, target_a, target_b, lam
        else: 
            return x
        """
        #import pdb; pdb.set_trace()
        if mixup_hidden == True or mixup_input == True:
            if mixup_hidden == True:
                layer_mix = random.randint(1, layer_mix)
            elif mixup_input == True:
                layer_mix = 0

            if layer_mix == 0:
                x, target_a, target_b, lam = mixup_gnn_hidden(
                    x, target, train_idx, mixup_alpha)

            x = F.dropout(x, self.opt['input_dropout'], training=self.training)

            x = self.m1(x)
            x = F.relu(x)
            if layer_mix == 1:
                x, target_a, target_b, lam = mixup_gnn_hidden(
                    x, target, train_idx, mixup_alpha)

            x = F.dropout(x, self.opt['dropout'], training=self.training)
            x = self.m2(x)
            x = F.relu(x)

            if layer_mix == 2:
                x, target_a, target_b, lam = mixup_gnn_hidden(
                    x, target, train_idx, mixup_alpha)

            x = F.dropout(x, self.opt['dropout'], training=self.training)
            x = self.m3(x)
            x = F.relu(x)

            if layer_mix == 3:
                x, target_a, target_b, lam = mixup_gnn_hidden(
                    x, target, train_idx, mixup_alpha)

            x = F.dropout(x, self.opt['dropout'], training=self.training)
            x = self.m4(x)

            return x, target_a, target_b, lam
        else:
            x = F.dropout(x, self.opt['input_dropout'], training=self.training)
            x = self.m1(x)
            x = F.relu(x)
            x = F.dropout(x, self.opt['dropout'], training=self.training)
            x = self.m2(x)
            x = F.relu(x)
            x = F.dropout(x, self.opt['input_dropout'], training=self.training)
            x = self.m3(x)
            x = F.relu(x)
            x = F.dropout(x, self.opt['dropout'], training=self.training)
            x = self.m4(x)
            return x

    """
Example #20
    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout
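
As with most snippets above, only the constructor is excerpted; the canonical forward pass for this two-layer architecture (a sketch following the standard Kipf & Welling design, not necessarily this project's exact code) would be:

    def forward(self, x, adj):
        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        x = self.gc2(x, adj)
        return F.log_softmax(x, dim=1)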