def __init__(self, g, in_dim, num_hidden, num_classes, num_layers,
             activation=F.relu, dropout=0.5, log_softmax=False,
             projection_matrix=None, smooth_filter_k=2, *args, **kwargs):
    super(MultiLevelGCN, self).__init__()
    self.g = g
    self.projections = projection_matrix
    self.layers = nn.ModuleList()
    # input layer
    self.layers.append(GraphConv(in_dim, num_hidden, activation=activation))
    # hidden layers
    self.layers.append(SmoothFilter())
    self.smooth_filter_k = smooth_filter_k
    # for i in range(num_layers - 1):
    #     self.layers.append(
    #         GraphConv(num_hidden, num_hidden, activation=activation))
    # TODO replace with SGCN
    # output layer
    self.layers.append(GraphConv(num_hidden, num_classes))
    self.dropout = nn.Dropout(p=dropout)  # TODO
    self.log_softmax = log_softmax
def __init__(self, in_feats, out_feats, n_units, dropout, activation):
    super(GCN, self).__init__()
    assert activation in ["relu", "elu"]
    self.activation = getattr(F, activation)
    self.gc1 = GraphConv(in_feats, n_units, activation=self.activation)
    self.gc2 = GraphConv(n_units, out_feats)
    self.dropout = dropout
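# Only the constructor appears above, so here is a minimal forward sketch
# for this two-layer GCN, assuming the usual DGL calling convention
# layer(graph, features). Since `dropout` is stored as a float rather than
# a module, the functional F.dropout is applied between the convolutions.
# This is an illustrative assumption, not code from the source.
def forward(self, g, features):
    h = self.gc1(g, features)  # activation is baked into gc1
    h = F.dropout(h, p=self.dropout, training=self.training)
    return self.gc2(g, h)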
def __init__(self, in_dim, out_dim, activation, dropout, batch_norm,
             edge_norm=False, residual=False):
    super().__init__()
    self.in_channels = in_dim
    self.out_channels = out_dim
    self.edge_norm = edge_norm
    self.batch_norm = batch_norm
    self.residual = residual
    if in_dim != out_dim:
        self.residual = False
    self.batchnorm_h = nn.BatchNorm1d(out_dim)
    self.activation = activation
    self.dropout = nn.Dropout(dropout)
    if edge_norm:
        self.conv = GraphConv(in_dim, out_dim, norm='none',
                              weight=True, bias=True,
                              allow_zero_in_degree=True)
    else:
        self.conv = GraphConv(in_dim, out_dim, norm='both',
                              weight=True, bias=True,
                              allow_zero_in_degree=True)
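# A forward sketch for the layer above, assumed from the stored flags
# rather than taken from the source: convolution, then batch norm,
# activation, residual connection, and dropout in the conventional order.
def forward(self, g, feature):
    h_in = feature  # kept for the residual connection
    h = self.conv(g, feature)
    if self.batch_norm:
        h = self.batchnorm_h(h)
    if self.activation:
        h = self.activation(h)
    if self.residual:
        h = h_in + h
    return self.dropout(h)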
def __init__(self, in_feats, n_hidden, n_classes, dropout):
    super(GCN_DGL, self).__init__()
    self.layers = nn.ModuleList()
    self.embedding_h = nn.Linear(in_feats, n_hidden)
    # input layer
    self.layers.append(
        GraphConv(in_feats, n_hidden, allow_zero_in_degree=True))
        # , activation=activation))
    for _ in range(3):
        self.layers.append(
            GraphConv(n_hidden, n_hidden, allow_zero_in_degree=True))
            # , activation=activation))
    # output layer
    # self.layers.append(GraphConv(n_hidden, n_hidden))
    self.dropout = nn.Dropout(p=dropout)
    self.MLP = MLPReadout(n_hidden, n_classes)
    self.fc1 = nn.Linear(n_hidden, n_hidden)
    self.fc2 = nn.Linear(n_hidden, n_classes)
    self.bn = nn.BatchNorm1d(n_hidden)
    self.avgpooling = AvgPooling()
def __init__(self):
    super().__init__()
    self.layer1 = GraphConv(4, 64)
    self.layer2 = GraphConv(64, 64)
    self.layer3 = GraphConv(64, 64)
    self.layer4 = GraphConv(64, 64)
    self.layer5 = GraphConv(64, 3)
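# A plausible forward pass for the five-layer stack above (assumed; the
# source shows only the constructor, and ReLU between layers is a common
# choice that the original does not confirm).
def forward(self, g, features):
    h = F.relu(self.layer1(g, features))
    h = F.relu(self.layer2(g, h))
    h = F.relu(self.layer3(g, h))
    h = F.relu(self.layer4(g, h))
    return self.layer5(g, h)  # raw 3-dimensional output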
def __init__(self, g, in_feats, n_hidden, n_classes, n_layers, activation,
             pooling, dropout):
    super(Classifier, self).__init__()
    self.g = g
    self.layers = nn.ModuleList()
    # input layer
    self.layers.append(
        GraphConv(in_feats, n_hidden, activation=activation,
                  allow_zero_in_degree=True, norm='both'))
    # hidden layers
    for i in range(n_layers - 1):
        self.layers.append(
            GraphConv(n_hidden, n_hidden, activation=activation,
                      allow_zero_in_degree=True, norm='both'))
    # output layer
    self.dropout = nn.Dropout(p=dropout)
    if pooling == 'sum':
        self.pool = SumPooling()
    elif pooling == 'mean':
        self.pool = AvgPooling()
    elif pooling == 'max':
        self.pool = MaxPooling()
    else:
        raise NotImplementedError
    self.classify = nn.Linear(n_hidden, n_classes)
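# A forward sketch for this graph classifier, assumed from the modules it
# builds (not source code): run the GraphConv stack with dropout between
# layers, apply the selected readout, then the linear head.
def forward(self, g, h):
    for i, layer in enumerate(self.layers):
        if i != 0:
            h = self.dropout(h)
        h = layer(g, h)
    hg = self.pool(g, h)  # graph-level readout (sum/mean/max)
    return self.classify(hg)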
def __init__(self, in_dim, out_dim, activation, dropout, batch_norm,
             residual=False, dgl_builtin=False):
    super().__init__()
    self.in_channels = in_dim
    self.out_channels = out_dim
    self.batch_norm = batch_norm
    self.residual = residual
    self.dgl_builtin = dgl_builtin
    # self.W_msg = nn.Linear(in_dim + edims, out_dim)
    # self.W_apply = nn.Linear(in_dim + out_dim, out_dim)
    if in_dim != out_dim:
        self.residual = False
    self.batchnorm_h = nn.BatchNorm1d(out_dim)
    self.activation = activation
    # forward throws an error without the dropout
    self.dropout = dropout
    if not self.dgl_builtin:
        self.apply_mod = NodeApplyModule(in_dim, out_dim)
    elif dgl.__version__ < "0.5":
        self.conv = GraphConv(in_dim, out_dim)
    else:
        self.conv = GraphConv(in_dim, out_dim, norm='none',
                              allow_zero_in_degree=True)
def __init__(self):
    super().__init__()
    self.layer1 = GraphConv(4, 128)
    self.layer2 = GraphConv(128, 128)
    self.layer3 = GraphConv(128, 128)
    self.layer4 = GraphConv(128, 128)
    self.layer5 = GraphConv(128, 3)
def __init__(self, in_dim, hidden_dim, n_classes):
    super(Classifier, self).__init__()
    self.conv1 = GraphConv(in_dim, hidden_dim)
    self.conv2 = GraphConv(hidden_dim, hidden_dim)
    self.conv3 = GraphConv(hidden_dim, hidden_dim)
    self.conv4 = GraphConv(hidden_dim, hidden_dim)
    self.classify = nn.Linear(hidden_dim, n_classes)
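# A forward sketch in the style of the classic DGL graph-classification
# tutorial (an assumption: degree features require in_dim == 1, and the
# mean readout is illustrative, not from the source).
def forward(self, g):
    h = g.in_degrees().view(-1, 1).float()  # node degree as input feature
    h = F.relu(self.conv1(g, h))
    h = F.relu(self.conv2(g, h))
    h = F.relu(self.conv3(g, h))
    h = F.relu(self.conv4(g, h))
    g.ndata['h'] = h
    hg = dgl.mean_nodes(g, 'h')  # average node embeddings per graph
    return self.classify(hg)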
def __init__(self, g, in_dim, num_hidden, num_classes, num_layers,
             activation=F.relu, dropout=0.5, log_softmax=False,
             *args, **kwargs):
    super(GCN, self).__init__()
    self.g = g
    self.layers = nn.ModuleList()
    # input layer
    self.layers.append(GraphConv(in_dim, num_hidden, activation=activation))
    # hidden layers
    for i in range(num_layers - 1):
        self.layers.append(
            GraphConv(num_hidden, num_hidden, activation=activation))
    # output layer
    self.layers.append(GraphConv(num_hidden, num_classes))
    self.dropout = nn.Dropout(p=dropout)
    self.log_softmax = log_softmax
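# A forward sketch for the node classifier above (assumed): the graph is
# stored on the module, dropout is applied before every layer except the
# first, and the stored log_softmax flag selects the output normalization.
def forward(self, features):
    h = features
    for i, layer in enumerate(self.layers):
        if i != 0:
            h = self.dropout(h)
        h = layer(self.g, h)
    if self.log_softmax:
        h = F.log_softmax(h, dim=1)
    return h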
def __init__(self, gcn_in_feats, gcn_out_feats, gcn_num_layers,
             han_num_meta_path, han_in_feats, han_hidden_feats,
             han_head_list, han_dropout, fc_hidden_feats):
    super().__init__()
    self.gcns = nn.ModuleList(
        [GraphConv(gcn_in_feats, gcn_out_feats, activation=F.relu)])
    self.gcns.extend(
        [GraphConv(gcn_out_feats, gcn_out_feats, activation=F.relu)
         for _ in range(gcn_num_layers - 1)])
    self.han = HAN(num_meta_paths=han_num_meta_path,
                   in_feats=han_in_feats,
                   hidden_feats=han_hidden_feats,
                   head_list=han_head_list,
                   dropout=han_dropout)
    self.fc = nn.Sequential(
        nn.Linear(gcn_out_feats + han_hidden_feats * han_head_list[-1] * 2,
                  fc_hidden_feats),
        nn.ELU(),
        nn.Linear(fc_hidden_feats, 1))
def __init__(self, in_channels, out_channels, hiddens=[16],
             activations=['relu'], dropout=0.5, weight_decay=5e-4,
             lr=0.01, use_bias=True):
    super().__init__()
    self.layers = ModuleList()
    inc = in_channels
    for hidden, activation in zip(hiddens, activations):
        layer = GraphConv(inc, hidden,
                          activation=get_activation(activation),
                          bias=use_bias)
        self.layers.append(layer)
        inc = hidden
    # output layer
    self.layers.append(GraphConv(inc, out_channels))
    self.dropout = Dropout(p=dropout)
    self.compile(loss=torch.nn.CrossEntropyLoss(),
                 optimizer=optim.Adam(self.parameters(), lr=lr,
                                      weight_decay=weight_decay),
                 metrics=[Accuracy()])
def __init__(self, in_feats=166, n_hidden=76, num_layers=2, n_classes=2,
             classifier_hidden=510):
    # default parameters follow the official config
    super(EvolveGCNH, self).__init__()
    self.num_layers = num_layers
    self.pooling_layers = nn.ModuleList()
    self.recurrent_layers = nn.ModuleList()
    self.gnn_convs = nn.ModuleList()
    self.gcn_weights_list = nn.ParameterList()
    self.pooling_layers.append(TopK(in_feats, n_hidden))
    # similar to EvolveGCNO
    self.recurrent_layers.append(
        MatGRUCell(in_feats=in_feats, out_feats=n_hidden))
    self.gcn_weights_list.append(
        Parameter(torch.Tensor(in_feats, n_hidden)))
    self.gnn_convs.append(
        GraphConv(in_feats=in_feats, out_feats=n_hidden, bias=False,
                  activation=nn.RReLU(), weight=False))
    for _ in range(num_layers - 1):
        self.pooling_layers.append(TopK(n_hidden, n_hidden))
        self.recurrent_layers.append(
            MatGRUCell(in_feats=n_hidden, out_feats=n_hidden))
        self.gcn_weights_list.append(
            Parameter(torch.Tensor(n_hidden, n_hidden)))
        self.gnn_convs.append(
            GraphConv(in_feats=n_hidden, out_feats=n_hidden, bias=False,
                      activation=nn.RReLU(), weight=False))
    self.mlp = nn.Sequential(nn.Linear(n_hidden, classifier_hidden),
                             nn.ReLU(),
                             nn.Linear(classifier_hidden, n_classes))
    self.reset_parameters()
def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation,
             dropout, use_layernorm=True):
    super().__init__()
    self.layers = nn.ModuleList()
    self.use_layernorm = use_layernorm
    # construct input layer
    self.layers.append(GraphConv(in_feats, n_hidden, activation=activation))
    # construct hidden layers
    for i in range(n_layers - 1):
        self.layers.append(
            GraphConv(n_hidden, n_hidden, activation=activation))
    # construct output layer
    self.layers.append(GraphConv(n_hidden, n_classes))
    self.dropout = nn.Dropout(p=dropout)
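# The constructor above stores use_layernorm but creates no norm modules,
# so a functional layer norm between hidden layers is one plausible reading.
# A forward sketch under that assumption (not source code):
def forward(self, g, features):
    h = features
    for i, layer in enumerate(self.layers):
        if i != 0:
            h = self.dropout(h)
        h = layer(g, h)
        if self.use_layernorm and i != len(self.layers) - 1:
            h = F.layer_norm(h, h.shape[1:])  # no learned affine parameters
    return h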
def __init__(self, in_dim, out_dim, activation, dropout, batch_norm,
             residual=False, dgl_builtin=False):
    super().__init__()
    self.in_channels = in_dim
    self.out_channels = out_dim
    self.batch_norm = batch_norm
    self.residual = residual
    self.dgl_builtin = dgl_builtin
    if in_dim != out_dim:
        self.residual = False
    self.batchnorm_h = nn.BatchNorm1d(out_dim)
    self.activation = activation
    self.dropout = nn.Dropout(dropout)
    if not self.dgl_builtin:
        self.apply_mod = NodeApplyModule(in_dim, out_dim)
    elif dgl.__version__ < "0.5":
        self.conv = GraphConv(in_dim, out_dim)
    else:
        self.conv = GraphConv(in_dim, out_dim, allow_zero_in_degree=True)
def __init__(self, g, nfeat, nhid, nclass, dropout, depth, residual_type):
    super(GResNet, self).__init__()
    self.graph = g
    self.depth = depth
    self.gc_list = nn.ModuleList()
    self.residual_weight_list = nn.ParameterList()
    self.residual_type = residual_type
    self.adj = nn.Parameter(g.adjacency_matrix(), requires_grad=False)
    if self.depth == 1:
        self.gc_list.append(GraphConv(nfeat, nclass))
        self.residual_weight_list.append(
            nn.Parameter(torch.FloatTensor(nfeat, nclass)))
    else:
        for i in range(self.depth):
            if i == 0:
                self.gc_list.append(GraphConv(nfeat, nhid))
                self.residual_weight_list.append(
                    nn.Parameter(torch.FloatTensor(nfeat, nhid)))
            elif i == self.depth - 1:
                self.gc_list.append(GraphConv(nhid, nclass))
                self.residual_weight_list.append(
                    nn.Parameter(torch.FloatTensor(nhid, nclass)))
            else:
                self.gc_list.append(GraphConv(nhid, nhid))
                self.residual_weight_list.append(
                    nn.Parameter(torch.FloatTensor(nhid, nhid)))
    for i in range(self.depth):
        stdv = 1. / sqrt(self.residual_weight_list[i].size(1))
        self.residual_weight_list[i].data.uniform_(-stdv, stdv)
    self.dropout = dropout
def __init__(self, in_features, out_features, hids=[16], acts=['relu'],
             dropout=0.5, weight_decay=5e-4, lr=0.01, bias=True):
    super().__init__()
    conv = []
    for hid, act in zip(hids, acts):
        conv.append(GraphConv(in_features, hid, bias=bias))
        conv.append(activations.get(act))
        conv.append(nn.Dropout(dropout))
        in_features = hid
    conv.append(GraphConv(in_features, out_features))
    conv = Sequential(*conv, inverse=True)  # `inverse=True` is important
    self.conv = conv
    self.compile(loss=nn.CrossEntropyLoss(),
                 optimizer=optim.Adam(
                     [dict(params=conv[0].parameters(),
                           weight_decay=weight_decay),
                      dict(params=conv[1:].parameters(),
                           weight_decay=0.)],
                     lr=lr),
                 metrics=[Accuracy()])
def __init__(self, in_feats=166, n_hidden=256, num_layers=2, n_classes=2,
             classifier_hidden=307):
    # default parameters follow the official config
    super(EvolveGCNO, self).__init__()
    self.num_layers = num_layers
    self.recurrent_layers = nn.ModuleList()
    self.gnn_convs = nn.ModuleList()
    self.gcn_weights_list = nn.ParameterList()
    # In the paper, EvolveGCN-O uses an LSTM as its RNN layer, but the
    # official code uses a GRU. We follow the official code here.
    # See: https://github.com/IBM/EvolveGCN/blob/90869062bbc98d56935e3d92e1d9b1b4c25be593/egcn_o.py#L53
    # PS: using torch.nn.LSTM directly, as in
    # [pyg_temporal](github.com/benedekrozemberczki/pytorch_geometric_temporal/blob/master/torch_geometric_temporal/nn/recurrent/evolvegcno.py),
    # performs worse than using torch.nn.GRU.
    # PPS: torch.nn.GRU cannot match the manually implemented GRU cell in
    # the official code, so we follow the official code here.
    self.recurrent_layers.append(
        MatGRUCell(in_feats=in_feats, out_feats=n_hidden))
    self.gcn_weights_list.append(
        Parameter(torch.Tensor(in_feats, n_hidden)))
    self.gnn_convs.append(
        GraphConv(in_feats=in_feats, out_feats=n_hidden, bias=False,
                  activation=nn.RReLU(), weight=False))
    for _ in range(num_layers - 1):
        self.recurrent_layers.append(
            MatGRUCell(in_feats=n_hidden, out_feats=n_hidden))
        self.gcn_weights_list.append(
            Parameter(torch.Tensor(n_hidden, n_hidden)))
        self.gnn_convs.append(
            GraphConv(in_feats=n_hidden, out_feats=n_hidden, bias=False,
                      activation=nn.RReLU(), weight=False))
    self.mlp = nn.Sequential(nn.Linear(n_hidden, classifier_hidden),
                             nn.ReLU(),
                             nn.Linear(classifier_hidden, n_classes))
    self.reset_parameters()
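# Both EvolveGCN constructors end with reset_parameters(), which is not
# shown. A minimal sketch of what it might do, assuming the uninitialized
# GCN weight matrices are the only tensors that need it (illustrative, not
# the official code):
def reset_parameters(self):
    for weight in self.gcn_weights_list:
        nn.init.xavier_uniform_(weight)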
def __init__(self, mode, in_dim, hidden_dim, n_classes):
    super(GCNClassifier, self).__init__()
    self.mode = mode
    self.conv1 = GraphConv(in_dim, hidden_dim)
    self.conv2 = GraphConv(hidden_dim, hidden_dim)  # graph attention network / gated GNN
    # self.conv3 = GraphConv(hidden_dim, hidden_dim)  # graph attention network / gated GNN
    self.classify = nn.Linear(hidden_dim, n_classes)
def __init__(self, n_in, n_hidden, num_classes):
    super(GCN, self).__init__()
    layers = torch.nn.ModuleList()
    layers.append(GraphConv(n_in, n_hidden, activation=torch.relu))
    # N_HIDDEN is a module-level constant giving the number of hidden layers
    for _ in range(N_HIDDEN):
        layers.append(GraphConv(n_hidden, n_hidden, activation=torch.relu))
    layers.append(GraphConv(n_hidden, num_classes))
    self.layers = layers
def __init__(self, in_dim, hidden_dim, n_classes):
    super(ClassifierAttnMulti, self).__init__()
    self.conv1 = GraphConv(in_dim, hidden_dim, weight=True)
    self.conv2 = GraphConv(hidden_dim, hidden_dim)
    self.attn = nn.Linear(hidden_dim, 1)
    self.attnact = nn.Softmax(dim=1)
    # note: the input size is hard-coded, so this assumes hidden_dim == 64
    self.classify2 = nn.Linear(64, n_classes)
    self.act = nn.Sigmoid()
def __init__(self, g, n_infeat, n_hidden, n_classes, n_layers, activation):
    super(GCN, self).__init__()
    self.g = g
    self.layers = nn.ModuleList()
    self.layers.append(GraphConv(n_infeat, n_hidden, activation=activation))
    for i in range(n_layers - 1):
        self.layers.append(
            GraphConv(n_hidden, n_hidden, activation=activation))
    self.layers.append(GraphConv(n_hidden, n_classes))
def __init__(self, input_size: int, hidden_size: int):
    super(DCGRUCell, self).__init__()
    self.hidden_size = hidden_size
    self.ru_gate_g_conv = GraphConv(input_size + hidden_size,
                                    hidden_size * 2)
    self.candidate_g_conv = GraphConv(input_size + hidden_size, hidden_size)
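# A GRU-style forward sketch for the cell above, assumed from the layer
# names (ru_gate_g_conv produces the concatenated reset and update gates,
# candidate_g_conv the candidate state); not source code.
def forward(self, g, x, h):
    ru = torch.sigmoid(self.ru_gate_g_conv(g, torch.cat([x, h], dim=-1)))
    r, u = ru.chunk(2, dim=-1)  # split into reset and update gates
    c = torch.tanh(self.candidate_g_conv(g, torch.cat([x, r * h], dim=-1)))
    return u * h + (1.0 - u) * c  # blend old state with candidate state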
def __init__(self):
    super(GraphUNet, self).__init__()
    scale = 2
    # downsampling path: conv + top-k pooling at each level
    self.do_conv1 = GraphConv(in_feats=1, out_feats=1)
    self.do_pool1 = TopKPooling(frac=0.75, in_feat=1, out_feat=1)
    self.do_conv2 = GraphConv(in_feats=1, out_feats=5 * scale)
    self.do_pool2 = TopKPooling(frac=0.75, in_feat=5 * scale, out_feat=5 * scale)
    self.do_conv3 = GraphConv(in_feats=5 * scale, out_feats=7 * scale)
    self.do_pool3 = TopKPooling(frac=0.75, in_feat=7 * scale, out_feat=7 * scale)
    self.do_conv4 = GraphConv(in_feats=7 * scale, out_feats=9 * scale)
    self.do_pool4 = TopKPooling(frac=0.75, in_feat=9 * scale, out_feat=9 * scale)
    # bottleneck
    self.bn_conv = GraphConv(in_feats=9 * scale, out_feats=9 * scale)
    # upsampling path: unpool + conv at each level
    self.up_pool1 = UpPool(feat_dim=9 * scale)
    self.up_conv1 = GraphConv(in_feats=9 * scale, out_feats=7 * scale)
    self.up_pool2 = UpPool(feat_dim=7 * scale)
    self.up_conv2 = GraphConv(in_feats=7 * scale, out_feats=5 * scale)
    self.up_pool3 = UpPool(feat_dim=5 * scale)
    self.up_conv3 = GraphConv(in_feats=5 * scale, out_feats=1)
    self.up_pool4 = UpPool(feat_dim=1)
    self.up_conv4 = GraphConv(in_feats=1, out_feats=1)
    self.broadcast = Broadcasting()
    self.b_conv = GraphConv(in_feats=1, out_feats=1)
    self.proj = nn.Sequential(
        nn.Linear(1, 20), nn.LeakyReLU(-0.8), nn.Tanh(),
        nn.Linear(20, 40), nn.LeakyReLU(-0.8), nn.Tanh(),
        nn.Linear(40, 30), nn.LeakyReLU(-0.8), nn.Tanh(),
        nn.Linear(30, 10), nn.LeakyReLU(-0.8), nn.Tanh(),
        nn.Linear(10, 5), nn.LeakyReLU(-0.8), nn.Tanh(),
        nn.Linear(5, 3))
def __init__(self, in_dim, hidden_dim, n_classes, saliency=False):
    super(GCNReg_1mlp, self).__init__()
    self.conv1 = GraphConv(in_dim, hidden_dim)
    self.conv2 = GraphConv(hidden_dim, hidden_dim)
    # self.conv3 = GraphConv(hidden_dim, hidden_dim)
    self.classify1 = nn.Linear(hidden_dim, hidden_dim)
    # self.classify2 = nn.Linear(hidden_dim, hidden_dim)
    self.classify3 = nn.Linear(hidden_dim, n_classes)
    self.saliency = saliency
def __init__(self, node_dim, hidden_dim, propagation_depth: int = 5):
    super(GCNGNN, self).__init__()
    self.convolutions = nn.ModuleList()
    self.convolutions.append(GraphConv(node_dim, hidden_dim))
    for _ in range(propagation_depth - 1):
        self.convolutions.append(GraphConv(hidden_dim, hidden_dim))
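# A minimal message-passing forward for the stack above (assumed; the ReLU
# between layers is illustrative, since no activation is configured in the
# constructor):
def forward(self, g, h):
    for conv in self.convolutions:
        h = F.relu(conv(g, h))
    return h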
def __init__(self, dev, feature_dim: int = 32, embedding_dim: int = 64):
    super().__init__(dev)
    self.feature_dim = feature_dim
    self.embedding_dim = embedding_dim
    self.embed = GraphConv(feature_dim, embedding_dim)
    self.conv = GraphConv(embedding_dim, embedding_dim)
    self.fc = nn.Linear(embedding_dim, 2)
    self.activate = nn.ReLU()
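# A forward sketch matching the modules above (an assumption, not source
# code): embed, convolve, then score each node with the two-way head.
def forward(self, g, x):
    h = self.activate(self.embed(g, x))
    h = self.activate(self.conv(g, h))
    return self.fc(h)  # two logits per node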
def __init__(self, g, n_layers, input_size, hidden_size, output_size,
             nonlinearity, **kwargs):
    super().__init__()
    self.g = g
    self.layers = nn.ModuleList()
    self.layers.append(
        GraphConv(input_size, hidden_size, activation=nonlinearity))
    for i in range(n_layers - 1):
        self.layers.append(
            GraphConv(hidden_size, hidden_size, activation=nonlinearity))
    self.layers.append(GraphConv(hidden_size, output_size))
def __init__(self, in_feats, hidden_size, num_classes):
    super(GCN, self).__init__()
    self.conv1 = GraphConv(in_feats, hidden_size)
    self.conv2 = GraphConv(hidden_size, num_classes)
    self.conv3 = torch.nn.Conv2d(in_channels=1, out_channels=1,
                                 kernel_size=1)
    # `parameters` is a module-level config dict with 'parts' and 'latent_size'
    self.linear = nn.Linear(parameters['parts'] * 4,
                            2 * parameters['latent_size'])
def build_model(self):
    self.conv1 = GraphConv(self.hparams.num_features, self.hparams.hidden_dim)
    self.conv2 = GraphConv(self.hparams.hidden_dim, self.hparams.hidden_dim)
    self.conv3 = GraphConv(self.hparams.hidden_dim, self.hparams.hidden_dim)
    self.classify = nn.Linear(self.hparams.hidden_dim * 2,
                              self.hparams.num_classes)
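# The classifier head above takes hidden_dim * 2 inputs, which suggests a
# concatenated mean + max graph readout. A forward sketch under that
# assumption (not confirmed by the source):
def forward(self, g, h):
    h = F.relu(self.conv1(g, h))
    h = F.relu(self.conv2(g, h))
    h = F.relu(self.conv3(g, h))
    g.ndata['h'] = h
    # concatenate mean and max readouts to get hidden_dim * 2 features
    hg = torch.cat([dgl.mean_nodes(g, 'h'), dgl.max_nodes(g, 'h')], dim=-1)
    return self.classify(hg)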