def __init__(self, in_feats, n_hidden, n_classes, dropout):
    super(GCN_DGL, self).__init__()
    self.layers = nn.ModuleList()
    self.embedding_h = nn.Linear(in_feats, n_hidden)
    # input layer
    self.layers.append(
        GraphConv(in_feats, n_hidden, allow_zero_in_degree=True))
    # hidden layers
    for _ in range(3):
        self.layers.append(
            GraphConv(n_hidden, n_hidden, allow_zero_in_degree=True))
    self.dropout = nn.Dropout(p=dropout)
    self.MLP = MLPReadout(n_hidden, n_classes)
    self.fc1 = nn.Linear(n_hidden, n_hidden)
    self.fc2 = nn.Linear(n_hidden, n_classes)
    self.bn = nn.BatchNorm1d(n_hidden)
    self.avgpooling = AvgPooling()
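# A minimal, hypothetical forward pass for GCN_DGL above, assuming a DGL
# batched graph `g` and node features `h`. The ReLU activation and the use
# of fc1/fc2 rather than self.MLP are assumptions; embedding_h and bn are
# left unused here, as they are in the constructor.
import torch.nn.functional as F

def forward(self, g, h):
    for conv in self.layers:
        h = self.dropout(F.relu(conv(g, h)))   # message passing per layer
    hg = self.avgpooling(g, h)                 # one mean-pooled row per graph
    return self.fc2(F.relu(self.fc1(hg)))      # graph-level class scores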
def __init__(self, g):
    super(ProbTraffic_gcn_pool, self).__init__()
    self.g = g
    self.gcn = Net()
    self.avgpool = AvgPooling()
    self.layernorm = nn.LayerNorm(128)
    self.f1 = MLP(128, 256, 128, 0.2, True)
def __init__(self, net_params):
    super().__init__()
    # NOTE: assumes net_params.L == self.n_layers, so that there is one
    # prediction head per GIN layer plus one for the input embedding.
    self.n_layers = 2
    self.embedding_h = nn.Linear(net_params.in_dim, net_params.hidden_dim)
    self.ginlayers = torch.nn.ModuleList()
    for layer in range(net_params.L):
        mlp = MLP(net_params.n_mlp_GIN, net_params.hidden_dim,
                  net_params.hidden_dim, net_params.hidden_dim)
        self.ginlayers.append(
            GINLayer(ApplyNodeFunc(mlp), net_params.neighbor_aggr_GIN,
                     net_params.dropout, net_params.graph_norm,
                     net_params.batch_norm, net_params.residual, 0,
                     net_params.learn_eps_GIN))
    # Linear functions for graph poolings (readout) of the output of each
    # layer, which map the outputs of different layers to prediction scores.
    self.linears_prediction = torch.nn.ModuleList()
    for layer in range(self.n_layers + 1):
        self.linears_prediction.append(
            nn.Linear(net_params.hidden_dim, net_params.n_classes))
    if net_params.readout == 'sum':
        self.pool = SumPooling()
    elif net_params.readout == 'mean':
        self.pool = AvgPooling()
    elif net_params.readout == 'max':
        self.pool = MaxPooling()
    else:
        raise NotImplementedError
def __init__(self, in_dim, hid_dim, out_dim, num_stacks, num_layers,
             activation=None, dropout=0.0):
    super(ARMA4GC, self).__init__()
    self.conv1 = ARMAConv(in_dim=in_dim, out_dim=hid_dim,
                          num_stacks=num_stacks, num_layers=num_layers,
                          activation=activation, dropout=dropout)
    self.conv2 = ARMAConv(in_dim=hid_dim, out_dim=hid_dim,
                          num_stacks=num_stacks, num_layers=num_layers,
                          activation=activation, dropout=dropout)
    self.conv3 = ARMAConv(in_dim=hid_dim, out_dim=hid_dim,
                          num_stacks=num_stacks, num_layers=num_layers,
                          activation=activation, dropout=dropout)
    self.pool = AvgPooling()
    self.dropout = nn.Dropout(p=dropout)
    self.fc = nn.Linear(hid_dim, out_dim)
def __init__(self, in_feats, n_classes, n_hidden, n_heads, drop):
    super(GAT_DGL, self).__init__()
    self.layers = nn.ModuleList()
    self.embedding_h = nn.Linear(in_feats, n_hidden * n_heads)
    # input layer
    self.layers.append(
        GAT_DGLConv(in_feats, n_hidden, n_heads, allow_zero_in_degree=True))
    # hidden layers
    for _ in range(2):
        self.layers.append(
            GAT_DGLConv(n_hidden * n_heads, n_hidden, n_heads,
                        allow_zero_in_degree=True))
    # output layer (single attention head)
    self.layers.append(
        GAT_DGLConv(n_hidden * n_heads, n_hidden, 1,
                    allow_zero_in_degree=True))
    self.avgpooling = AvgPooling()
    self.fc1 = nn.Linear(n_hidden, n_hidden)
    self.fc2 = nn.Linear(n_hidden, n_classes)
    self.dropout = nn.Dropout(p=drop)
    self.bn1 = nn.BatchNorm1d(n_hidden * n_heads)
    self.bn2 = nn.BatchNorm1d(n_hidden)
def __init__(self, g):
    super(ProbTraffic_gcn_res_pool, self).__init__()
    self.g = g
    self.gcn = GCN_RES()
    self.avgpool = AvgPooling()
    self.layernorm = nn.LayerNorm(128)
    self.f2 = MLP2(128, 256, 128, 0.2, True)
def __init__(self, node_feat_dim, edge_feat_dim, hid_dim, out_dim,
             num_layers, dropout=0., beta=1.0, learn_beta=False,
             aggr='softmax', mlp_layers=1):
    super(DeeperGCN, self).__init__()
    self.num_layers = num_layers
    self.dropout = dropout
    self.gcns = nn.ModuleList()
    self.norms = nn.ModuleList()
    for _ in range(self.num_layers):
        conv = GENConv(in_dim=hid_dim, out_dim=hid_dim, aggregator=aggr,
                       beta=beta, learn_beta=learn_beta,
                       mlp_layers=mlp_layers)
        self.gcns.append(conv)
        self.norms.append(nn.BatchNorm1d(hid_dim, affine=True))
    self.node_encoder = AtomEncoder(hid_dim)
    self.pooling = AvgPooling()
    self.output = nn.Linear(hid_dim, out_dim)
def main(args, dataset):
    data_loader = DataLoader(dataset, batch_size=args['batch_size'],
                             collate_fn=collate, shuffle=False)
    model = load_pretrained(args['model']).to(args['device'])
    model.eval()
    readout = AvgPooling()
    mol_emb = []
    for batch_id, bg in enumerate(data_loader):
        print('Processing batch {:d}/{:d}'.format(batch_id + 1,
                                                  len(data_loader)))
        bg = bg.to(args['device'])
        nfeats = [
            bg.ndata.pop('atomic_number').to(args['device']),
            bg.ndata.pop('chirality_type').to(args['device'])
        ]
        efeats = [
            bg.edata.pop('bond_type').to(args['device']),
            bg.edata.pop('bond_direction_type').to(args['device'])
        ]
        with torch.no_grad():
            node_repr = model(bg, nfeats, efeats)
        mol_emb.append(readout(bg, node_repr))
    mol_emb = torch.cat(mol_emb, dim=0).detach().cpu().numpy()
    np.save(args['out_dir'] + '/mol_emb.npy', mol_emb)
def __init__(self, in_feats, n_hidden, n_classes, dropout,
             aggregator_type, device):
    super(SAGE_DGL, self).__init__()
    self.device = device
    self.layers = nn.ModuleList()
    self.dropout = nn.Dropout(dropout)
    self.avgpooling = AvgPooling()
    # input layer
    self.layers.append(SAGEDGLConv(in_feats, n_hidden, aggregator_type))
    # hidden layers
    for _ in range(2):
        self.layers.append(SAGEDGLConv(n_hidden, n_hidden, aggregator_type))
    # output layer (no activation)
    self.layers.append(SAGEDGLConv(n_hidden, n_hidden, aggregator_type))
    self.readout = nn.Linear(n_hidden, n_classes)
    self.fc1 = nn.Linear(n_hidden, n_hidden)
    self.fc2 = nn.Linear(n_hidden, n_classes)
def __init__(self, g, in_feats, n_hidden, n_classes, n_layers, activation,
             pooling, dropout):
    super(Classifier, self).__init__()
    self.g = g
    self.layers = nn.ModuleList()
    # input layer
    self.layers.append(
        GraphConv(in_feats, n_hidden, activation=activation,
                  allow_zero_in_degree=True, norm='both'))
    # hidden layers
    for _ in range(n_layers - 1):
        self.layers.append(
            GraphConv(n_hidden, n_hidden, activation=activation,
                      allow_zero_in_degree=True, norm='both'))
    self.dropout = nn.Dropout(p=dropout)
    # graph readout
    if pooling == 'sum':
        self.pool = SumPooling()
    elif pooling == 'mean':
        self.pool = AvgPooling()
    elif pooling == 'max':
        self.pool = MaxPooling()
    else:
        raise NotImplementedError
    self.classify = nn.Linear(n_hidden, n_classes)
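# A hedged usage sketch for Classifier above: the graphs, feature sizes,
# and activation below are placeholders for illustration, not values from
# the source.
import dgl
import torch.nn.functional as F

g = dgl.batch([dgl.rand_graph(6, 12), dgl.rand_graph(4, 8)])
model = Classifier(g, in_feats=16, n_hidden=32, n_classes=3, n_layers=3,
                   activation=F.relu, pooling='mean', dropout=0.1)
# A forward pass (not shown in the snippet) would presumably run the
# GraphConv stack on node features, apply self.pool(g, h) to get one
# vector per graph, and feed that to self.classify.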
def __init__(self, embed="gpls", dim=64, hidden_dim=64, num_gaussians=64, cutoff=5.0, output_dim=1, n_conv=3, act=ShiftedSoftplus(), aggregation_mode='avg', norm=False): """ Args: embed: Group and Period embeding to atomic number Embedding dim: dimension of features output_dim: dimension of prediction cutoff: radius cutoff num_gaussians: dimension in the RBF function n_conv: number of interaction layers norm: normalization """ super().__init__() self.name = "SchNet" self._dim = dim self.cutoff = cutoff self.n_conv = n_conv self.norm = norm self.output_dim = output_dim self.aggregation_mode = aggregation_mode if act == None: self.activation = ShiftedSoftplus() else: self.activation = act assert embed in ['gpls', 'atom', 'gp'], \ "Expect mode to be 'gpls' or 'atom' or 'gp', got {}".format(embed) if embed == "gpls": self.embedding_layer = GPLSEmbedding(dim) elif embed == "atom": self.embedding_layer = AtomEmbedding(dim) elif embed == "gp": self.embedding_layer = GPEmbedding(dim) self.rbf_layer = RBFLayer(0, cutoff, num_gaussians) self.conv_layers = nn.ModuleList([ SchInteraction(self.rbf_layer._fan_out, dim) for i in range(n_conv) ]) self.atom_dense_layer1 = nn.Linear(dim, int(dim / 2)) self.atom_dense_layer2 = nn.Linear(int(dim / 2), output_dim) if self.aggregation_mode == 'sum': self.readout = SumPooling() elif self.aggregation_mode == "avg": self.readout = AvgPooling()
def __init__(self, predictor_dim=None):
    super(DGL_GIN_ContextPred, self).__init__()
    from dgllife.model import load_pretrained
    from dgl.nn.pytorch.glob import AvgPooling

    # The GNN hyperparameters are fixed because the model is pretrained.
    self.gnn = load_pretrained('gin_supervised_contextpred')
    self.readout = AvgPooling()
    self.transform = nn.Linear(300, predictor_dim)
def __init__(self, hidden_channels, out_channels, num_layers, dropout):
    super(Predictor, self).__init__()
    self.lin = nn.ModuleList()
    for _ in range(num_layers - 1):
        self.lin.append(nn.Linear(hidden_channels, hidden_channels))
    self.lin.append(nn.Linear(hidden_channels, out_channels))
    self.pooling = AvgPooling()
    self.dropout = nn.Dropout(dropout)
def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
             output_dim, final_dropout, learn_eps, graph_pooling_type,
             neighbor_pooling_type, batch_size, rank_dim=32):
    super(GIN, self).__init__()
    self.num_layers = num_layers
    self.learn_eps = learn_eps
    self.graph_pooling_type = graph_pooling_type
    self.batch_size = batch_size
    # List of MLPs
    self.ginlayers = torch.nn.ModuleList()
    self.batch_norms = torch.nn.ModuleList()
    self.cplayers = torch.nn.ModuleList()
    for layer in range(self.num_layers - 1):
        if layer == 0:
            mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)
        else:
            mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
        # NOTE: the original branched on graph_pooling_type == 'cp' here,
        # but both branches constructed an identical GINConv.
        self.ginlayers.append(
            GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0,
                    self.learn_eps, hidden_dim, rank_dim, output_dim))
        self.batch_norms.append(nn.BatchNorm1d(hidden_dim))
    self.linears_prediction = torch.nn.ModuleList()
    for layer in range(num_layers):
        self.linears_prediction.append(nn.Linear(hidden_dim, output_dim))
        if layer == 0:
            self.cplayers.append(
                graph_cp_pooling(input_dim, hidden_dim, rank_dim, init=True))
        else:
            self.cplayers.append(
                graph_cp_pooling(hidden_dim, output_dim, rank_dim,
                                 init=False))
    self.drop = nn.Dropout(final_dropout)
    if graph_pooling_type == 'sum':
        self.pool = SumPooling()
    elif graph_pooling_type == 'mean':
        self.pool = AvgPooling()
    elif graph_pooling_type == 'max':
        self.pool = MaxPooling()
    elif graph_pooling_type == 'cp':
        # 'cp' uses the per-layer graph_cp_pooling modules; the plain
        # readout falls back to sum.
        self.pool = SumPooling()
    else:
        raise NotImplementedError
def __init__(self, dataset, node_feat_dim, edge_feat_dim, hid_dim, out_dim,
             num_layers, dropout=0., norm='batch', pooling='mean',
             beta=1.0, learn_beta=False, aggr='softmax', mlp_layers=1):
    super(DeeperGCN, self).__init__()
    self.dataset = dataset
    self.num_layers = num_layers
    self.dropout = dropout
    self.gcns = nn.ModuleList()
    self.norms = nn.ModuleList()
    for _ in range(self.num_layers):
        conv = GENConv(dataset=dataset, in_dim=hid_dim, out_dim=hid_dim,
                       aggregator=aggr, beta=beta, learn_beta=learn_beta,
                       mlp_layers=mlp_layers, norm=norm)
        self.gcns.append(conv)
        self.norms.append(norm_layer(norm, hid_dim))
    if self.dataset == 'ogbg-molhiv':
        self.node_encoder = AtomEncoder(hid_dim)
    elif self.dataset == 'ogbg-ppa':
        self.node_encoder = nn.Linear(node_feat_dim, hid_dim)
        self.edge_encoder = nn.Linear(edge_feat_dim, hid_dim)
    else:
        raise ValueError(f'Dataset {dataset} is not supported.')
    if pooling == 'sum':
        self.pooling = SumPooling()
    elif pooling == 'mean':
        self.pooling = AvgPooling()
    elif pooling == 'max':
        self.pooling = MaxPooling()
    else:
        raise NotImplementedError(f'{pooling} is not supported.')
    self.output = nn.Linear(hid_dim, out_dim)
def __init__(self, embed="atom", dim=64, cutoff=5., output_dim=1, num_gaussians=64, n_conv=3, act="ssp", aggregation_mode="avg", norm=False): """ Args: dim: dimension of features output_dim: dimension of prediction cutoff: radius cutoff width: width in the RBF function n_conv: number of interaction layers norm: normalization """ super().__init__() self.name = "NMPEUModel" self._dim = dim self.cutoff = cutoff self.num_gaussians = num_gaussians self.n_conv = n_conv self.norm = norm self.activation = ShiftedSoftplus() self.aggregation_mode = aggregation_mode assert embed in ["atom", "gp", "gpls", "fakeatom"] if embed == "gpls": self.embedding_layer = GPLSEmbedding(dim) elif embed == "gp": self.embedding_layer = GPEmbedding(dim) elif embed == "atom": self.embedding_layer = AtomEmbedding(dim) elif embed == "fakeatom": self.embedding_layer = FakeAtomEmbedding(dim) self.rbf_layer = RBFLayer(0, cutoff, num_gaussians) self.conv_layers = nn.ModuleList([ NMPEUInteraction(self.rbf_layer._fan_out, dim, act=self.activation) for i in range(n_conv) ]) self.atom_dense_layer1 = nn.Linear(dim, int(dim / 2)) self.atom_dense_layer2 = nn.Linear(int(dim / 2), output_dim) if self.aggregation_mode == 'sum': self.readout = SumPooling() elif self.aggregation_mode == "avg": self.readout = AvgPooling()
def __init__(self, input_dim, target_dim):
    super(GraphClassifier, self).__init__()
    hidden_dim = 8
    hidden_dim2 = 16
    self.graph_conv1 = GraphConv(input_dim, hidden_dim)
    self.graph_conv2 = GraphConv(hidden_dim, hidden_dim2)
    self.relu = nn.ReLU()
    self.pooling = AvgPooling()
    fc_hidden_dim = 32
    self.fc1 = nn.Linear(hidden_dim2, fc_hidden_dim)
    self.fc2 = nn.Linear(fc_hidden_dim, target_dim)
    self.dropout = nn.Dropout(p=0.1)
def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
             dropout, aggr_type):
    super(Graph_Predictor, self).__init__()
    self.conv = nn.ModuleList()
    self.conv.append(SAGEConv(in_channels, hidden_channels, aggr_type))
    for _ in range(num_layers - 1):
        self.conv.append(
            SAGEConv(hidden_channels, hidden_channels, aggr_type))
    self.lin = nn.ModuleList()
    for _ in range(num_layers - 1):
        self.lin.append(nn.Linear(hidden_channels, hidden_channels))
    self.lin.append(nn.Linear(hidden_channels, out_channels))
    self.pooling = AvgPooling()
    self.dropout = nn.Dropout(dropout)
def __init__(self, in_dim, out_dim):
    super(MVGRL, self).__init__()
    # Two one-layer GCN encoders with different normalization (in MVGRL
    # these correspond to the original graph and a diffusion view).
    self.encoder1 = GraphConv(in_dim, out_dim, norm='both', bias=True,
                              activation=nn.PReLU())
    self.encoder2 = GraphConv(in_dim, out_dim, norm='none', bias=True,
                              activation=nn.PReLU())
    self.pooling = AvgPooling()
    self.disc = Discriminator(out_dim)
    self.act_fn = nn.Sigmoid()
def __init__(self, embed="gpls", dim=64, hidden_dim=128, output_dim=1, n_conv=3, cutoff=12., num_gaussians=64, aggregation_mode='avg', norm=False): super(CGCNN, self).__init__() self.name = "CGCNN" self.dim = dim self._dim = hidden_dim self.cutoff = cutoff self.n_conv = n_conv self.norm = norm self.num_gaussians = num_gaussians self.activation = nn.Softplus() self.aggregation_mode = aggregation_mode assert embed in ["atom", "gp", "gpls", "fakeatom"] if embed == "gpls": self.embedding_layer = GPLSEmbedding(dim) elif embed == "gp": self.embedding_layer = GPEmbedding(dim) elif embed == "atom": self.embedding_layer = AtomEmbedding(dim) elif embed == "fakeatom": self.embedding_layer = FakeAtomEmbedding(dim) self.rbf_layer = RBFLayer(0, cutoff, num_gaussians) self.conv_layers = nn.ModuleList([ CGCNNConv(self.dim, self.rbf_layer._fan_out) for i in range(n_conv) ]) assert aggregation_mode in ['sum', 'avg'], \ "Expect mode to be 'sum' or 'avg', got {}".format(aggregation_mode ) if self.aggregation_mode == 'sum': self.readout = SumPooling() elif self.aggregation_mode == "avg": self.readout = AvgPooling() self.conv_to_fc = nn.Linear(dim, hidden_dim) self.conv_to_fc_softplus = nn.Softplus() self.fc_out = nn.Linear(hidden_dim, output_dim)
def __init__(self, net_params):
    super().__init__()
    num_node_type = net_params['num_node_type']
    hidden_dim = net_params['hidden_dim']
    n_classes = net_params['n_classes']
    dropout = net_params['dropout']
    self.n_layers = net_params['L']
    n_mlp_layers = net_params['n_mlp_GIN']                # GIN
    learn_eps = net_params['learn_eps_GIN']               # GIN
    neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
    readout = net_params['readout']                       # graph pooling type
    batch_norm = net_params['batch_norm']
    residual = net_params['residual']
    self.pos_enc = net_params['pos_enc']
    if self.pos_enc:
        pos_enc_dim = net_params['pos_enc_dim']
        self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
    else:
        in_dim = 1
        self.embedding_h = nn.Embedding(in_dim, hidden_dim)
    # List of MLPs
    self.ginlayers = torch.nn.ModuleList()
    for layer in range(self.n_layers):
        mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
        self.ginlayers.append(
            GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                     batch_norm, residual, 0, learn_eps))
    # Linear functions for graph poolings (readout) of the output of each
    # layer, which map the outputs of different layers to prediction scores.
    self.linears_prediction = torch.nn.ModuleList()
    for layer in range(self.n_layers + 1):
        self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))
    if readout == 'sum':
        self.pool = SumPooling()
    elif readout == 'mean':
        self.pool = AvgPooling()
    elif readout == 'max':
        self.pool = MaxPooling()
    else:
        raise NotImplementedError
def __init__(self, num_node_emb_list, num_edge_emb_list, num_layers=5,
             emb_dim=300, JK='last', dropout=0.5, readout='mean', n_tasks=1):
    super(GINPredictor, self).__init__()
    if num_layers < 2:
        raise ValueError('Number of GNN layers must be greater '
                         'than 1, got {:d}'.format(num_layers))
    self.gnn = GIN(num_node_emb_list=num_node_emb_list,
                   num_edge_emb_list=num_edge_emb_list,
                   num_layers=num_layers,
                   emb_dim=emb_dim,
                   JK=JK,
                   dropout=dropout)
    if readout == 'sum':
        self.readout = SumPooling()
    elif readout == 'mean':
        self.readout = AvgPooling()
    elif readout == 'max':
        self.readout = MaxPooling()
    elif readout == 'attention':
        if JK == 'concat':
            self.readout = GlobalAttentionPooling(
                gate_nn=nn.Linear((num_layers + 1) * emb_dim, 1))
        else:
            self.readout = GlobalAttentionPooling(
                gate_nn=nn.Linear(emb_dim, 1))
    elif readout == 'set2set':
        # NOTE: DGL's Set2Set requires (input_dim, n_iters, n_layers);
        # constructing it without arguments will raise a TypeError.
        self.readout = Set2Set()
    else:
        raise ValueError(
            "Expect readout to be 'sum', 'mean', "
            "'max', 'attention' or 'set2set', got {}".format(readout))
    if JK == 'concat':
        self.predict = nn.Linear((num_layers + 1) * emb_dim, n_tasks)
    else:
        self.predict = nn.Linear(emb_dim, n_tasks)
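# A hedged instantiation of GINPredictor above: the embedding-table sizes
# are illustrative (two categorical node features and two categorical edge
# features), not values from the source.
model = GINPredictor(num_node_emb_list=[120, 3],
                     num_edge_emb_list=[6, 3],
                     num_layers=5, emb_dim=300,
                     JK='last', readout='mean', n_tasks=1)
# With JK='concat', the readout and predictor operate on
# (num_layers + 1) * emb_dim features: the concatenation of the input
# embedding with every layer's output.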
def __init__(self, in_feats, n_hidden, out_feats, dim, n_kernels, device):
    super(MoNet_DGL, self).__init__()
    self.device = device
    self.layers = nn.ModuleList()
    self.pseudo_proj = nn.ModuleList()
    # Input layer
    self.layers.append(
        GMMDGLConv(in_feats, n_hidden, dim, n_kernels,
                   aggregator_type="max", allow_zero_in_degree=True))
    self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
    # Hidden layers
    for _ in range(2):
        self.layers.append(
            GMMDGLConv(n_hidden, n_hidden, dim, n_kernels,
                       aggregator_type="max", allow_zero_in_degree=True))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
    # Output layer
    self.layers.append(
        GMMDGLConv(n_hidden, n_hidden, dim, n_kernels,
                   aggregator_type="max", allow_zero_in_degree=True))
    self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
    self.dropout = nn.Dropout(0.5)
    self.fc1 = nn.Linear(n_hidden, n_hidden)
    self.fc2 = nn.Linear(n_hidden, out_feats)
    self.avgpooling = AvgPooling()
def __init__(self, features_dim, h_dim, out_dim, num_rels, num_bases=-1,
             num_layers=1):
    super(RGCN, self).__init__()
    self.features_dim, self.h_dim, self.out_dim = features_dim, h_dim, out_dim
    self.num_layers = num_layers
    self.num_rels = num_rels
    self.num_bases = num_bases
    self.pooling_layer = AvgPooling()
    # create rgcn layers
    self.build_model()
def __init__(self, input_dimensions: _typing.Sequence[int],
             output_dimension: int, dropout: float,
             graph_pooling_type: str):
    super(_JKSumPoolDecoder, self).__init__()
    self._linear_transforms: torch.nn.ModuleList = torch.nn.ModuleList()
    for input_dimension in input_dimensions:
        self._linear_transforms.append(
            torch.nn.Linear(input_dimension, output_dimension))
    self._dropout: torch.nn.Dropout = torch.nn.Dropout(dropout)
    if not isinstance(graph_pooling_type, str):
        raise TypeError
    elif graph_pooling_type.lower() == 'sum':
        self.__pool = SumPooling()
    elif graph_pooling_type.lower() == 'mean':
        self.__pool = AvgPooling()
    elif graph_pooling_type.lower() == 'max':
        self.__pool = MaxPooling()
    else:
        raise NotImplementedError
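# A plausible forward for _JKSumPoolDecoder, assuming it receives one node
# representation tensor per GNN layer (jumping-knowledge style) and sums
# the pooled, projected per-layer scores; a sketch, not the source. It must
# live inside the class body so `self.__pool` name-mangles correctly.
def forward(self, graph, layer_representations):
    score = 0
    for linear, h in zip(self._linear_transforms, layer_representations):
        score = score + self._dropout(linear(self.__pool(graph, h)))
    return score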
def __init__(self, in_edge_feats, num_node_types=1, hidden_feats=300,
             n_layers=5, n_tasks=1, batchnorm=True, activation=F.relu,
             dropout=0., gnn_type='gcn', virtual_node=True, residual=False,
             jk=False, readout='mean'):
    super(GNNOGBPredictor, self).__init__()
    assert gnn_type in ['gcn', 'gin'], \
        "Expect gnn_type to be 'gcn' or 'gin', got {}".format(gnn_type)
    assert readout in ['mean', 'sum', 'max'], \
        "Expect readout to be in ['mean', 'sum', 'max'], got {}".format(readout)
    self.gnn = GNNOGB(in_edge_feats=in_edge_feats,
                      num_node_types=num_node_types,
                      hidden_feats=hidden_feats,
                      n_layers=n_layers,
                      batchnorm=batchnorm,
                      activation=activation,
                      dropout=dropout,
                      gnn_type=gnn_type,
                      virtual_node=virtual_node,
                      residual=residual,
                      jk=jk)
    if readout == 'mean':
        self.readout = AvgPooling()
    elif readout == 'sum':
        self.readout = SumPooling()
    elif readout == 'max':
        self.readout = MaxPooling()
    self.predict = nn.Linear(hidden_feats, n_tasks)
def __init__(self, args):
    """Model parameter setting.

    Parameters
    ----------
    num_layers: int
        The number of linear layers in the neural network
    num_mlp_layers: int
        The number of linear layers in MLPs
    input_dim: int
        The dimensionality of input features
    hidden_dim: int
        The dimensionality of hidden units at ALL layers
    output_dim: int
        The number of classes for prediction
    final_dropout: float
        dropout ratio on the final linear layer
    eps: boolean
        If True, learn epsilon to distinguish center nodes from neighbors;
        if False, aggregate neighbors and center nodes altogether.
    neighbor_pooling_type: str
        how to aggregate neighbors (sum, mean, or max)
    graph_pooling_type: str
        how to aggregate all nodes in a graph (sum, mean or max)
    """
    super(GIN, self).__init__()
    self.args = args
    missing_keys = list(
        set([
            "features_num", "num_class", "num_graph_features",
            "num_layers", "hidden", "dropout", "act", "mlp_layers", "eps",
        ]) - set(self.args.keys()))
    if len(missing_keys) > 0:
        raise Exception("Missing keys: %s." % ",".join(missing_keys))
    self.num_graph_features = self.args["num_graph_features"]
    self.num_layers = self.args["num_layers"]
    assert self.num_layers > 2, "Number of layers in GIN should be at least 3"
    if not self.num_layers == len(self.args["hidden"]) + 1:
        LOGGER.warning("Layer size does not match the length of hidden units")
    self.eps = self.args["eps"] == "True"
    self.num_mlp_layers = self.args["mlp_layers"]
    input_dim = self.args["features_num"]
    hidden = self.args["hidden"]
    # NOTE: neighbor_pooling_type / graph_pooling_type are read here but
    # not checked in missing_keys above.
    neighbor_pooling_type = self.args["neighbor_pooling_type"]
    graph_pooling_type = self.args["graph_pooling_type"]
    if self.args["act"] == "leaky_relu":
        act = LeakyReLU()
    elif self.args["act"] == "relu":
        act = ReLU()
    elif self.args["act"] == "elu":
        act = ELU()
    elif self.args["act"] == "tanh":
        act = Tanh()
    else:
        act = ReLU()
    final_dropout = self.args["dropout"]
    output_dim = self.args["num_class"]
    # List of MLPs
    self.ginlayers = torch.nn.ModuleList()
    self.batch_norms = torch.nn.ModuleList()
    for layer in range(self.num_layers - 1):
        if layer == 0:
            mlp = MLP(self.num_mlp_layers, input_dim, hidden[layer],
                      hidden[layer])
        else:
            mlp = MLP(self.num_mlp_layers, hidden[layer - 1], hidden[layer],
                      hidden[layer])
        self.ginlayers.append(
            GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.eps))
        self.batch_norms.append(nn.BatchNorm1d(hidden[layer]))
    # Linear functions for graph poolings of the output of each layer,
    # which map the outputs of different layers to prediction scores.
    self.linears_prediction = torch.nn.ModuleList()
    for layer in range(self.num_layers):
        if layer == 0:
            self.linears_prediction.append(nn.Linear(input_dim, output_dim))
        else:
            self.linears_prediction.append(
                nn.Linear(hidden[layer - 1], output_dim))
    self.drop = nn.Dropout(final_dropout)
    if graph_pooling_type == 'sum':
        self.pool = SumPooling()
    elif graph_pooling_type == 'mean':
        self.pool = AvgPooling()
    elif graph_pooling_type == 'max':
        self.pool = MaxPooling()
    else:
        raise NotImplementedError
def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
             output_dim, final_dropout, learn_eps, graph_pooling_type,
             neighbor_pooling_type, use_selayer):
    """Model parameter setting.

    Parameters
    ----------
    num_layers: int
        The number of linear layers in the neural network
    num_mlp_layers: int
        The number of linear layers in MLPs
    input_dim: int
        The dimensionality of input features
    hidden_dim: int
        The dimensionality of hidden units at ALL layers
    output_dim: int
        The number of classes for prediction
    final_dropout: float
        dropout ratio on the final linear layer
    learn_eps: boolean
        If True, learn epsilon to distinguish center nodes from neighbors;
        if False, aggregate neighbors and center nodes altogether.
    neighbor_pooling_type: str
        how to aggregate neighbors (sum, mean, or max)
    graph_pooling_type: str
        how to aggregate all nodes in a graph (sum, mean or max)
    """
    super(UnsupervisedGIN, self).__init__()
    self.num_layers = num_layers
    self.learn_eps = learn_eps
    # List of MLPs
    self.ginlayers = torch.nn.ModuleList()
    self.batch_norms = torch.nn.ModuleList()
    for layer in range(self.num_layers - 1):
        if layer == 0:
            mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim,
                      use_selayer)
        else:
            mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim,
                      use_selayer)
        self.ginlayers.append(
            GINConv(ApplyNodeFunc(mlp, use_selayer), neighbor_pooling_type,
                    0, self.learn_eps))
        self.batch_norms.append(
            SELayer(hidden_dim, int(np.sqrt(hidden_dim)))
            if use_selayer else nn.BatchNorm1d(hidden_dim))
    # Linear functions for graph poolings of the output of each layer,
    # which map the outputs of different layers to prediction scores.
    self.linears_prediction = torch.nn.ModuleList()
    for layer in range(num_layers):
        if layer == 0:
            self.linears_prediction.append(nn.Linear(input_dim, output_dim))
        else:
            self.linears_prediction.append(nn.Linear(hidden_dim, output_dim))
    self.drop = nn.Dropout(final_dropout)
    if graph_pooling_type == "sum":
        self.pool = SumPooling()
    elif graph_pooling_type == "mean":
        self.pool = AvgPooling()
    elif graph_pooling_type == "max":
        self.pool = MaxPooling()
    else:
        raise NotImplementedError
def __init__(self, type='0'):
    super().__init__()
    self.pool = AvgPooling()
    self.type = type
def __init__(self):
    super().__init__()
    self.pool = AvgPooling()
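# Common to every snippet above: AvgPooling is a segment-wise mean readout.
# On a batched graph it returns one row per component graph (the same
# result as dgl.mean_nodes). A quick self-contained check:
import dgl
import torch
from dgl.nn.pytorch.glob import AvgPooling

bg = dgl.batch([dgl.rand_graph(3, 4), dgl.rand_graph(5, 6)])
feat = torch.randn(bg.num_nodes(), 16)
out = AvgPooling()(bg, feat)
assert out.shape == (2, 16)   # one 16-dim mean vector per graph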