Example #1
    def __init__(self, net_params):
        super().__init__()
        self.n_layers = net_params.L
        self.embedding_h = nn.Linear(net_params.in_dim, net_params.hidden_dim)

        self.ginlayers = torch.nn.ModuleList()
        for layer in range(net_params.L):
            mlp = MLP(net_params.n_mlp_GIN, net_params.hidden_dim,
                      net_params.hidden_dim, net_params.hidden_dim)
            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), net_params.neighbor_aggr_GIN,
                         net_params.dropout, net_params.graph_norm,
                         net_params.batch_norm, net_params.residual, 0,
                         net_params.learn_eps_GIN))

        # Linear function for graph poolings (readout) of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(self.n_layers + 1):
            self.linears_prediction.append(
                nn.Linear(net_params.hidden_dim, net_params.n_classes))

        if net_params.readout == 'sum':
            self.pool = SumPooling()
        elif net_params.readout == 'mean':
            self.pool = AvgPooling()
        elif net_params.readout == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError

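Every constructor in this collection selects its graph readout the same way. For reference, a minimal, self-contained sketch of how these DGL readout modules behave (graph sizes and feature width are arbitrary placeholders):

# Sketch of the readout modules selected in these constructors. Each takes
# a (batched) DGLGraph plus per-node features and returns one vector per
# graph in the batch.
import dgl
import torch
from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling

g = dgl.batch([dgl.rand_graph(4, 8), dgl.rand_graph(6, 12)])  # 2 graphs
h = torch.randn(g.num_nodes(), 16)                            # node features

for pool in (SumPooling(), AvgPooling(), MaxPooling()):
    print(pool(g, h).shape)  # torch.Size([2, 16]) -- one row per graph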
Example #2
    def __init__(self, g, in_feats, n_hidden, n_classes, n_layers, activation,
                 pooling, dropout):
        super(Classifier, self).__init__()
        self.g = g
        self.layers = nn.ModuleList()
        # input layer
        self.layers.append(
            GraphConv(in_feats,
                      n_hidden,
                      activation=activation,
                      allow_zero_in_degree=True,
                      norm='both'))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(
                GraphConv(n_hidden,
                          n_hidden,
                          activation=activation,
                          allow_zero_in_degree=True,
                          norm='both'))
        self.dropout = nn.Dropout(p=dropout)

        if pooling == 'sum':
            self.pool = SumPooling()
        elif pooling == 'mean':
            self.pool = AvgPooling()
        elif pooling == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError

        # output layer
        self.classify = nn.Linear(n_hidden, n_classes)
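A hypothetical instantiation of this Classifier; all argument values below are illustrative, not from the original source:

# Hypothetical usage -- argument values are illustrative only.
import dgl
import torch.nn.functional as F

g = dgl.rand_graph(20, 60)  # 20 nodes, 60 edges
model = Classifier(g, in_feats=10, n_hidden=32, n_classes=4,
                   n_layers=3, activation=F.relu,
                   pooling='mean', dropout=0.2)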
Example #3
    def __init__(self, num_layers, num_mlp_layers, input_dim, hidden_dim,
                 output_dim, final_dropout, learn_eps, graph_pooling_type,
                 neighbor_pooling_type, batch_size, rank_dim=32):
        super(GIN, self).__init__()
        self.num_layers = num_layers
        self.learn_eps = learn_eps
        self.graph_pooling_type = graph_pooling_type
        self.batch_size = batch_size

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()
        self.cplayers = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim)
            else:
                mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            
            self.ginlayers.append(
                GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0,
                        self.learn_eps, hidden_dim, rank_dim, output_dim))
            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(num_layers):
            self.linears_prediction.append(
                nn.Linear(hidden_dim, output_dim))
            if layer == 0:
                self.cplayers.append(graph_cp_pooling(input_dim, hidden_dim, rank_dim, init=True))
            else:
                self.cplayers.append(graph_cp_pooling(hidden_dim, output_dim, rank_dim, init=False))

        self.drop = nn.Dropout(final_dropout)

        if graph_pooling_type == 'sum':
            self.pool = SumPooling()
        elif graph_pooling_type == 'mean':
            self.pool = AvgPooling()
        elif graph_pooling_type == 'max':
            self.pool = MaxPooling()
        elif graph_pooling_type == 'cp':
            self.pool = SumPooling()
        else:
            raise NotImplementedError
Example #4
    def __init__(self,
                 dataset,
                 node_feat_dim,
                 edge_feat_dim,
                 hid_dim,
                 out_dim,
                 num_layers,
                 dropout=0.,
                 norm='batch',
                 pooling='mean',
                 beta=1.0,
                 learn_beta=False,
                 aggr='softmax',
                 mlp_layers=1):
        super(DeeperGCN, self).__init__()
        
        self.dataset = dataset
        self.num_layers = num_layers
        self.dropout = dropout
        self.gcns = nn.ModuleList()
        self.norms = nn.ModuleList()

        for i in range(self.num_layers):
            conv = GENConv(dataset=dataset,
                           in_dim=hid_dim,
                           out_dim=hid_dim,
                           aggregator=aggr,
                           beta=beta,
                           learn_beta=learn_beta,
                           mlp_layers=mlp_layers,
                           norm=norm)
            
            self.gcns.append(conv)
            self.norms.append(norm_layer(norm, hid_dim))

        if self.dataset == 'ogbg-molhiv':
            self.node_encoder = AtomEncoder(hid_dim)
        elif self.dataset == 'ogbg-ppa':
            self.node_encoder = nn.Linear(node_feat_dim, hid_dim)
            self.edge_encoder = nn.Linear(edge_feat_dim, hid_dim)
        else:
            raise ValueError(f'Dataset {dataset} is not supported.')

        if pooling == 'sum':
            self.pooling = SumPooling()
        elif pooling == 'mean':
            self.pooling = AvgPooling()
        elif pooling == 'max':
            self.pooling = MaxPooling()
        else:
            raise NotImplementedError(f'{pooling} is not supported.')
        
        self.output = nn.Linear(hid_dim, out_dim)
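A hypothetical instantiation of DeeperGCN for 'ogbg-molhiv'; the hyperparameter values are illustrative, and node_feat_dim/edge_feat_dim are only consumed on the 'ogbg-ppa' branch:

# Hypothetical usage -- hyperparameter values are illustrative only.
model = DeeperGCN(dataset='ogbg-molhiv',
                  node_feat_dim=None,   # unused on the molhiv branch
                  edge_feat_dim=None,   # unused on the molhiv branch
                  hid_dim=256,
                  out_dim=1,            # single binary task
                  num_layers=7,
                  dropout=0.2,
                  pooling='mean')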
Example #5
    def __init__(self, n_feats_fc, in_feats_g, Dropout):
        super(Net, self).__init__()
        self.knn = dgl.nn.pytorch.factory.KNNGraph(8)
        self.edge1 = EdgeConv(50, 100)
        self.edge2 = EdgeConv(100, 200)
        self.edge3 = EdgeConv(200, 600)
        self.Dropout = nn.Dropout(Dropout)
        self.pooling = MaxPooling()
        self.fc1 = nn.Linear(600, 300)
        self.fc2 = nn.Linear(300, 300)
        self.fc3 = nn.Linear(300, 100)
        self.fc4 = nn.Linear(100, 50)
        self.fc_out = nn.Linear(50, 9)
Example #6
    def __init__(self, k, in_feats, hiddens, out_feats):
        super(ChebNet, self).__init__()
        self.pool = nn.MaxPool1d(2)
        self.layers = nn.ModuleList()
        self.readout = MaxPooling()

        # Input layer
        self.layers.append(ChebConv(in_feats, hiddens[0], k))

        for i in range(1, len(hiddens)):
            self.layers.append(ChebConv(hiddens[i - 1], hiddens[i], k))

        self.cls = nn.Sequential(nn.Linear(hiddens[-1], out_feats),
                                 nn.LogSoftmax(dim=-1))
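A hypothetical call showing that hiddens is a per-layer width list (values illustrative only):

# Hypothetical usage -- builds ChebConv(10, 32, 2) then ChebConv(32, 64, 2).
model = ChebNet(k=2, in_feats=10, hiddens=[32, 64], out_feats=4)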
Example #7
    def __init__(self,
                 num_node_emb_list,
                 num_edge_emb_list,
                 num_layers=5,
                 emb_dim=300,
                 JK='last',
                 dropout=0.5,
                 readout='mean',
                 n_tasks=1):
        super(GINPredictor, self).__init__()

        if num_layers < 2:
            raise ValueError('Number of GNN layers must be greater '
                             'than 1, got {:d}'.format(num_layers))

        self.gnn = GIN(num_node_emb_list=num_node_emb_list,
                       num_edge_emb_list=num_edge_emb_list,
                       num_layers=num_layers,
                       emb_dim=emb_dim,
                       JK=JK,
                       dropout=dropout)

        if readout == 'sum':
            self.readout = SumPooling()
        elif readout == 'mean':
            self.readout = AvgPooling()
        elif readout == 'max':
            self.readout = MaxPooling()
        elif readout == 'attention':
            if JK == 'concat':
                self.readout = GlobalAttentionPooling(
                    gate_nn=nn.Linear((num_layers + 1) * emb_dim, 1))
            else:
                self.readout = GlobalAttentionPooling(
                    gate_nn=nn.Linear(emb_dim, 1))
        elif readout == 'set2set':
            # DGL's Set2Set requires the input feature size plus n_iters and
            # n_layers; the iteration counts here are illustrative defaults.
            if JK == 'concat':
                self.readout = Set2Set((num_layers + 1) * emb_dim,
                                       n_iters=2, n_layers=1)
            else:
                self.readout = Set2Set(emb_dim, n_iters=2, n_layers=1)
        else:
            raise ValueError(
                "Expect readout to be 'sum', 'mean', "
                "'max', 'attention' or 'set2set', got {}".format(readout))

        if JK == 'concat':
            predict_feats = (num_layers + 1) * emb_dim
        else:
            predict_feats = emb_dim
        if readout == 'set2set':
            # Set2Set returns vectors of twice its input feature size.
            predict_feats = 2 * predict_feats
        self.predict = nn.Linear(predict_feats, n_tasks)
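A hypothetical call, assuming the two-entry node/edge embedding lists commonly used for atom and bond features (all sizes illustrative):

# Hypothetical usage -- embedding-table sizes are illustrative only.
model = GINPredictor(num_node_emb_list=[120, 3],   # e.g. atom type, chirality
                     num_edge_emb_list=[6, 3],     # e.g. bond type, direction
                     num_layers=5,
                     emb_dim=300,
                     JK='last',
                     readout='attention',
                     n_tasks=1)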
Example #8
    def __init__(self, net_params):
        super().__init__()
        num_node_type = net_params['num_node_type']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        readout = net_params['readout']  # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        else:
            in_dim = 1
            self.embedding_h = nn.Embedding(in_dim, hidden_dim)

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()

        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)

            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                         batch_norm, residual, 0, learn_eps))

        # Linear function for graph poolings (readout) of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(self.n_layers + 1):
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))

        if readout == 'sum':
            self.pool = SumPooling()
        elif readout == 'mean':
            self.pool = AvgPooling()
        elif readout == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
Example #9
    def __init__(self, input_dimensions: _typing.Sequence[int],
                 output_dimension: int, dropout: float,
                 graph_pooling_type: str):
        super(_JKSumPoolDecoder, self).__init__()
        self._linear_transforms: torch.nn.ModuleList = torch.nn.ModuleList()
        for input_dimension in input_dimensions:
            self._linear_transforms.append(
                torch.nn.Linear(input_dimension, output_dimension))
        self._dropout: torch.nn.Dropout = torch.nn.Dropout(dropout)
        if not isinstance(graph_pooling_type, str):
            raise TypeError
        elif graph_pooling_type.lower() == 'sum':
            self.__pool = SumPooling()
        elif graph_pooling_type.lower() == 'mean':
            self.__pool = AvgPooling()
        elif graph_pooling_type.lower() == 'max':
            self.__pool = MaxPooling()
        else:
            raise NotImplementedError
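A hypothetical call; the per-layer input dimensions are illustrative and reflect the jumping-knowledge idea of one linear head per layer representation:

# Hypothetical usage -- one Linear(d, 4) per entry of input_dimensions.
decoder = _JKSumPoolDecoder(input_dimensions=[16, 32, 32],
                            output_dimension=4, dropout=0.5,
                            graph_pooling_type='sum')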
Example #10
    def __init__(self,
                 in_edge_feats,
                 num_node_types=1,
                 hidden_feats=300,
                 n_layers=5,
                 n_tasks=1,
                 batchnorm=True,
                 activation=F.relu,
                 dropout=0.,
                 gnn_type='gcn',
                 virtual_node=True,
                 residual=False,
                 jk=False,
                 readout='mean'):
        super(GNNOGBPredictor, self).__init__()

        assert gnn_type in ['gcn', 'gin'], \
            "Expect gnn_type to be 'gcn' or 'gin', got {}".format(gnn_type)
        assert readout in ['mean', 'sum', 'max'], \
            "Expect readout to be in ['mean', 'sum', 'max'], got {}".format(readout)

        self.gnn = GNNOGB(in_edge_feats=in_edge_feats,
                          num_node_types=num_node_types,
                          hidden_feats=hidden_feats,
                          n_layers=n_layers,
                          batchnorm=batchnorm,
                          activation=activation,
                          dropout=dropout,
                          gnn_type=gnn_type,
                          virtual_node=virtual_node,
                          residual=residual,
                          jk=jk)

        if readout == 'mean':
            self.readout = AvgPooling()
        elif readout == 'sum':
            self.readout = SumPooling()
        elif readout == 'max':
            self.readout = MaxPooling()

        self.predict = nn.Linear(hidden_feats, n_tasks)
Example #11
    def __init__(
        self,
        num_layers,
        num_mlp_layers,
        input_dim,
        hidden_dim,
        output_dim,
        final_dropout,
        learn_eps,
        graph_pooling_type,
        neighbor_pooling_type,
        use_selayer,
    ):
        """model parameters setting

        Parameters
        ----------
        num_layers: int
            The number of linear layers in the neural network
        num_mlp_layers: int
            The number of linear layers in mlps
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        final_dropout: float
            dropout ratio on the final linear layer
        learn_eps: boolean
            If True, learn epsilon to distinguish center nodes from neighbors
            If False, aggregate neighbors and center nodes altogether.
        neighbor_pooling_type: str
            how to aggregate neighbors (sum, mean, or max)
        graph_pooling_type: str
            how to aggregate entire nodes in a graph (sum, mean or max)
        use_selayer: boolean
            If True, use a squeeze-and-excitation layer in place of batch
            normalization

        """
        super(UnsupervisedGIN, self).__init__()
        self.num_layers = num_layers
        self.learn_eps = learn_eps

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                mlp = MLP(num_mlp_layers, input_dim, hidden_dim, hidden_dim, use_selayer)
            else:
                mlp = MLP(num_mlp_layers, hidden_dim, hidden_dim, hidden_dim, use_selayer)

            self.ginlayers.append(
                GINConv(
                    ApplyNodeFunc(mlp, use_selayer),
                    neighbor_pooling_type,
                    0,
                    self.learn_eps,
                )
            )
            self.batch_norms.append(
                SELayer(hidden_dim, int(np.sqrt(hidden_dim))) if use_selayer else nn.BatchNorm1d(hidden_dim)
            )

        # Linear function for graph poolings of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(num_layers):
            if layer == 0:
                self.linears_prediction.append(nn.Linear(input_dim, output_dim))
            else:
                self.linears_prediction.append(nn.Linear(hidden_dim, output_dim))

        self.drop = nn.Dropout(final_dropout)

        if graph_pooling_type == "sum":
            self.pool = SumPooling()
        elif graph_pooling_type == "mean":
            self.pool = AvgPooling()
        elif graph_pooling_type == "max":
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
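The snippet stops at the constructor. A minimal sketch of how linears_prediction, pool, and drop are typically combined in this family of GIN models, following DGL's reference GIN forward pass; the hidden_rep bookkeeping and the GINConv call signature are assumed here, not shown in the source, and F is torch.nn.functional:

    # Assumed forward pass (sketch): score each layer's pooled
    # representation with its own linear head and sum the per-layer scores.
    def forward(self, g, h):
        hidden_rep = [h]
        for i in range(self.num_layers - 1):
            h = self.ginlayers[i](g, h)
            h = self.batch_norms[i](h)
            h = F.relu(h)
            hidden_rep.append(h)

        score_over_layer = 0
        for i, h in enumerate(hidden_rep):
            pooled_h = self.pool(g, h)
            score_over_layer += self.drop(self.linears_prediction[i](pooled_h))
        return score_over_layer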
Example #12
    def __init__(self):
        super().__init__()
        self.pool = MaxPooling()
Example #13
    def __init__(self, args):
        """model parameters setting

        Parameters
        ----------
        num_layers: int
            The number of linear layers in the neural network
        num_mlp_layers: int
            The number of linear layers in mlps
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        final_dropout: float
            dropout ratio on the final linear layer
        eps: boolean
            If True, learn epsilon to distinguish center nodes from neighbors
            If False, aggregate neighbors and center nodes altogether.
        neighbor_pooling_type: str
            how to aggregate neighbors (sum, mean, or max)
        graph_pooling_type: str
            how to aggregate entire nodes in a graph (sum, mean or max)

        """
        super(GIN, self).__init__()
        self.args = args

        missing_keys = list(
            {
                "features_num",
                "num_class",
                "num_graph_features",
                "num_layers",
                "hidden",
                "dropout",
                "act",
                "mlp_layers",
                "eps",
                "neighbor_pooling_type",
                "graph_pooling_type",
            }
            - set(self.args.keys())
        )
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ",".join(missing_keys))

        self.num_graph_features = self.args["num_graph_features"]
        self.num_layers = self.args["num_layers"]
        assert self.num_layers > 2, "Number of layers in GIN should not be less than 3"
        if self.num_layers != len(self.args["hidden"]) + 1:
            LOGGER.warning("layer size does not match the length of hidden units")

        self.eps = self.args["eps"] == "True"
        self.num_mlp_layers = self.args["mlp_layers"]
        input_dim = self.args["features_num"]
        hidden = self.args["hidden"]
        neighbor_pooling_type = self.args["neighbor_pooling_type"]
        graph_pooling_type = self.args["graph_pooling_type"]
        if self.args["act"] == "leaky_relu":
            act = LeakyReLU()
        elif self.args["act"] == "relu":
            act = ReLU()
        elif self.args["act"] == "elu":
            act = ELU()
        elif self.args["act"] == "tanh":
            act = Tanh()
        else:
            act = ReLU()
        final_dropout = self.args["dropout"]
        output_dim = self.args["num_class"]

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                mlp = MLP(self.num_mlp_layers, input_dim, hidden[layer], hidden[layer])
            else:
                mlp = MLP(self.num_mlp_layers, hidden[layer-1], hidden[layer], hidden[layer])

            self.ginlayers.append(
                GINConv(ApplyNodeFunc(mlp), neighbor_pooling_type, 0, self.eps))
            self.batch_norms.append(nn.BatchNorm1d(hidden[layer]))

        # Linear function for graph poolings of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(self.num_layers):
            if layer == 0:
                self.linears_prediction.append(
                    nn.Linear(input_dim, output_dim))
            else:
                self.linears_prediction.append(
                    nn.Linear(hidden[layer-1], output_dim))

        self.drop = nn.Dropout(final_dropout)

        if graph_pooling_type == 'sum':
            self.pool = SumPooling()
        elif graph_pooling_type == 'mean':
            self.pool = AvgPooling()
        elif graph_pooling_type == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
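A hypothetical args dictionary satisfying the key check above; every value is illustrative, and hidden must have num_layers - 1 entries for the constructor's indexing to line up:

# Hypothetical configuration -- all values are illustrative only.
args = {
    "features_num": 7,            # input feature dimension
    "num_class": 2,               # output classes
    "num_graph_features": 0,
    "num_layers": 5,
    "hidden": [64, 64, 64, 64],   # num_layers - 1 entries
    "dropout": 0.5,
    "act": "relu",
    "mlp_layers": 2,
    "eps": "False",
    "neighbor_pooling_type": "sum",
    "graph_pooling_type": "sum",
}
model = GIN(args)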
Example #14
    def __init__(self):
        super(MaxReadout, self).__init__()
        self.max_pooler = MaxPooling()