Example #1
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        graph_norm = net_params['graph_norm']
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()

        self.embedding_h = nn.Linear(in_dim, hidden_dim)

        # Input layer
        mlp = MLP(1, in_dim, hidden_dim, hidden_dim)
        self.ginlayers.append(
            GINLayer(ApplyNodeFunc(mlp),
                     neighbor_aggr_type,
                     dropout,
                     graph_norm,
                     batch_norm,
                     residual,
                     0,
                     learn_eps,
                     activation=F.relu))

        # Hidden layers
        for layer in range(self.n_layers - 1):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)

            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp),
                         neighbor_aggr_type,
                         dropout,
                         graph_norm,
                         batch_norm,
                         residual,
                         0,
                         learn_eps,
                         activation=F.relu))

        # Output layer
        mlp = MLP(1, hidden_dim, n_classes, n_classes)
        self.ginlayers.append(
            GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                     graph_norm, batch_norm, residual, 0, learn_eps))
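Every key this constructor reads comes from the `net_params` dictionary. A minimal sketch of such a dictionary with illustrative values (the wrapper class name `GINNet` is an assumption, not shown in the snippet):

    net_params = {
        'in_dim': 16,                # node feature dimension
        'hidden_dim': 64,
        'n_classes': 10,
        'dropout': 0.5,
        'L': 4,                      # number of GIN layers
        'n_mlp_GIN': 2,              # depth of each update MLP
        'learn_eps_GIN': True,
        'neighbor_aggr_GIN': 'sum',
        'graph_norm': True,
        'batch_norm': True,
        'residual': True,
    }
    model = GINNet(net_params)  # hypothetical class wrapping this __init__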
Example #2
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        readout = net_params['readout']  # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.n_classes = n_classes
        self.device = net_params['device']

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()

        self.embedding_h = nn.Embedding(in_dim, hidden_dim)

        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)

            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                         batch_norm, residual, 0, learn_eps))

        # Linear function for output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(self.n_layers + 1):
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))
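`linears_prediction` holds one classifier per layer output, including the initial embedding (hence `self.n_layers + 1` entries), in the style of the original GIN readout. A hedged sketch of how a forward pass might combine them, assuming `hidden_rep` collects the node features after each layer and `self.pool` is a graph pooling module chosen from `readout` (as in Examples #3 and #6):

    score_over_layer = 0
    for i, h in enumerate(hidden_rep):      # hidden_rep = [h_0, h_1, ..., h_L]
        pooled = self.pool(g, h)            # graph-level summary of layer i
        score_over_layer += self.linears_prediction[i](pooled)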
Example #3
    def __init__(self, net_params):
        super().__init__()
        self.n_layers = net_params.L  # keep in sync with the GIN layers built below
        self.embedding_h = nn.Linear(net_params.in_dim, net_params.hidden_dim)

        self.ginlayers = torch.nn.ModuleList()
        for layer in range(net_params.L):
            mlp = MLP(net_params.n_mlp_GIN, net_params.hidden_dim,
                      net_params.hidden_dim, net_params.hidden_dim)
            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), net_params.neighbor_aggr_GIN,
                         net_params.dropout, net_params.graph_norm,
                         net_params.batch_norm, net_params.residual, 0,
                         net_params.learn_eps_GIN))

        # Linear function for graph poolings (readout) of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()
        for layer in range(self.n_layers + 1):
            self.linears_prediction.append(
                nn.Linear(net_params.hidden_dim, net_params.n_classes))

        if net_params.readout == 'sum':
            self.pool = SumPooling()
        elif net_params.readout == 'mean':
            self.pool = AvgPooling()
        elif net_params.readout == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError

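`SumPooling`, `AvgPooling`, and `MaxPooling` are DGL's graph-level pooling modules; the snippet assumes an import along the lines of:

    from dgl.nn.pytorch.glob import SumPooling, AvgPooling, MaxPooling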
Example #4
File: gin_net.py  Project: yuehaowang/SoGCN
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        readout = net_params['readout']  # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        activation_name = net_params['activation']

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()

        self.embedding_h = nn.Linear(in_dim, hidden_dim)

        for layer in range(self.n_layers - 1):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim,
                      activations[activation_name])

            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                         batch_norm, residual, 0, learn_eps,
                         activations[activation_name]))

        self.ginlayers.append(
            GINLayer(
                ApplyNodeFunc(
                    MLP(n_mlp_layers, hidden_dim, hidden_dim, out_dim,
                        activations[activation_name])), neighbor_aggr_type,
                dropout, batch_norm, residual, 0, learn_eps,
                activations[activation_name]))
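`activations` is not defined in this snippet; it is presumably a module-level registry mapping the `activation` config string to a function. A plausible sketch (the exact contents are an assumption):

    import torch
    import torch.nn.functional as F

    # Hypothetical name-to-function registry for the 'activation' config key.
    activations = {
        'relu': F.relu,
        'tanh': torch.tanh,
        'sigmoid': torch.sigmoid,
    }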
Example #5
    def __init__(self, net_params):
        super().__init__()
        num_atom_type = net_params['num_atom_type']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        self.readout = net_params['readout']  # this is graph_pooling_type
        self.graph_norm = net_params['graph_norm']
        self.batch_norm = net_params['batch_norm']
        self.layer_norm = net_params['layer_norm']
        self.residual = net_params['residual']

        self.task = net_params['task']
        if self.task == 'classification':
            self.num_classes = net_params['num_classes']
        else:
            self.num_classes = 1

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()

        self.embedding_lin = nn.Linear(num_atom_type, hidden_dim, bias=False)

        for layer in range(self.n_layers):
            mlp = MLP(hidden_dim, hidden_dim, hidden_dim, self.batch_norm,
                      self.layer_norm)
            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                         self.graph_norm, self.batch_norm, self.layer_norm, 0,
                         learn_eps))

        # Linear function for graph poolings (readout) of output of each layer
        # which maps the output of different layers into a prediction score

        self.linear_ro = nn.Linear(hidden_dim, out_dim, bias=False)
        self.linear_prediction = nn.Linear(out_dim,
                                           self.num_classes,
                                           bias=True)

        # Additional parameters for the gated residual connection
        if self.residual == 'gated':
            self.W_g = nn.Linear(2 * hidden_dim, hidden_dim, bias=False)
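The extra weight `W_g` implements a gated residual connection. A hedged sketch of how such a gate is typically applied in a forward pass (`h_in` and `h_out` are illustrative names for a layer's input and output, not taken from the project):

    # The gate z in (0, 1) interpolates between the layer output and its input.
    z = torch.sigmoid(self.W_g(torch.cat([h_in, h_out], dim=-1)))
    h = z * h_out + (1 - z) * h_in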
Example #6
    def __init__(self, net_params):
        super().__init__()
        num_node_type = net_params['num_node_type']
        hidden_dim = net_params['hidden_dim']
        n_classes = net_params['n_classes']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        readout = net_params['readout']  # this is graph_pooling_type
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.pos_enc = net_params['pos_enc']
        if self.pos_enc:
            pos_enc_dim = net_params['pos_enc_dim']
            self.embedding_pos_enc = nn.Linear(pos_enc_dim, hidden_dim)
        else:
            in_dim = 1
            self.embedding_h = nn.Embedding(in_dim, hidden_dim)

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()

        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)

            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                         batch_norm, residual, 0, learn_eps))

        # Linear function for graph poolings (readout) of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        for layer in range(self.n_layers + 1):
            self.linears_prediction.append(nn.Linear(hidden_dim, n_classes))

        if readout == 'sum':
            self.pool = SumPooling()
        elif readout == 'mean':
            self.pool = AvgPooling()
        elif readout == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
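When `pos_enc` is enabled, node features are presumably derived from positional encodings instead of node indices. A hedged sketch of the matching forward-pass branch (`h_pos_enc` and `h` are assumed inputs; field names follow the constructor):

    if self.pos_enc:
        h = self.embedding_pos_enc(h_pos_enc.float())  # dense encodings -> hidden_dim
    else:
        h = self.embedding_h(h)                        # integer node ids -> hidden_dim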
Example #7
    def __init__(self, net_params):
        super().__init__()
        in_dim = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        dropout = net_params['dropout']
        self.n_layers = net_params['L']
        n_mlp_layers = net_params['n_mlp_GIN']  # GIN
        learn_eps = net_params['learn_eps_GIN']  # GIN
        neighbor_aggr_type = net_params['neighbor_aggr_GIN']  # GIN
        batch_norm = net_params['batch_norm']
        residual = net_params['residual']
        self.device = net_params['device']

        self.embedding_h = nn.Linear(in_dim, hidden_dim)

        # List of MLPs
        self.ginlayers = torch.nn.ModuleList()
        for layer in range(self.n_layers):
            mlp = MLP(n_mlp_layers, hidden_dim, hidden_dim, hidden_dim)
            self.ginlayers.append(
                GINLayer(ApplyNodeFunc(mlp), neighbor_aggr_type, dropout,
                         batch_norm, residual, 0, learn_eps))

        self.MLP_layer = MLPReadout(2 * hidden_dim, 1)
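The `2 * hidden_dim` input to `MLPReadout` indicates that this network scores node pairs (e.g., for link prediction) by concatenating two node embeddings. A hedged usage sketch (`h`, `u`, and `v` are illustrative):

    # h: node embeddings after the GIN layers; u, v: endpoint index tensors.
    edge_feat = torch.cat([h[u], h[v]], dim=-1)  # shape: (num_pairs, 2 * hidden_dim)
    score = self.MLP_layer(edge_feat)            # one score per node pair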