Example 1
    def __init__(self, num_features, hidden_dim):
        super(GIN, self).__init__()

        nn1 = Sequential(Linear(num_features, hidden_dim), ReLU(), Linear(hidden_dim, hidden_dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(hidden_dim)

        nn2 = Sequential(Linear(hidden_dim, hidden_dim), ReLU(), Linear(hidden_dim, hidden_dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(hidden_dim)

        nn3 = Sequential(Linear(hidden_dim, hidden_dim), ReLU(), Linear(hidden_dim, hidden_dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(hidden_dim)

        nn4 = Sequential(Linear(hidden_dim, hidden_dim), ReLU(), Linear(hidden_dim, hidden_dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(hidden_dim)

        nn5 = Sequential(Linear(hidden_dim, hidden_dim), ReLU(), Linear(hidden_dim, hidden_dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(hidden_dim)

        self.fc1 = Linear(hidden_dim, hidden_dim)
        self.fc2 = Linear(hidden_dim, 1)
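
These five GINConv + BatchNorm1d blocks follow the layout of PyG's classic MUTAG GIN example, so a plausible forward pass looks like the sketch below. This is an assumption, not part of the snippet: it presumes ReLU activations, a global_add_pool readout, and the noted imports.

    # Sketch only: assumes `import torch.nn.functional as F` and
    # `from torch_geometric.nn import global_add_pool`.
    def forward(self, x, edge_index, batch):
        x = self.bn1(F.relu(self.conv1(x, edge_index)))
        x = self.bn2(F.relu(self.conv2(x, edge_index)))
        x = self.bn3(F.relu(self.conv3(x, edge_index)))
        x = self.bn4(F.relu(self.conv4(x, edge_index)))
        x = self.bn5(F.relu(self.conv5(x, edge_index)))
        x = global_add_pool(x, batch)   # graph-level readout
        x = F.relu(self.fc1(x))
        return self.fc2(x)              # single scalar per graph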
Example 2
    def __init__(self, num_features, dim):
        super(Net, self).__init__()
        '''
        num_features = dataset.num_features
        dim = 32
        '''
        nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)

        self.fc1 = Linear(dim, dim)
        self.fc2 = Linear(
            dim, 2
        )  # binary classification, softmax is used instead of sigmoid here.
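
Since fc2 emits two logits per graph, the matching training loss is a softmax-based one, as the trailing comment notes. A minimal sketch (variable names are ours, not from the snippet):

    criterion = torch.nn.CrossEntropyLoss()  # applies log-softmax over the 2 logits internally
    loss = criterion(logits, labels)         # logits: [batch, 2], labels: [batch] of class ids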
Example 3
    def __init__(self, num_features, embedding_size, hidden_size=16):
        super(Actor, self).__init__()

        self.conv1 = GINConv(
            nn.Sequential(nn.Linear(num_features, embedding_size), nn.ReLU(),
                          nn.Linear(embedding_size, embedding_size)))
        self.conv2 = GINConv(
            nn.Sequential(nn.Linear(embedding_size, embedding_size), nn.ReLU(),
                          nn.Linear(embedding_size, embedding_size)))
        self.conv3 = GINConv(
            nn.Sequential(nn.Linear(embedding_size, embedding_size), nn.ReLU(),
                          nn.Linear(embedding_size, embedding_size)))

        # Feed mlp_A the embeddings of all non-scaffold nodes.
        # Output: unnormalized distribution over non-scaffold nodes.
        self.mlp_A = nn.Sequential(nn.Linear(embedding_size, hidden_size),
                                   nn.ReLU(), nn.Linear(hidden_size, 1))

        # Feed mlp_B the embeddings of all nodes, including scaffold nodes.
        # These embeddings are concatenated with the embedding of the node selected via mlp_A.
        # Output: unnormalized distribution over all nodes.
        self.mlp_B = nn.Sequential(nn.Linear(embedding_size * 2, hidden_size),
                                   nn.ReLU(), nn.Linear(hidden_size, 1))

        # Feed mlp_C a graph embedding, the AGG() of all node embeddings.
        # Output: unnormalized distribution over the {dont_stop, stop} actions.
        self.mlp_C = nn.Sequential(nn.Linear(embedding_size, hidden_size),
                                   nn.ReLU(), nn.Linear(hidden_size, 2))

        self.num_features = num_features
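
The embedding_size * 2 input of mlp_B reflects the concatenation described in the comments above. A minimal sketch of that step (tensor names are ours; h holds per-node embeddings from conv3, h_a the embedding of the node picked via mlp_A):

        # h: [num_nodes, embedding_size], h_a: [embedding_size]
        h_a = h_a.unsqueeze(0).expand(h.size(0), -1)         # broadcast to every node
        logits_B = self.mlp_B(torch.cat([h, h_a], dim=-1))   # [num_nodes, 1], unnormalized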
Example 4
 def __init__(self, dataset, num_layers, hidden, mode='cat'):
     super(GIN0WithJK, self).__init__()
     self.conv1 = GINConv(Sequential(
         Linear(dataset.num_features, hidden),
         ReLU(),
         Linear(hidden, hidden),
         ReLU(),
         BN(hidden),
     ),
                          train_eps=False)
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(
             GINConv(Sequential(
                 Linear(hidden, hidden),
                 ReLU(),
                 Linear(hidden, hidden),
                 ReLU(),
                 BN(hidden),
             ),
                     train_eps=False))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = Linear(num_layers * hidden, hidden)
     else:
         self.lin1 = Linear(hidden, hidden)
     self.lin2 = Linear(hidden, dataset.num_classes)
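
With mode='cat', JumpingKnowledge concatenates the node embeddings of all layers, which is why lin1 takes num_layers * hidden inputs. A plausible forward pass, sketched after PyG's benchmark GIN models (the pooling choice and dropout rate are assumptions):

     # Sketch only: assumes `import torch.nn.functional as F` and
     # `from torch_geometric.nn import global_mean_pool`.
     def forward(self, data):
         x, edge_index, batch = data.x, data.edge_index, data.batch
         x = self.conv1(x, edge_index)
         xs = [x]
         for conv in self.convs:
             x = conv(x, edge_index)
             xs += [x]
         x = self.jump(xs)                # 'cat' -> [N, num_layers * hidden]
         x = global_mean_pool(x, batch)
         x = F.relu(self.lin1(x))
         x = F.dropout(x, p=0.5, training=self.training)
         return F.log_softmax(self.lin2(x), dim=-1)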
Example 5
    def __init__(self, args):
        super(GIN, self).__init__()
        self.args = args
        self.num_layer = int(self.args["num_layers"])
        assert self.num_layer > 2, "Number of layers in GIN should be at least 3"

        missing_keys = list(
            set([
                "features_num", "num_class", "num_graph_features",
                "num_layers", "hidden", "dropout", "act", "mlp_layers", "eps"
            ]) - set(self.args.keys()))
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ','.join(missing_keys))
        if self.num_layer != len(self.args['hidden']) + 1:
            LOGGER.warn(
                'Warning: number of layers does not match the length of hidden units'
            )
        self.num_graph_features = self.args['num_graph_features']

        if self.args["act"] == "leaky_relu":
            act = LeakyReLU()
        elif self.args["act"] == "relu":
            act = ReLU()
        elif self.args["act"] == "elu":
            act = ELU()
        elif self.args["act"] == "tanh":
            act = Tanh()
        else:
            act = ReLU()

        train_eps = self.args["eps"] == "True"

        self.convs = torch.nn.ModuleList()
        self.bns = torch.nn.ModuleList()

        nn = [Linear(self.args["features_num"], self.args["hidden"][0])]
        for _ in range(self.args["mlp_layers"] - 1):
            nn.append(act)
            nn.append(Linear(self.args["hidden"][0], self.args["hidden"][0]))
        # nn.append(BatchNorm1d(self.args['hidden'][0]))
        self.convs.append(GINConv(Sequential(*nn), train_eps=train_eps))
        self.bns.append(BatchNorm1d(self.args["hidden"][0]))

        for i in range(self.num_layer - 3):
            nn = [Linear(self.args["hidden"][i], self.args["hidden"][i + 1])]
            for _ in range(self.args["mlp_layers"] - 1):
                nn.append(act)
                nn.append(
                    Linear(self.args["hidden"][i + 1],
                           self.args["hidden"][i + 1]))
            # nn.append(BatchNorm1d(self.args['hidden'][i+1]))
            self.convs.append(GINConv(Sequential(*nn), train_eps=train_eps))
            self.bns.append(BatchNorm1d(self.args["hidden"][i + 1]))

        self.fc1 = Linear(
            self.args["hidden"][self.num_layer - 3] + self.num_graph_features,
            self.args["hidden"][self.num_layer - 2],
        )
        self.fc2 = Linear(self.args["hidden"][self.num_layer - 2],
                          self.args["num_class"])
Example 6
    def __init__(self, writer=None, dropout=0.0):
        super(CD_GCN_Net, self).__init__()

        self.writer = writer

        self.in_dim = 1
        self.hidden_dim = 10
        self.out_dim = 4

        # self.conv11 = GCNConv(self.in_dim, self.hidden_dim)
        # self.conv12 = GCNConv(self.hidden_dim, self.hidden_dim)
        # self.conv13 = GCNConv(self.hidden_dim, self.hidden_dim)

        self.conv11 = GINConv(
            nn.Sequential(nn.Linear(self.in_dim, self.hidden_dim),
                          nn.Linear(self.hidden_dim, self.hidden_dim),
                          nn.BatchNorm1d(self.hidden_dim)))

        self.conv12 = GINConv(
            nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim),
                          nn.Linear(self.hidden_dim, self.hidden_dim),
                          nn.BatchNorm1d(self.hidden_dim)))

        self.conv13 = GINConv(
            nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim),
                          nn.Linear(self.hidden_dim, self.hidden_dim),
                          nn.BatchNorm1d(self.hidden_dim)))

        self.final_fc = nn.Sequential(
            nn.Dropout(dropout), nn.Linear(3 * self.hidden_dim, self.out_dim),
            nn.Softmax(dim=-1))
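
The 3 * self.hidden_dim input of final_fc implies that the outputs of the three GINConv blocks are concatenated before classification. A minimal sketch of that wiring (ours, inferred from the layer sizes):

        # Sketch only: x1..x3 are the outputs of conv11..conv13.
        x1 = self.conv11(x, edge_index)
        x2 = self.conv12(x1, edge_index)
        x3 = self.conv13(x2, edge_index)
        probs = self.final_fc(torch.cat([x1, x2, x3], dim=-1))  # [N, out_dim], already softmaxed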
Example 7
    def __init__(self):
        super(Net, self).__init__()

        num_features = dataset.num_features  # `dataset` and `args` are globals of the surrounding script
        dim = args.hidden

        nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim),
                         ReLU(), torch.nn.BatchNorm1d(dim))
        self.conv1 = GINConv(nn1)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv2 = GINConv(nn2)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv3 = GINConv(nn3)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv4 = GINConv(nn4)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv5 = GINConv(nn5)

        self.fc1 = Sequential(Linear(num_features, dim), ReLU(),
                              torch.nn.BatchNorm1d(dim))
        self.fc2 = Sequential(Linear(dim, dim), ReLU(),
                              torch.nn.BatchNorm1d(dim))

        self.lin = Linear(dim, dataset.num_classes)
Example 8
    def __init__(self, input_dim=0, dim=32, pretr_out_dim=100, out_dim=2):
        super(GINNet, self).__init__()

        num_features = input_dim

        nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)

        self.fc1 = Linear(dim, dim)
        self.fc2 = Linear(dim, out_dim)
        self.fc3 = Linear(dim, pretr_out_dim)
Example 9
    def __init__(self, num_features, dim_node, dim_graph, config):
        """ GIN model from PyG examples. Output distance matrix.
        https://github.com/rusty1s/pytorch_geometric/blob/master/examples/mutag_gin.py
        """
        super().__init__()

        dim = config["hidden_units"]

        nn1 = Sequential(Linear(num_features, dim), ELU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = Sequential(Linear(dim, dim), ELU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = Sequential(Linear(dim, dim), ELU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = Sequential(Linear(dim, dim), ELU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = Sequential(Linear(dim, dim), ELU(), Linear(dim, dim_node))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim_node)

        self.fc1 = Linear(dim_node, dim_node)
        self.fc2 = Linear(dim_node, dim_graph)
Example 10
    def __init__(self, in_channels, dim, out_channels):
        super(Net, self).__init__()

        self.conv1 = GINConv(
            Sequential(Linear(in_channels, dim), BatchNorm1d(dim), ReLU(),
                       Linear(dim, dim), ReLU()))

        self.conv2 = GINConv(
            Sequential(Linear(dim, dim), BatchNorm1d(dim), ReLU(),
                       Linear(dim, dim), ReLU()))

        self.conv3 = GINConv(
            Sequential(Linear(dim, dim), BatchNorm1d(dim), ReLU(),
                       Linear(dim, dim), ReLU()))

        self.conv4 = GINConv(
            Sequential(Linear(dim, dim), BatchNorm1d(dim), ReLU(),
                       Linear(dim, dim), ReLU()))

        self.conv5 = GINConv(
            Sequential(Linear(dim, dim), BatchNorm1d(dim), ReLU(),
                       Linear(dim, dim), ReLU()))

        self.lin1 = Linear(dim, dim)
        self.lin2 = Linear(dim, out_channels)
Example 11
    def __init__(self, input_dim, hidden_dim, num_classes):
        super(Net3, self).__init__()

        self.gc1 = GINConv(
            MLP(input_dim=input_dim,
                hidden_dim=hidden_dim,
                output_dim=hidden_dim,
                enhance=True))
        self.gc2 = GINConv(
            MLP(input_dim=hidden_dim,
                hidden_dim=hidden_dim,
                output_dim=hidden_dim,
                enhance=True))
        self.pool1 = MEWISPool(hidden_dim=hidden_dim)
        self.gc3 = GINConv(
            MLP(input_dim=hidden_dim,
                hidden_dim=hidden_dim,
                output_dim=hidden_dim,
                enhance=True))
        self.gc4 = GINConv(
            MLP(input_dim=hidden_dim,
                hidden_dim=hidden_dim,
                output_dim=hidden_dim,
                enhance=True))
        self.pool2 = MEWISPool(hidden_dim=hidden_dim)
        self.gc5 = GINConv(
            MLP(input_dim=hidden_dim,
                hidden_dim=hidden_dim,
                output_dim=hidden_dim,
                enhance=True))
        self.fc1 = nn.Linear(in_features=hidden_dim, out_features=hidden_dim)
        self.fc2 = nn.Linear(in_features=hidden_dim, out_features=num_classes)
Example 12
 def __init__(self,
              num_node_features=100,
              num_class=18,
              hidden=16,
              dropout_rate=0.5,
              num_layers=2,
              eps=0,
              train_eps=True):
     super(GIN, self).__init__()
     self.first_conv = GINConv(
         Sequential(Linear(num_node_features, hidden), ReLU(),
                    Linear(hidden, hidden)), eps, train_eps)
     self.first_bn = BatchNorm1d(hidden)
     self.nns = torch.nn.ModuleList()
     self.convs = torch.nn.ModuleList()
     self.bns = torch.nn.ModuleList()
     for i in range(num_layers):
         self.nns.append(
             Sequential(Linear(hidden, hidden), ReLU(),
                        Linear(hidden, hidden)))
         self.bns.append(BatchNorm1d(hidden))
         self.convs.append(GINConv(self.nns[i], eps, train_eps))
     self.lin1 = Linear(hidden, hidden)
     self.lin2 = Linear(hidden, num_class)
     self.dropout_rate = dropout_rate
Example 13
    def __init__(self, n_output=1,num_features_xd=78, num_features_xt=25,
                 n_filters=32, embed_dim=128, output_dim=128, dropout=0.2):

        super(GINProtEmbDouble, self).__init__()

        dim = 32
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.n_output = n_output
        # convolution layers
        nn1 = Sequential(Linear(num_features_xd, dim), ReLU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)

        nn6 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv6 = GINConv(nn6)
        self.bn6 = torch.nn.BatchNorm1d(dim)

        nn7 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv7 = GINConv(nn7)
        self.bn7 = torch.nn.BatchNorm1d(dim)

        nn8 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv8 = GINConv(nn8)
        self.bn8 = torch.nn.BatchNorm1d(dim)

        nn9 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv9 = GINConv(nn9)
        self.bn9 = torch.nn.BatchNorm1d(dim)

        nn10 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv10 = GINConv(nn10)
        self.bn10 = torch.nn.BatchNorm1d(dim)

        self.fc1_xd = Linear(dim, output_dim)

        # 1D convolution on protein sequence
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.fc1_xt = nn.Linear(128 * 1000, output_dim)

        # combined layers
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.out = nn.Linear(256, self.n_output)        # n_output = 1 for regression task
Example 14
        def __init__(self, edge_index, graph_features):
            super(Net, self).__init__()
            self.edge_index = edge_index
            self.graph_features = graph_features

            num_features = x_feat.shape[1]  # x_feat: node-feature matrix from the enclosing scope
            dim = 32

            nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
            self.conv1 = GINConv(nn1)
            self.bn1 = torch.nn.BatchNorm1d(dim)

            nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
            self.conv2 = GINConv(nn2)
            self.bn2 = torch.nn.BatchNorm1d(dim)

            nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
            self.conv3 = GINConv(nn3)
            self.bn3 = torch.nn.BatchNorm1d(dim)

            nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
            self.conv4 = GINConv(nn4)
            self.bn4 = torch.nn.BatchNorm1d(dim)

            nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
            self.conv5 = GINConv(nn5)
            self.bn5 = torch.nn.BatchNorm1d(dim)

            self.fc1 = Linear(dim, dim)
            self.fc2 = Linear(dim, 1)
Example 15
    def __init__(self, args):
        super(GIN, self).__init__()

        num_node_features = args.get("num_node_features")
        num_hidden = args.get("num_hidden")
        num_classes = args.get("num_classes")
        self.dropout = args.get("dropout", 0.0)

        nn1 = Sequential(Linear(num_node_features, num_hidden), LeakyReLU(),
                         Linear(num_hidden, num_hidden))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(num_hidden)

        nn2 = Sequential(Linear(num_hidden, num_hidden), LeakyReLU(),
                         Linear(num_hidden, num_hidden))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(num_hidden)

        self.fc1 = Linear(num_hidden, num_hidden)
        self.fc2 = Linear(num_hidden, num_classes)

        self.graph_embedding_function = args.get("graph_embedding_function",
                                                 None)

        self.reset_parameters()
Example 16
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 dropout=0.5):
        super(GIN, self).__init__()

        self.convs = torch.nn.ModuleList()

        # input layer
        self.convs.append(
            GINConv(Linear(in_channels, hidden_channels), train_eps=True))

        # hidden layers
        for _ in range(num_layers - 2):
            self.convs.append(
                GINConv(Linear(hidden_channels, hidden_channels),
                        train_eps=True))

        # output layer
        self.convs.append(
            GINConv(Linear(hidden_channels, out_channels), train_eps=True))

        self.dropout = dropout
Example 17
    def __init__(self):
        super(Net, self).__init__()

        num_features = dataset.num_features
        dim = 64

        nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim),
                         ReLU(), torch.nn.BatchNorm1d(dim))
        self.conv1 = GINConv(nn1)
        self.fc1 = Linear(dim, dim)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv2 = GINConv(nn2)
        self.fc2 = Linear(dim, dim)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv3 = GINConv(nn3)
        self.fc3 = Linear(dim, dim)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv4 = GINConv(nn4)
        self.fc4 = Linear(dim, dim)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim), ReLU(),
                         torch.nn.BatchNorm1d(dim))
        self.conv5 = GINConv(nn5)
        self.fc5 = Linear(dim, dim)

        self.lin2 = Linear(dim, 1)
Example 18
    def __init__(self, num_features, num_classes, dim=32):
        super(Net, self).__init__()

        nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)

        self.fc1 = Linear(dim, dim)
        self.fc2 = Linear(dim, num_classes)
Example 19
    def get_fc_kv(self, dim_K, dim_V, conv):

        if conv == 'GCN':

            fc_k = GCNConv(dim_K, dim_V)
            fc_v = GCNConv(dim_K, dim_V)

        elif conv == 'GIN':

            fc_k = GINConv(nn.Sequential(
                nn.Linear(dim_K, dim_K),
                nn.ReLU(),
                nn.Linear(dim_K, dim_V),
                nn.ReLU(),
                nn.BatchNorm1d(dim_V),
            ),
                           train_eps=False)

            fc_v = GINConv(nn.Sequential(
                nn.Linear(dim_K, dim_K),
                nn.ReLU(),
                nn.Linear(dim_K, dim_V),
                nn.ReLU(),
                nn.BatchNorm1d(dim_V),
            ),
                           train_eps=False)

        else:

            fc_k = nn.Linear(dim_K, dim_V)
            fc_v = nn.Linear(dim_K, dim_V)

        return fc_k, fc_v
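
A usage sketch for this factory (argument values are ours): the graph-conv variants take (x, edge_index), while the Linear fallback takes x alone.

        fc_k, fc_v = self.get_fc_kv(dim_K=64, dim_V=32, conv='GIN')
        k = fc_k(x, edge_index)  # for conv='GIN' or 'GCN'; plain fc_k(x) for the Linear fallback
        v = fc_v(x, edge_index)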
Example 20
    def __init__(self, nfeat=12, nclass=6):
        super(GINWOBN, self).__init__()

        dim = 128

        nn1 = nn.Sequential(nn.Linear(nfeat, dim), nn.ReLU(),
                            nn.Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(),
                            nn.Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(),
                            nn.Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(),
                            nn.Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(),
                            nn.Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)

        self.fc1 = nn.Linear(dim, dim)
        self.fc2 = nn.Linear(dim, nclass)
Example 21
    def __init__(self, name='GCNConv'):
        super(Net, self).__init__()
        self.name = name
        if name == 'GCNConv':
            self.conv1 = GCNConv(dataset.num_features, 128)
            self.conv2 = GCNConv(128, 64)
        elif name == 'ChebConv':
            self.conv1 = ChebConv(dataset.num_features, 128, K=2)
            self.conv2 = ChebConv(128, 64, K=2)
        elif name == 'GATConv':
            self.conv1 = GATConv(dataset.num_features, 128)
            self.conv2 = GATConv(128, 64)
        elif name == 'GINConv':
            nn1 = Sequential(Linear(dataset.num_features, 128), ReLU(),
                             Linear(128, 64))
            self.conv1 = GINConv(nn1)
            self.bn1 = torch.nn.BatchNorm1d(64)
            nn2 = Sequential(Linear(64, 64), ReLU(), Linear(64, 64))
            self.conv2 = GINConv(nn2)
            self.bn2 = torch.nn.BatchNorm1d(64)

        self.attr = GCNConv(64,
                            dataset.num_classes,
                            cached=True,
                            normalize=not args.use_gdc)

        self.attack = GCNConv(64,
                              dataset.num_classes,
                              cached=True,
                              normalize=not args.use_gdc)
        self.reverse = GradientReversalLayer()
Example 22
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers):
        super(Net, self).__init__()

        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for i in range(num_layers):
            mlp = Sequential(
                Linear(in_channels, 2 * hidden_channels),
                BatchNorm(2 * hidden_channels),
                ReLU(),
                Linear(2 * hidden_channels, hidden_channels),
            )
            conv = GINConv(mlp, train_eps=True)
            conv = conv.jittable(x=torch.randn(data.num_nodes, in_channels),
                                 edge_index=data.edge_index)

            self.convs.append(conv)
            self.batch_norms.append(BatchNorm(hidden_channels))

            in_channels = hidden_channels

        self.lin1 = Linear(hidden_channels, hidden_channels)
        self.batch_norm1 = BatchNorm(hidden_channels)
        self.lin2 = Linear(hidden_channels, out_channels)
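
Calling jittable() rewrites each GINConv so it can be compiled by TorchScript; the usual follow-up, sketched here after PyG's TorchScript docs (constructor arguments are ours), is to script the whole model:

    model = Net(dataset.num_features, 64, dataset.num_classes, num_layers=3)
    model = torch.jit.script(model)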
Example 23
    def __init__(self, in_channels, out_channels=32):
        super(EncoderGIN, self).__init__()

        nn1 = Sequential(Linear(in_channels, out_channels), ReLU(),
                         Linear(out_channels, out_channels))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(out_channels)

        nn2 = Sequential(Linear(out_channels, out_channels), ReLU(),
                         Linear(out_channels, out_channels))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)

        nn3 = Sequential(Linear(out_channels, out_channels), ReLU(),
                         Linear(out_channels, out_channels))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(out_channels)

        nn4 = Sequential(Linear(out_channels, out_channels), ReLU(),
                         Linear(out_channels, out_channels))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(out_channels)

        nn5 = Sequential(Linear(out_channels, out_channels), ReLU(),
                         Linear(out_channels, out_channels))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(out_channels)
Example 24
 def __init__(self, dataset, num_layers, hidden, train_eps=False, mode='cat'):
     super().__init__()
     self.conv1 = GINConv(nn.Sequential(
         nn.Linear(dataset.num_features, hidden),
         nn.ReLU(),
         nn.Linear(hidden, hidden),
         nn.ReLU(),
         nn.BatchNorm1d(hidden),
     ), train_eps=train_eps)
     self.convs = nn.ModuleList()
     for i in range(num_layers - 1):
         self.convs.append(
             GINConv(nn.Sequential(
                 nn.Linear(hidden, hidden),
                 nn.ReLU(),
                 nn.Linear(hidden, hidden),
                 nn.ReLU(),
                 nn.BatchNorm1d(hidden),
             ), train_eps=train_eps))
     self.jump = JumpingKnowledge(mode)
     if mode == 'cat':
         self.lin1 = nn.Linear(num_layers * hidden, hidden)
     else:
         self.lin1 = nn.Linear(hidden, hidden)
     self.lin2 = nn.Linear(hidden, dataset.num_classes)
Example 25
 def __init__(self, dataset, num_layers, hidden1, hidden2, deltas, elasticity=0.01, num_iterations = 30):
     super(cut_MPNN, self).__init__()
     self.hidden1 = hidden1
     self.hidden2 = hidden2
      self.conv1 = GINConv(Sequential(
          Linear(1, self.hidden1),
          ReLU(),
          Linear(self.hidden1, self.hidden1),
          ReLU(),
          BN(self.hidden1),
      ), train_eps=False)
     self.num_iterations = num_iterations
     self.convs = torch.nn.ModuleList()
     self.deltas = deltas
     self.numlayers = num_layers
     self.elasticity = elasticity
     
      self.bns = torch.nn.ModuleList()
      for i in range(num_layers - 1):
          self.bns.append(BN(self.hidden1))
          self.convs.append(GINConv(Sequential(
              Linear(self.hidden1, self.hidden1),
              ReLU(),
              Linear(self.hidden1, self.hidden1),
              ReLU(),
              BN(self.hidden1),
          ), train_eps=False))
  
      self.conv2 = GATAConv(self.hidden1, self.hidden2, heads=8)
      self.lin1 = Linear(8 * self.hidden2, self.hidden1)
     self.bn2 = BN(self.hidden1)
     self.lin2 = Linear(self.hidden1, 1)
Example 26
    def __init__(self):
        super(ginNet, self).__init__()

        # sum MLP: Linear -> ReLU -> Linear
        nn1 = Sequential(Linear(num_features, dimHid), ReLU(),
                         Linear(dimHid, dimHid))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dimHid)  # normalize data

        nn2 = Sequential(Linear(dimHid, dimHid), ReLU(),
                         Linear(dimHid, dimHid))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dimHid)

        nn3 = Sequential(Linear(dimHid, dimHid), ReLU(),
                         Linear(dimHid, dimHid))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dimHid)

        nn4 = Sequential(Linear(dimHid, dimHid), ReLU(),
                         Linear(dimHid, dimHid))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dimHid)

        nn5 = Sequential(Linear(dimHid, dimHid), ReLU(),
                         Linear(dimHid, dimHid))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dimHid)

        # read out
        self.fc1 = Linear(dimHid, dimHid)
        self.fc2 = Linear(dimHid, num_classes)
Example 27
    def __init__(self,
                 n_output=1,
                 num_features_xd=78,
                 num_features_xde=512,
                 num_features_xt=25,
                 n_filters=32,
                 embed_dim=128,
                 output_dim=128,
                 dropout=0.2):

        super(GINConvNetEmbed, self).__init__()

        dim = 32
        self.dropout = nn.Dropout(dropout)
        self.relu = nn.ReLU()
        self.n_output = n_output
        # convolution layers
        nn1 = Sequential(Linear(num_features_xd, dim), ReLU(),
                         Linear(dim, dim))
        self.conv1 = GINConv(nn1)
        self.bn1 = torch.nn.BatchNorm1d(dim)

        nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv2 = GINConv(nn2)
        self.bn2 = torch.nn.BatchNorm1d(dim)

        nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv3 = GINConv(nn3)
        self.bn3 = torch.nn.BatchNorm1d(dim)

        nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv4 = GINConv(nn4)
        self.bn4 = torch.nn.BatchNorm1d(dim)

        nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
        self.conv5 = GINConv(nn5)
        self.bn5 = torch.nn.BatchNorm1d(dim)

        self.fc1_xd = Linear(dim, output_dim)

        # 1D convolution on drug molecule embedding
        #self.embedding_xde = nn.Embedding(num_features_xde + 1, embed_dim)
        self.conv_xde_1 = nn.Conv1d(in_channels=num_features_xde,
                                    out_channels=n_filters,
                                    kernel_size=8)
        self.fc1_xde = nn.Linear(32 * 121, output_dim)

        # 1D convolution on protein sequence
        self.embedding_xt = nn.Embedding(num_features_xt + 1, embed_dim)
        self.conv_xt_1 = nn.Conv1d(in_channels=1000,
                                   out_channels=n_filters,
                                   kernel_size=8)
        self.fc1_xt = nn.Linear(32 * 121, output_dim)

        # combined layers
        self.fc1 = nn.Linear(256, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.out = nn.Linear(256,
                             self.n_output)  # n_output = 1 for regression task
Example 28
 def __init__(self):
     super(Pyg_GIN, self).__init__()
     self.conv1 = GINConv(
         nn.Sequential(nn.Linear(N_feats, N_feats), nn.ReLU()))
     self.conv2 = GINConv(
         nn.Sequential(nn.Linear(N_feats, N_feats), nn.ReLU()))
     self.fc1 = nn.Linear(N_feats, 256)
     self.fc2 = nn.Linear(256, N_labels)
Example 29
    def __init__(self, in_channels):
        super().__init__()

        self.conv1 = GINConv(Seq(Lin(in_channels, 64), ReLU(), Lin(64, 64)))
        self.pool1 = TopKPooling(in_channels, min_score=0.05)
        self.conv2 = GINConv(Seq(Lin(64, 64), ReLU(), Lin(64, 64)))

        self.lin = torch.nn.Linear(64, 1)
Example 30
 def __init__(self, nhid, dropout):
     super(GIN, self).__init__()
     num_features, dim = nhid, nhid
     self.dropout = dropout
     nn1 = Sequential(Linear(num_features, dim), ReLU(), Linear(dim, dim))
     self.conv1 = GINConv(nn1)
     nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
     self.conv2 = GINConv(nn2)
     self.fc1 = Linear(dim, dim)