Example #1
    def __init__(self,
                 in_dim,
                 out_dim,
                 activation,
                 dim,
                 kernel,
                 aggr_type,
                 dropout,
                 graph_norm,
                 batch_norm,
                 residual=False,
                 bias=True):
        super().__init__()

        self.in_dim = in_dim
        self.out_dim = out_dim
        self.dim = dim
        self.kernel = kernel
        self.graph_norm = graph_norm
        self.batch_norm = batch_norm
        self.residual = residual
        self.dropout = dropout
        self.activation = activation

        # GMMConv(in_feats, out_feats, dim, n_kernels, aggregator_type) from dgl.nn.pytorch.conv
        self.layer = GMMConv(in_dim, out_dim, dim, kernel, aggr_type)

        self.bn_node_h = nn.BatchNorm1d(out_dim)
        # A residual connection only makes sense when input and output widths match.
        if in_dim != out_dim:
            self.residual = False

        # Optional learnable bias added to the convolution output.
        if bias:
            self.bias = nn.Parameter(torch.Tensor(out_dim))
        else:
            self.register_buffer('bias', None)
        self.reset_parameters()
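The constructor above only builds the pieces; a forward pass consistent with it could look like the sketch below. This is an assumption-laden sketch, not the original code: it presumes DGL's GMMConv call convention layer(graph, features, pseudo), that pseudo holds per-edge pseudo-coordinates of size dim, that F is torch.nn.functional, and it omits the graph_norm branch (which would need per-graph normalization factors).

    def forward(self, g, h, pseudo):
        h_in = h                               # keep the input for the residual connection
        h = self.layer(g, h, pseudo)           # GMMConv: (graph, node features, edge pseudo-coords)
        if self.bias is not None:
            h = h + self.bias
        if self.batch_norm:
            h = self.bn_node_h(h)              # batch-normalize node features
        if self.activation is not None:
            h = self.activation(h)
        if self.residual:
            h = h_in + h                       # only active when in_dim == out_dim (see __init__)
        h = F.dropout(h, self.dropout, training=self.training)
        return h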
Example #2
    def __init__(self, n_kernels, in_feats, hiddens, out_feats):
        super(MoNet, self).__init__()
        self.pool = nn.MaxPool1d(2)
        self.layers = nn.ModuleList()
        self.readout = MaxPooling()  # graph-level max readout (dgl.nn.pytorch.glob.MaxPooling)

        # Input layer (pseudo-coordinate dimension fixed at 2)
        self.layers.append(GMMConv(in_feats, hiddens[0], 2, n_kernels))

        # Hidden layers
        for i in range(1, len(hiddens)):
            self.layers.append(
                GMMConv(hiddens[i - 1], hiddens[i], 2, n_kernels))

        self.cls = nn.Sequential(nn.Linear(hiddens[-1], out_feats),
                                 nn.LogSoftmax(dim=-1))
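How this classifier is built can be read off the constructor signature; the concrete values below are hypothetical, not taken from the original. The MaxPooling readout suggests node features are collapsed into one graph-level vector before the linear/log-softmax head, but the exact forward pass (including how nn.MaxPool1d is applied) is not shown in this snippet.

    # hypothetical configuration: 10 Gaussian kernels, 1-dimensional input features,
    # two hidden GMMConv widths, and 10 output classes
    model = MoNet(n_kernels=10, in_feats=1, hiddens=[32, 64], out_feats=10)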
Example #3
    def __init__(self, g, in_feats, n_hidden, out_feats, n_layers, dim,
                 n_kernels, dropout):
        super(MoNet, self).__init__()
        self.g = g
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()

        # Input layer; each GMMConv is paired with a projection of the raw
        # 2-d pseudo-coordinates into `dim` dimensions (Linear(2, dim) + Tanh)
        self.layers.append(GMMConv(in_feats, n_hidden, dim, n_kernels))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))

        # Hidden layers
        for _ in range(n_layers - 1):
            self.layers.append(GMMConv(n_hidden, n_hidden, dim, n_kernels))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim),
                                                  nn.Tanh()))

        # Output layer
        self.layers.append(GMMConv(n_hidden, out_feats, dim, n_kernels))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))
        self.dropout = nn.Dropout(dropout)
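A forward pass consistent with this constructor pairs each GMMConv with its pseudo-coordinate projection. The sketch below is an assumption rather than the original code: it takes pseudo to be the raw 2-dimensional edge pseudo-coordinates (matching the Linear(2, dim) projections) and places dropout between layers rather than on the input.

    def forward(self, feat, pseudo):
        h = feat
        for i, (layer, proj) in enumerate(zip(self.layers, self.pseudo_proj)):
            if i != 0:
                h = self.dropout(h)              # dropout between layers
            h = layer(self.g, h, proj(pseudo))   # project 2-d pseudo-coords to `dim`, then convolve
        return h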