Example #1
    def __init__(self,
                 in_dim,
                 hid_dim,
                 out_dim,
                 num_layers=1,
                 mode='cat',
                 dropout=0.):
        super(JKNet, self).__init__()

        self.mode = mode
        self.dropout = nn.Dropout(dropout)
        self.layers = nn.ModuleList()
        self.layers.append(GraphConv(in_dim, hid_dim, activation=F.relu))
        for _ in range(num_layers):
            self.layers.append(GraphConv(hid_dim, hid_dim, activation=F.relu))

        if self.mode == 'cat':
            hid_dim = hid_dim * (num_layers + 1)
        elif self.mode == 'lstm':
            self.lstm = nn.LSTM(hid_dim, (num_layers * hid_dim) // 2,
                                bidirectional=True,
                                batch_first=True)
            self.attn = nn.Linear(2 * ((num_layers * hid_dim) // 2), 1)

        self.output = nn.Linear(hid_dim, out_dim)
        self.reset_params()
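
A possible forward pass for this constructor (a minimal sketch, not the original author's code) is shown below. It assumes torch is imported, that DGL's GraphConv is invoked as conv(graph, feat), and that layer outputs are aggregated according to self.mode:

    def forward(self, g, feats):
        # Collect every layer's node representation for jumping-knowledge aggregation.
        feat_lst = []
        for layer in self.layers:
            feats = self.dropout(layer(g, feats))
            feat_lst.append(feats)

        if self.mode == 'cat':
            # Concatenate all layer outputs along the feature axis.
            out = torch.cat(feat_lst, dim=-1)
        else:  # 'lstm': attention weights from a bidirectional LSTM over the stack
            x = torch.stack(feat_lst, dim=1)      # (N, num_layers + 1, hid_dim)
            alpha, _ = self.lstm(x)
            alpha = torch.softmax(self.attn(alpha).squeeze(-1), dim=-1)
            out = (x * alpha.unsqueeze(-1)).sum(dim=1)

        return self.output(out)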
Example #2
    def __init__(self,
                 in_dim,
                 hidden_dim_1,
                 hidden_dim_2,
                 fc_hidden_1,
                 fc_hidden_2,
                 num_classes,
                 use_cuda=False):
        """
        Constructor for the GraphAttConvBinaryClassifier class
        Parameters:
            in_dim (int): Dimension of features for each node
            hidden_dim (int): Dimension of hidden embeddings
            num_classes (int): Number of output classes
            use_cuda (bool): Indicates whether GPU should be utilized or not
        """
        super(GraphConvBinaryClassifier, self).__init__()

        # Model layers
        self.conv1 = GraphConv(in_dim, hidden_dim_1)
        self.conv2 = GraphConv(hidden_dim_1, hidden_dim_2)
        self.conv3 = GraphConv(hidden_dim_2, fc_hidden_1)

        self.fc_1 = nn.Linear(fc_hidden_1, fc_hidden_2)
        self.fc_2 = nn.Linear(fc_hidden_2, num_classes)
        self.out = nn.Sigmoid()

        self.use_cuda = use_cuda
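
Only the constructor is shown; one plausible forward pass for a graph-level binary classifier of this shape is sketched below. The ReLU nonlinearities and the mean-node readout via dgl.mean_nodes are assumptions (dgl and torch.nn.functional as F are assumed imported):

    def forward(self, g, features):
        h = F.relu(self.conv1(g, features))
        h = F.relu(self.conv2(g, h))
        h = F.relu(self.conv3(g, h))
        g.ndata['h'] = h
        hg = dgl.mean_nodes(g, 'h')       # assumed graph-level readout
        hg = F.relu(self.fc_1(hg))
        return self.out(self.fc_2(hg))    # sigmoid-activated scores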
Example #3
    def __init__(self, args):
        super(GCN, self).__init__()
        self.args = args
        self.num_layer = int(self.args["num_layers"])

        missing_keys = list(
            set([
                "features_num",
                "num_class",
                "num_layers",
                "hidden",
                "dropout",
                "act",
            ]) - set(self.args.keys()))
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ",".join(missing_keys))

        if self.num_layer != len(self.args["hidden"]) + 1:
            LOGGER.warning(
                "Warning: layer size does not match the length of hidden units"
            )
        self.convs = torch.nn.ModuleList()

        self.convs.append(
            GraphConv(self.args["features_num"], self.args["hidden"][0]))

        for i in range(self.num_layer - 2):
            self.convs.append(
                GraphConv(self.args["hidden"][i], self.args["hidden"][i + 1]))
        self.convs.append(
            GraphConv(self.args["hidden"][-1], self.args["num_class"]))
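
A sketch of how these stacked convolutions are typically applied (illustrative only; the ReLU activation and F.dropout usage are assumptions based on the "act" and "dropout" keys the constructor checks for):

    def forward(self, g, x):
        for i, conv in enumerate(self.convs):
            x = conv(g, x)
            if i != len(self.convs) - 1:
                # No nonlinearity or dropout after the final (output) layer.
                x = F.relu(x)
                x = F.dropout(x, p=float(self.args["dropout"]), training=self.training)
        return x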
Example #4
    def __init__(self,
                 num_layers,
                 input_dim,
                 hidden_dim,
                 output_dim,
                 final_dropout,
                 graph_pooling_type,
                 norm_type='gn'):
        super(GCN, self).__init__()
        self.num_layers = num_layers

        self.gcnlayers = torch.nn.ModuleList()
        self.norms = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                self.gcnlayers.append(GraphConv(input_dim, hidden_dim))
            else:
                self.gcnlayers.append(GraphConv(hidden_dim, hidden_dim))

            self.norms.append(Norm(norm_type, hidden_dim))

        self.linears_prediction = nn.Linear(hidden_dim, output_dim)
        self.drop = nn.Dropout(final_dropout)

        if graph_pooling_type == 'sum':
            self.pool = SumPooling()
        elif graph_pooling_type == 'mean':
            self.pool = AvgPooling()
        elif graph_pooling_type == 'max':
            self.pool = MaxPooling()
        else:
            raise NotImplementedError
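
The forward pass is not part of the snippet; a minimal sketch, assuming each Norm module is applied directly to node features and that ReLU follows each block (torch.nn.functional as F assumed):

    def forward(self, g, h):
        for conv, norm in zip(self.gcnlayers, self.norms):
            h = F.relu(norm(conv(g, h)))   # Norm call signature assumed feature-only
        hg = self.pool(g, h)               # SumPooling / AvgPooling / MaxPooling readout
        return self.linears_prediction(self.drop(hg))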
Example #5
    def __init__(self, in_feats, n_hidden, n_classes, n_layers, activation):
        super(GCN, self).__init__()
        self.layers = nn.ModuleList()

        # input layer
        self.layers.append(
            GraphConv(in_feats, out_feats=n_hidden, activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(
                GraphConv(n_hidden, out_feats=n_hidden, activation=activation))
        # output layer
        # self.linear1 = nn.Linear(n_hidden, n_hidden // 2)
        # self.linear2 = nn.Linear(n_hidden // 2, n_classes)

        self.linear2 = nn.Linear(n_hidden * 3, n_classes)
        self.softmax = nn.Softmax(dim=1)
Example #6
    def __init__(self, layer_sizes, batch_norm_mm=0.99):
        super(GCN, self).__init__()

        self.layers = nn.ModuleList()
        for in_dim, out_dim in zip(layer_sizes[:-1], layer_sizes[1:]):
            self.layers.append(GraphConv(in_dim, out_dim))
            self.layers.append(BatchNorm1d(out_dim, momentum=batch_norm_mm))
            self.layers.append(nn.PReLU())
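
Because this ModuleList mixes graph-aware and feature-only modules, the forward pass has to dispatch on layer type. A sketch of one way to do so (assuming from dgl.nn import GraphConv):

    def forward(self, g, x):
        for layer in self.layers:
            # GraphConv needs the graph; BatchNorm1d and PReLU act on features alone.
            x = layer(g, x) if isinstance(layer, GraphConv) else layer(x)
        return x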
Example #7
    def __init__(self, input_dim, output_dim, **kwargs):
        super().__init__(input_dim, output_dim)
        hidden_dim = kwargs.get('hidden_dim', 32)
        num_layers = kwargs.get('num_layers', 2)

        self.num_layers = num_layers
        self.linear_in = nn.Linear(input_dim, hidden_dim)
        self.conv = GraphConv(2*hidden_dim, hidden_dim, activation=F.relu)
        self.g_embed = nn.Linear(hidden_dim, output_dim)
Example #8
    def __init__(self, input_dim, output_dim, **kwargs):
        super().__init__(input_dim, output_dim)

        hidden_dims = kwargs.get('hidden_dims', [32])
        self.num_layers = len(hidden_dims)

        hidden_plus_input_dims = [hd + input_dim for hd in hidden_dims]
        self.convs = nn.ModuleList([GraphConv(in_dim, out_dim, activation=F.relu) for (in_dim, out_dim)
                      in zip([input_dim] + hidden_plus_input_dims[:-1], hidden_dims)])

        self.g_embed = nn.Linear(hidden_dims[-1], output_dim)
Example #9
 def __init__(self, input_dimension: int, dimensions: _typing.Sequence[int],
              act: _typing.Optional[str], dropout: _typing.Optional[float]):
     super(_GCN, self).__init__()
     self.__convolution_layers: torch.nn.ModuleList = torch.nn.ModuleList()
     for layer, _dimension in enumerate(dimensions):
         self.__convolution_layers.append(
             GraphConv(
                 input_dimension if layer == 0 else dimensions[layer - 1],
                 _dimension))
     self._act: _typing.Optional[str] = act
     self._dropout: _typing.Optional[float] = dropout
Example #10
    def __init__(self, input_dim, num_inducing, hidden_sizes=[32, 32], out_dim=None, mean=None, covar=None):

        if out_dim is None:
            batch_shape = torch.Size([])
        else:
            batch_shape = torch.Size([out_dim])
        
        if out_dim is None:
            inducing_points = torch.rand(num_inducing, hidden_sizes[-1])
        else:
            inducing_points = torch.rand(out_dim, num_inducing, hidden_sizes[-1])

        variational_distribution = CholeskyVariationalDistribution(
            inducing_points.size(-2), 
            batch_shape=batch_shape
        )

        # Use IndependentMultitaskVariationalStrategy to handle multiple output tasks
        if out_dim is None:
            variational_strategy = VariationalStrategy(
                    self, inducing_points, variational_distribution, learn_inducing_locations=True
                )
        else:
            variational_strategy = IndependentMultitaskVariationalStrategy(
                VariationalStrategy(
                    self, inducing_points, variational_distribution, learn_inducing_locations=True
                ),
                num_tasks=out_dim,
            )

        super(DeepGraphKernel, self).__init__(variational_strategy)

        gcn_layers = nn.ModuleList()
        layer_input_out_dims = list(zip(
            [input_dim] + hidden_sizes[:-1],
            hidden_sizes
        ))

        for i, (in_features, out_features) in enumerate(layer_input_out_dims):
            gcn_layers.append(
                GraphConv(in_features, out_features, activation=nn.ReLU())
            )

        self.mean_module = gpytorch.means.LinearMean(hidden_sizes[-1], batch_shape=batch_shape) if mean is None else mean
        self.covar_module = gpytorch.kernels.PolynomialKernel(power=4, batch_shape=batch_shape) if covar is None else covar
        # self.covar_module.offset = 5
        self.num_inducing = inducing_points.size(-2)
        self.gcn = gcn_layers
        self.dropout = torch.nn.Dropout(0.5)
Example #11
    def __init__(self, num_layers, d, out_d, residual, reinit, **kwargs):

        super().__init__(emb_size=d)

        self.d = d
        self.num_layers = num_layers
        self.residual = residual

        self.layers = nn.ModuleList(
            [GraphConv(d, d) for _ in range(num_layers)])

        self.ln = nn.Linear(d, out_d)

        if reinit:
            self.reinit_params()
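
A sketch of a forward pass that honors the residual flag stored above (the ReLU activation and the placement of the final linear projection are assumptions; torch.nn.functional as F assumed):

    def forward(self, g, h):
        for conv in self.layers:
            out = F.relu(conv(g, h))
            h = h + out if self.residual else out   # optional skip connection
        return self.ln(h)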
Example #12
 def __init__(self, input_dimension: int, dimensions: _typing.Sequence[int]):
     super(_TopK, self).__init__()
     self.__gcn_layers: torch.nn.ModuleList = torch.nn.ModuleList()
     self.__batch_normalizations: torch.nn.ModuleList = torch.nn.ModuleList()
     self.__num_layers = len(dimensions)
     for layer in range(self.__num_layers):
         self.__gcn_layers.append(
             GraphConv(
                 input_dimension if layer == 0 else dimensions[layer - 1],
                 dimensions[layer]
             )
         )
         self.__batch_normalizations.append(
             torch.nn.BatchNorm1d(dimensions[layer])
         )
Example #13
 def __init__(self, **kwargs):
     super().__init__(**kwargs)
     self.layers = nn.ModuleList()
     for _ in range(self.n_layers):
         self.layers.append(
             nn.ModuleDict({
                 'gc':
                 GraphConv(in_feats=self.hidden_dim,
                           out_feats=self.hidden_dim,
                           norm=True,
                           bias=True,
                           activation=self.get_act()),
                 'norm':
                 self.get_norm(self.hidden_dim),
                 'do':
                 nn.Dropout(self.p_dropout)
             }))
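
A sketch of consuming the ModuleDict blocks built above, assuming the order graph convolution -> normalization -> dropout:

 def forward(self, g, h):
     for block in self.layers:
         h = block['do'](block['norm'](block['gc'](g, h)))
     return h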
Example #14
    def __init__(self,
                 in_features,
                 out_features,
                 *,
                 hids=[16] * 5,
                 acts=['relu'] * 5,
                 mode='cat',
                 dropout=0.5,
                 bias=True):
        super().__init__()
        self.mode = mode
        num_JK_layers = len(list(hids)) - 1  # number of JK layers

        assert num_JK_layers >= 1 and len(
            set(hids)
        ) == 1, 'there must be at least two hidden layers and all hidden units must be equal'

        conv = []
        self.dropout = nn.Dropout(dropout)
        for hid, act in zip(hids, acts):
            conv.append(
                GraphConv(in_features,
                          hid,
                          bias=bias,
                          activation=activations.get(act)))
            in_features = hid

        assert len(conv) == num_JK_layers + 1

        self.conv = nn.ModuleList(conv)

        if self.mode == 'cat':
            hid = hid * (num_JK_layers + 1)
        elif self.mode == 'lstm':
            self.lstm = nn.LSTM(hid, (num_JK_layers * hid) // 2,
                                bidirectional=True,
                                batch_first=True)
            self.attn = nn.Linear(2 * ((num_JK_layers * hid) // 2), 1)

        self.output = nn.Linear(hid, out_features)
Example #15
    def __init__(self,
                 input_dim,
                 output_dim,
                 hidden_sizes=[32, 32],
                 *args,
                 **kwargs):
        super(DeepGraphNeuralNetwork, self).__init__(*args, **kwargs)

        self.layers = nn.ModuleList()

        layer_input_out_dims = list(
            zip([input_dim] + hidden_sizes, hidden_sizes + [output_dim]))

        for i, (in_features, out_features) in enumerate(layer_input_out_dims):
            if i != len(layer_input_out_dims) - 1:
                activation = nn.ReLU()
            else:
                # The last layer will have softmax activation
                activation = nn.Softmax(dim=-1)

            self.layers.append(
                GraphConv(in_features, out_features, activation=activation))
        self.dropout = nn.Dropout(p=0.1)
Example #16
 def __init__(self, in_feats, h_feats):
     super(GCN, self).__init__()
     self.conv1 = GraphConv(in_feats, h_feats)
     self.conv2 = GraphConv(h_feats, h_feats)
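
This is the canonical two-layer DGL setup; the forward method and the toy usage below are illustrative sketches (torch, dgl and torch.nn.functional as F assumed imported), not part of the original snippet:

 def forward(self, g, in_feat):
     h = F.relu(self.conv1(g, in_feat))
     return self.conv2(g, h)

 # Hypothetical usage:
 #   g = dgl.add_self_loop(dgl.rand_graph(10, 30))   # toy graph with self-loops
 #   model = GCN(in_feats=5, h_feats=16)
 #   h = model(g, torch.randn(10, 5))                # per-node embeddings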
Example #17
 def __init__(self, in_dim, hidden_dim, out_dim):
     super(GCN_Node_Classifier, self).__init__()
     self.conv1 = GraphConv(in_dim, hidden_dim)
     self.conv2 = GraphConv(hidden_dim, out_dim)
Example #18
    def __init__(self, args):
        """model parameters setting

        Paramters
        ---------
        num_layers: int
            The number of linear layers in the neural network
        num_mlp_layers: int
            The number of linear layers in mlps
        input_dim: int
            The dimensionality of input features
        hidden_dim: int
            The dimensionality of hidden units at ALL layers
        output_dim: int
            The number of classes for prediction
        final_dropout: float
            dropout ratio on the final linear layer

        """
        super(Topkpool, self).__init__()
        self.args = args

        missing_keys = list(
            set(
                [
                    "features_num",
                    "num_class",
                    "num_graph_features",
                    "num_layers",
                    "hidden",
                    "dropout",
                ]
            )
            - set(self.args.keys())
        )
        if len(missing_keys) > 0:
            raise Exception("Missing keys: %s." % ",".join(missing_keys))
        #if not self.num_layer == len(self.args["hidden"]) + 1:
        #    LOGGER.warn("Warning: layer size does not match the length of hidden units")


        self.num_graph_features = self.args["num_graph_features"]
        self.num_layers = self.args["num_layers"]
        assert self.num_layers > 2, "Number of layers in GIN should not be less than 3"
        assert self.num_layers == len(self.args["hidden"]) + 1, "Warning: layer size does not match the length of hidden units"

        input_dim = self.args["features_num"]
        hidden = self.args["hidden"]
        final_dropout = self.args["dropout"]
        output_dim = self.args["num_class"]

        # List of MLPs
        self.gcnlayers = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(self.num_layers - 1):
            if layer == 0:
                self.gcnlayers.append(GraphConv(input_dim, hidden[layer]))
            else:
                self.gcnlayers.append(GraphConv(hidden[layer-1], hidden[layer]))

            #self.gcnlayers.append(GraphConv(input_dim, hidden_dim))
            self.batch_norms.append(nn.BatchNorm1d(hidden[layer]))

        # Linear function for graph poolings of output of each layer
        # which maps the output of different layers into a prediction score
        self.linears_prediction = torch.nn.ModuleList()

        #TopKPool
        k = 3
        self.pool = SortPooling(k)

        for layer in range(self.num_layers):
            if layer == 0:
                self.linears_prediction.append(
                    nn.Linear(input_dim * k, output_dim))
            else:
                self.linears_prediction.append(
                    nn.Linear(hidden[layer-1] * k, output_dim))

        self.drop = nn.Dropout(final_dropout)
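
The forward pass is not included; below is a sketch of the readout this constructor implies: every intermediate representation (including the raw input) is pooled with SortPooling(k), scored by its own linear head, and the dropout-regularized scores are summed. The ReLU/BatchNorm ordering is an assumption (torch.nn.functional as F assumed):

    def forward(self, g, h):
        hidden_rep = [h]
        for conv, bn in zip(self.gcnlayers, self.batch_norms):
            h = F.relu(bn(conv(g, h)))
            hidden_rep.append(h)

        score_over_layer = 0
        for rep, linear in zip(hidden_rep, self.linears_prediction):
            pooled = self.pool(g, rep)       # (batch_size, k * feat_dim)
            score_over_layer = score_over_layer + self.drop(linear(pooled))
        return score_over_layer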