Example #1
    def __init__(self, input_dim, output_dim, model_args):
        super(GINNet_NC, self).__init__()
        self.latent_dim = model_args.latent_dim
        self.mlp_hidden = model_args.mlp_hidden
        self.emb_normlize = model_args.emb_normlize
        self.device = model_args.device
        self.num_gnn_layers = len(self.latent_dim)
        self.num_mlp_layers = len(self.mlp_hidden) + 1
        self.dense_dim = self.latent_dim[-1]
        self.readout_layers = get_readout_layers(model_args.readout)

        self.gnn_layers = nn.ModuleList()
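        # GIN message-passing stack with learnable epsilon (train_eps=True);
        # each layer's update MLP is a single Linear followed by ReLU.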
        self.gnn_layers.append(GINConv(nn.Sequential(
                nn.Linear(input_dim, self.latent_dim[0]),
                nn.ReLU()),
            train_eps=True)
        )

        for i in range(1, self.num_gnn_layers):
            self.gnn_layers.append(GINConv(nn.Sequential(
                nn.Linear(self.latent_dim[i-1], self.latent_dim[i]),
                nn.ReLU()),
                train_eps=True)
            )

        self.gnn_non_linear = nn.ReLU()
        self.Softmax = nn.Softmax(dim=-1)
Example #2
    def __init__(
            self, input_dimension: int,
            dimensions: _typing.Sequence[int],
            _act: str, _dropout: float,
            mlp_layers: int, _eps: str
    ):
        super(_GIN, self).__init__()

        self._act: str = _act

        def _get_act() -> torch.nn.Module:
            if _act == 'leaky_relu':
                return torch.nn.LeakyReLU()
            elif _act == 'relu':
                return torch.nn.ReLU()
            elif _act == 'elu':
                return torch.nn.ELU()
            elif _act == 'tanh':
                return torch.nn.Tanh()
            elif _act == 'prelu':
                return torch.nn.PReLU()
            else:
                return torch.nn.ReLU()

        convolution_layers: torch.nn.ModuleList = torch.nn.ModuleList()
        batch_normalizations: torch.nn.ModuleList = torch.nn.ModuleList()

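        # First GIN layer: update MLP from input_dimension to dimensions[0], plus
        # (mlp_layers - 1) extra activation/Linear blocks of the same width;
        # train_eps is enabled when the _eps string equals "True".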
        __mlp_layers = [torch.nn.Linear(input_dimension, dimensions[0])]
        for _ in range(mlp_layers - 1):
            __mlp_layers.append(_get_act())
            __mlp_layers.append(torch.nn.Linear(dimensions[0], dimensions[0]))
        convolution_layers.append(
            GINConv(torch.nn.Sequential(*__mlp_layers), train_eps=_eps == "True")
        )
        batch_normalizations.append(torch.nn.BatchNorm1d(dimensions[0]))

        num_layers: int = len(dimensions)
        for layer in range(num_layers - 1):
            __mlp_layers = [torch.nn.Linear(dimensions[layer], dimensions[layer + 1])]
            for _ in range(mlp_layers - 1):
                __mlp_layers.append(_get_act())
                __mlp_layers.append(
                    torch.nn.Linear(dimensions[layer + 1], dimensions[layer + 1])
                )
            convolution_layers.append(
                GINConv(torch.nn.Sequential(*__mlp_layers), train_eps=_eps == "True")
            )
            batch_normalizations.append(
                torch.nn.BatchNorm1d(dimensions[layer + 1])
            )

        self.__convolution_layers: torch.nn.ModuleList = convolution_layers
        self.__batch_normalizations: torch.nn.ModuleList = batch_normalizations
Example #3
    def __init__(self, in_channels: int, hidden_channels: int, num_layers: int,
                 out_channels: Optional[int] = None, dropout: float = 0.0,
                 act: Optional[Callable] = ReLU(inplace=True),
                 norm: Optional[torch.nn.Module] = None, jk: str = 'last',
                 **kwargs):
        super().__init__(in_channels, hidden_channels, num_layers,
                         out_channels, dropout, act, norm, jk)

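        # One GINConv per layer; GIN.MLP (a helper defined on the class, not shown
        # here) builds the update network for each convolution.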
        self.convs.append(
            GINConv(GIN.MLP(in_channels, hidden_channels), **kwargs))
        for _ in range(1, num_layers):
            self.convs.append(
                GINConv(GIN.MLP(hidden_channels, hidden_channels), **kwargs))
Example #4
    def __init__(self, input_dim, output_dim, model_args):
        super(GINNet, self).__init__()
        self.latent_dim = model_args.latent_dim
        self.mlp_hidden = model_args.mlp_hidden
        self.emb_normlize = model_args.emb_normlize
        self.device = model_args.device
        self.num_gnn_layers = len(self.latent_dim)
        self.num_mlp_layers = len(self.mlp_hidden) + 1
        self.dense_dim = self.latent_dim[-1]
        self.readout_layers = get_readout_layers(model_args.readout)

        self.gnn_layers = nn.ModuleList()
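        # GIN message-passing stack with learnable epsilon; each update MLP is
        # Linear -> BatchNorm -> ReLU -> Linear -> BatchNorm (the Linears are
        # bias-free because each is followed by BatchNorm).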
        self.gnn_layers.append(
            GINConv(
                nn.Sequential(
                    nn.Linear(input_dim, self.latent_dim[0], bias=False),
                    nn.BatchNorm1d(self.latent_dim[0]),
                    nn.ReLU(),
                    nn.Linear(self.latent_dim[0], self.latent_dim[0], bias=False),
                    nn.BatchNorm1d(self.latent_dim[0])),
                train_eps=True))

        for i in range(1, self.num_gnn_layers):
            self.gnn_layers.append(
                GINConv(
                    nn.Sequential(
                        nn.Linear(self.latent_dim[i - 1], self.latent_dim[i], bias=False),
                        nn.BatchNorm1d(self.latent_dim[i]),
                        nn.ReLU(),
                        nn.Linear(self.latent_dim[i], self.latent_dim[i], bias=False),
                        nn.BatchNorm1d(self.latent_dim[i])),
                    train_eps=True))

        self.gnn_non_linear = nn.ReLU()

        self.mlps = nn.ModuleList()
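        # Graph-level classifier head: maps the concatenated readout vectors
        # (dense_dim * number of readouts) through the MLP hidden sizes to output_dim.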
        if self.num_mlp_layers > 1:
            self.mlps.append(
                nn.Linear(self.dense_dim * len(self.readout_layers),
                          model_args.mlp_hidden[0]))
            for i in range(1, self.num_mlp_layers - 1):
                self.mlps.append(
                    nn.Linear(self.mlp_hidden[i - 1], self.mlp_hidden[i]))
            self.mlps.append(nn.Linear(self.mlp_hidden[-1], output_dim))
        else:
            self.mlps.append(
                nn.Linear(self.dense_dim * len(self.readout_layers),
                          output_dim))
        self.dropout = nn.Dropout(model_args.dropout)
        self.Softmax = nn.Softmax(dim=-1)
        self.mlp_non_linear = nn.ELU()
Example #5
    def __init__(self, input_dim: int, hidden_dim: int, output_dim: int,
                 num_layers: int, num_mlp_layers: int, task: str, **kwargs):
        super(GIN, self).__init__()

        self.num_layers = num_layers
        self.task = task

        self.bigger_input = input_dim > hidden_dim

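        # When input_dim <= hidden_dim, zero-pad node features up to hidden_dim so
        # every layer can share the same hidden_dim -> hidden_dim update MLP.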
        if not self.bigger_input:
            self.padding = nn.ConstantPad1d((0, hidden_dim - input_dim),
                                            value=0)

        self.convs = torch.nn.ModuleList()
        self.batch_norms = torch.nn.ModuleList()

        for layer in range(self.num_layers):
            if layer == 0 and self.bigger_input:
                _nn = nn.Sequential(nn.Linear(input_dim,
                                              hidden_dim), nn.ReLU(),
                                    nn.Linear(hidden_dim, hidden_dim))
            else:
                _nn = nn.Sequential(nn.Linear(hidden_dim,
                                              hidden_dim), nn.ReLU(),
                                    nn.Linear(hidden_dim, hidden_dim))
            self.convs.append(GINConv(nn=_nn))

            self.batch_norms.append(nn.BatchNorm1d(hidden_dim))

        self.linear_prediction = nn.Linear(hidden_dim, output_dim)
Example #6
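    # init_conv hook: every layer's GINConv wraps a two-layer MLP
    # (in_channels -> out_channels -> out_channels) built with the model's act/norm settings.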
    def init_conv(self, in_channels: int, out_channels: int,
                  **kwargs) -> MessagePassing:
        mlp = MLP(
            [in_channels, out_channels, out_channels],
            act=self.act,
            act_first=self.act_first,
            norm=self.norm,
            norm_kwargs=self.norm_kwargs,
        )
        return GINConv(mlp, **kwargs)
Example #7
    def __init__(
        self,
        in_dim_or_pre_mlp: Union[int, nn.Sequential],
        num_layers: int,
        vertex_embed_dim: int,
        mlp_num_hidden: int,
        mlp_hidden_dim: int,
        act,
        jk=True,
    ):
        super(GINNet, self).__init__()
        self.act = act()

        if isinstance(in_dim_or_pre_mlp, nn.Sequential):
            self.pre_mlp = in_dim_or_pre_mlp
            first_layer_dim = self.pre_mlp[-1].out_features
        elif isinstance(in_dim_or_pre_mlp, int):
            self.pre_mlp = None
            first_layer_dim = in_dim_or_pre_mlp
        else:
            raise TypeError(
                f"Expected int or nn.Sequential as in_dim_or_pre_mlp but found {type(in_dim_or_pre_mlp)}"
            )

        gin_layers_list = []
        batch_norms_list = []  # list of batchnorms applied to the output of each MLP
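        # One GINConv per layer: the first update MLP maps first_layer_dim to
        # vertex_embed_dim, later ones keep vertex_embed_dim throughout;
        # build_mlp is a project-local helper (not shown here).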
        for i in range(num_layers):
            gin_layers_list.append(
                GINConv(
                    build_mlp(
                        shapes=(
                            vertex_embed_dim if i > 0 else first_layer_dim,
                            mlp_hidden_dim,
                            vertex_embed_dim,
                        ),
                        act=act,
                        n_hidden=mlp_num_hidden,
                        batch_norm=True,
                    ),
                    train_eps=True,
                ))
            batch_norms_list.append(nn.BatchNorm1d(vertex_embed_dim))

        self.gin_layers = nn.ModuleList(gin_layers_list)
        self.batch_norms = nn.ModuleList(batch_norms_list)

        self.jk = jk
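        # With jumping knowledge, out_dim is the width of concatenating the first-layer
        # input with every layer's output (plus the raw pre-MLP input, if a pre_mlp is given).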
        if self.jk:
            self.out_dim = first_layer_dim + vertex_embed_dim * num_layers
            if self.pre_mlp is not None:
                self.out_dim += self.pre_mlp[0].in_features
        else:
            self.out_dim = vertex_embed_dim
Example #8
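    # Same hook, with the MLP's batch normalization enabled via the batch_norm flag
    # (an older torch_geometric MLP signature).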
    def init_conv(self, in_channels: int, out_channels: int,
                  **kwargs) -> MessagePassing:
        mlp = MLP([in_channels, out_channels, out_channels], batch_norm=True)
        return GINConv(mlp, **kwargs)