Esempio n. 1
0
 def __init__(self, in_channels, hidden_channels, out_channels, drop_out=0.1):
     """Three SAGEConv layers plus a linear head over their concatenation."""
     super(SAGENet, self).__init__()
     # Layer widths: input -> hidden, then two hidden -> hidden layers.
     widths = [(in_channels, hidden_channels),
               (hidden_channels, hidden_channels),
               (hidden_channels, hidden_channels)]
     for idx, (n_in, n_out) in enumerate(widths, start=1):
         setattr(self, 'conv%d' % idx, SAGEConv(n_in, n_out))
     # The head consumes the concatenation of all three conv outputs.
     self.lin = torch.nn.Linear(hidden_channels * 3, out_channels)
     self.drop_out = drop_out
        def __init__(self, in_channels, out_channels):
            """Stack three SAGEConv layers: in -> 16 -> 16 -> out."""
            super().__init__()

            # Build the ModuleList directly from the (source, target) widths.
            layer_dims = [(in_channels, 16), (16, 16), (16, out_channels)]
            self.convs = torch.nn.ModuleList(
                SAGEConv(src, dst) for src, dst in layer_dims)
Esempio n. 3
0
 def __init__(self, input_dim):
     """Four reverse-flow SAGE layers, a probability conv, and two linear heads."""
     super(SimpleNetWSage, self).__init__()
     # Every conv passes messages against the edge direction.
     reverse_flow = dict(flow="target_to_source")
     # NOTE: attributes are created in this exact order (succ1, succ3, succ2,
     # succ4) to keep parameter registration order unchanged.
     self.conv_succ1 = SAGEConv(input_dim, 64, **reverse_flow)
     self.conv_succ3 = SAGEConv(64, 64, **reverse_flow)
     self.conv_succ2 = SAGEConv(64, 64, **reverse_flow)
     self.conv_succ4 = SAGEConv(64, 64, **reverse_flow)
     self.conv_probs = SAGEConv(64, 1, **reverse_flow)
     self.do_nothing = Linear(64, 1)
     self.value = Linear(64, 1)
Esempio n. 4
0
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 dropout):
        """Build a ``num_layers``-deep SAGE stack: in -> hidden... -> out."""
        super(SAGE, self).__init__()

        # Source widths start at the input size; target widths end at the
        # output size; everything in between stays at the hidden width.
        src_dims = [in_channels] + [hidden_channels] * (num_layers - 1)
        dst_dims = [hidden_channels] * (num_layers - 1) + [out_channels]
        self.convs = torch.nn.ModuleList(
            SAGEConv(src, dst) for src, dst in zip(src_dims, dst_dims))

        self.dropout = dropout
    def __init__(self, in_channels: int, hidden_channels: int, num_layers: int,
                 out_channels: Optional[int] = None, dropout: float = 0.0,
                 act: Optional[Callable] = ReLU(inplace=True),
                 norm: Optional[torch.nn.Module] = None, jk: str = 'last',
                 **kwargs):
        """Fill ``self.convs`` with SAGEConv layers after base-class setup."""
        super().__init__(in_channels, hidden_channels, num_layers,
                         out_channels, dropout, act, norm, jk)

        # One input-width layer, then (num_layers - 1) hidden-to-hidden layers.
        self.convs.append(SAGEConv(in_channels, hidden_channels, **kwargs))
        self.convs.extend(
            SAGEConv(hidden_channels, hidden_channels, **kwargs)
            for _ in range(num_layers - 1))
Esempio n. 6
0
 def __init__(
     self,
     input_channels: int,
     output_channels: int,
     aggr: str,
     activation_name: _typing.Optional[str] = ...,
     dropout_probability: _typing.Optional[float] = ...,
 ):
     """One SAGEConv layer with an optional activation name and dropout.

     Args:
         input_channels: size of each input node feature vector.
         output_channels: size of each output node feature vector.
         aggr: aggregation scheme forwarded to ``SAGEConv``.
         activation_name: optional activation identifier; ``Ellipsis``
             (the default) and ``None`` both mean "no activation".
         dropout_probability: optional dropout rate, clamped into [0, 1];
             ``Ellipsis``/``None`` (or a non-float) disable dropout.
     """
     super().__init__()
     self._convolution: SAGEConv = SAGEConv(input_channels,
                                            output_channels,
                                            aggr=aggr)
     # ``Ellipsis`` doubles as a "not provided" sentinel alongside ``None``.
     # isinstance() replaces the type(x) == T anti-pattern; it also rejects
     # None on its own, so no separate None check is needed.
     if activation_name is not Ellipsis and isinstance(activation_name, str):
         self._activation_name: _typing.Optional[str] = activation_name
     else:
         self._activation_name = None
     if (dropout_probability is not Ellipsis
             and isinstance(dropout_probability, float)):
         # Clamp out-of-range rates instead of letting Dropout raise.
         rate = min(max(dropout_probability, 0.0), 1.0)
         self._dropout: _typing.Optional[
             torch.nn.Dropout] = torch.nn.Dropout(rate)
     else:
         self._dropout = None
Esempio n. 7
0
 def __init__(self, input_dimension: int, dimensions: _typing.Sequence[int],
              _act: _typing.Optional[str],
              _dropout: _typing.Optional[float],
              aggr: _typing.Optional[str]):
     """Build one SAGEConv per entry of ``dimensions``, chained by width."""
     super(_SAGE, self).__init__()
     self._act: _typing.Optional[str] = _act
     self._dropout: _typing.Optional[float] = _dropout
     self.__convolution_layers: torch.nn.ModuleList = torch.nn.ModuleList()
     # Each layer consumes the previous layer's output width (the raw input
     # dimension for the first layer) and emits the listed width.
     source_widths = [input_dimension, *dimensions[:-1]]
     for source, target in zip(source_widths, dimensions):
         self.__convolution_layers.append(
             SAGEConv(source, target, aggr=aggr))
Esempio n. 8
0
 def __init__(self, in_channels, out_channels):
     """Three-layer SAGE network with a fixed hidden width of 16."""
     super(Net, self).__init__()
     hidden = 16
     self.conv1 = SAGEConv(in_channels, hidden)
     self.conv2 = SAGEConv(hidden, hidden)
     self.conv3 = SAGEConv(hidden, out_channels)
Esempio n. 9
0
 def init_conv(self, in_channels: Union[int, Tuple[int, int]],
               out_channels: int, **kwargs) -> MessagePassing:
     """Create the SAGEConv used for one layer, forwarding extra options."""
     conv = SAGEConv(in_channels, out_channels, **kwargs)
     return conv
Esempio n. 10
0
File: models.py Progetto: lmb633/gnn
 def __init__(self, in_channels, out_channels):
     """Two SAGEConv layers (hidden width 16) without output normalization."""
     super(SAGENet, self).__init__()
     # Both layers share the same option: skip L2-normalizing the outputs.
     conv_opts = dict(normalize=False)
     self.conv1 = SAGEConv(in_channels, 16, **conv_opts)
     self.conv2 = SAGEConv(16, out_channels, **conv_opts)
Esempio n. 11
0
 def init_conv(self, in_channels: int, out_channels: int,
               **kwargs) -> MessagePassing:
     """Build the per-layer SAGEConv, passing through any extra keywords."""
     layer = SAGEConv(in_channels, out_channels, **kwargs)
     return layer
Esempio n. 12
0
 def __init__(self):
     """Three SAGEConv layers sized from the module-level ``dataset``."""
     # NOTE(review): relies on a global ``dataset`` with ``num_features``
     # and ``num_classes`` being in scope at call time.
     super(Net, self).__init__()
     width = 16
     self.conv1 = SAGEConv(dataset.num_features, width)
     self.conv2 = SAGEConv(width, width)
     self.conv3 = SAGEConv(width, dataset.num_classes)