Code Example #1
    def __init__(self, input_dim, output_dim, model_args):
        super(GATNet_NC, self).__init__()
        self.latent_dim = model_args.latent_dim
        self.mlp_hidden = model_args.mlp_hidden
        self.emb_normlize = model_args.emb_normlize
        self.device = model_args.device
        self.num_gnn_layers = model_args.num_gat_layer
        self.num_mlp_layers = len(self.mlp_hidden) + 1
        # With concat=True each GATConv concatenates its heads, so the width
        # fed to the next layer is gat_hidden * gat_heads.
        self.dense_dim = model_args.gat_hidden * model_args.gat_heads
        self.readout_layers = get_readout_layers(model_args.readout)

        self.gnn_layers = nn.ModuleList()
        self.gnn_layers.append(
            GATConv(input_dim,
                    model_args.gat_hidden,
                    heads=model_args.gat_heads,
                    dropout=model_args.gat_dropout,
                    concat=model_args.gat_concate))
        for i in range(1, self.num_gnn_layers):
            self.gnn_layers.append(
                GATConv(self.dense_dim,
                        model_args.gat_hidden,
                        heads=model_args.gat_heads,
                        dropout=model_args.gat_dropout,
                        concat=model_args.gat_concate))
        self.gnn_non_linear = nn.ReLU()
        self.Softmax = nn.Softmax(dim=-1)
Code Example #2
        def __init__(self, in_channels, out_channels):
            super().__init__()

            self.convs = torch.nn.ModuleList()
            self.convs.append(GATConv(in_channels, 16, heads=2))
            self.convs.append(GATConv(32, 16, heads=2))
            self.convs.append(GATConv(32, out_channels, heads=2, concat=False))
Code Example #3
    def __init__(self,
                 in_channels: int,
                 hidden_channels: int,
                 num_layers: int,
                 out_channels: Optional[int] = None,
                 dropout: float = 0.0,
                 act: Optional[Callable] = ReLU(inplace=True),
                 norm: Optional[torch.nn.Module] = None,
                 jk: str = 'last',
                 **kwargs):
        super().__init__(in_channels, hidden_channels, num_layers,
                         out_channels, dropout, act, norm, jk)

        # Hidden layers always concatenate their heads, so any user-supplied
        # `concat` flag is dropped here.
        if 'concat' in kwargs:
            del kwargs['concat']

        # With concatenation, each head must contribute an equal share of
        # `hidden_channels`.
        if 'heads' in kwargs:
            assert hidden_channels % kwargs['heads'] == 0

        out_channels = hidden_channels // kwargs.get('heads', 1)

        self.convs.append(
            GATConv(in_channels, out_channels, dropout=dropout, **kwargs))
        for _ in range(1, num_layers):
            self.convs.append(GATConv(hidden_channels, out_channels, **kwargs))
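
This constructor follows the BasicGNN-style GAT wrapper from PyTorch Geometric, where hidden_channels is split evenly across the attention heads. A minimal usage sketch, assuming the packaged torch_geometric.nn.GAT model and toy placeholder data (both assumptions, not part of the snippet above):

import torch
from torch_geometric.nn import GAT  # BasicGNN wrapper that stacks GATConv layers

# Toy graph: 10 nodes, 16 input features, a few directed edges.
x = torch.randn(10, 16)
edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])

# hidden_channels=64 with heads=8 gives 8 channels per head (64 % 8 == 0).
model = GAT(in_channels=16, hidden_channels=64, num_layers=3,
            out_channels=7, heads=8, dropout=0.5)
out = model(x, edge_index)  # shape: [10, 7]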
Code Example #4
File: gat.py  Project: znsoftm/cogdl
    def __init__(self, in_feats, hidden_size, out_feats, num_heads, dropout):
        super(GAT, self).__init__()
        self.in_feats = in_feats
        self.out_feats = out_feats
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.dropout = dropout
        self.conv1 = GATConv(in_feats, hidden_size, heads=num_heads, dropout=dropout)
        self.conv2 = GATConv(hidden_size * num_heads, out_feats, dropout=dropout)
Code Example #5
    def __init__(self, num_features, hidden_size, embedding_size, num_heads, dropout, num_clusters):
        super(DAEGC, self).__init__()

        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.embedding_size = embedding_size
        self.dropout = dropout
        self.num_clusters = num_clusters
        self.conv1 = GATConv(num_features, hidden_size, heads=num_heads, dropout=dropout)
        self.conv2 = GATConv(hidden_size * num_heads, embedding_size, dropout=dropout)
        self.cluster_center = nn.Parameter(torch.zeros(num_clusters, embedding_size))
Code Example #6
File: pyg_drgat.py  Project: ckhui/cogdl
    def __init__(self, num_features, num_classes, hidden_size, num_heads, dropout):
        super(DrGAT, self).__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.dropout = dropout
        self.conv1 = GATConv(
            num_features, hidden_size, heads=num_heads, dropout=dropout
        )
        self.conv2 = GATConv(hidden_size * num_heads, num_classes, dropout=dropout)
        self.se1 = SELayer(num_features, se_channels=int(np.sqrt(num_features)))
        self.se2 = SELayer(
            hidden_size * num_heads, se_channels=int(np.sqrt(hidden_size * num_heads))
        )
Code Example #7
    def __init__(self, num_features, num_classes, hidden_size, num_heads,
                 dropout):
        super(GAT, self).__init__()
        self.num_features = num_features
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.dropout = dropout
        self.conv1 = GATConv(num_features,
                             hidden_size,
                             heads=num_heads,
                             dropout=dropout)
        self.conv2 = GATConv(hidden_size * num_heads,
                             num_classes,
                             dropout=dropout)
Code Example #8
    def __init__(self,
                 input_dimension: int,
                 per_head_output_dimensions: _typing.Sequence[int],
                 num_hidden_heads: int,
                 num_output_heads: int,
                 _dropout: float,
                 _act: _typing.Optional[str],
                 concat_last: bool = True):
        super(_GAT, self).__init__()
        self._dropout: float = _dropout
        self._act: _typing.Optional[str] = _act
        total_output_dimensions: _typing.Sequence[int] = (
            GATUtils.to_total_hidden_dimensions(
                per_head_output_dimensions, num_hidden_heads, num_output_heads,
                concat_last=concat_last))
        num_layers = len(per_head_output_dimensions)
        self.__convolution_layers: torch.nn.ModuleList = torch.nn.ModuleList()
        for layer in range(num_layers):
            is_last_layer = layer == num_layers - 1
            self.__convolution_layers.append(
                GATConv(
                    (input_dimension if layer == 0
                     else total_output_dimensions[layer - 1]),
                    per_head_output_dimensions[layer],
                    heads=num_output_heads if is_last_layer else num_hidden_heads,
                    dropout=_dropout,
                    concat=(not is_last_layer) or concat_last))
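
The GATUtils.to_total_hidden_dimensions helper is not shown in this snippet; the loop above only needs it to report each layer's effective output width. A hedged stand-in for that arithmetic (a hypothetical sketch, not the project's actual implementation):

from typing import List, Sequence

def total_output_dims(per_head_dims: Sequence[int],
                      num_hidden_heads: int,
                      num_output_heads: int,
                      concat_last: bool = True) -> List[int]:
    # A layer that concatenates its heads outputs per_head_dim * heads features;
    # a layer that averages them outputs per_head_dim features.
    totals = []
    last = len(per_head_dims) - 1
    for layer, dim in enumerate(per_head_dims):
        heads = num_output_heads if layer == last else num_hidden_heads
        concat = concat_last if layer == last else True
        totals.append(dim * heads if concat else dim)
    return totals

# Two hidden layers of 8 channels per head with 4 heads each, then a final
# single-head layer of 7 channels that averages instead of concatenating.
assert total_output_dims([8, 8, 7], 4, 1, concat_last=False) == [32, 32, 7]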
Code Example #9
    def __init__(self, c_in, c_out, dropout, support_len=3, order=2):
        super(gcn, self).__init__()
        self.dropout = dropout
        self.order = order
        # `order` propagation steps per support are concatenated with the
        # original features, so the effective input width grows accordingly.
        c_in = (order * support_len + 1) * c_in
        self.GATConv = GATConv(c_in, c_out, heads=1, concat=True, dropout=dropout)
        self.mlp = linear(c_in, c_out)
Code Example #10
    def __init__(self,
                 num_features,
                 hidden_size,
                 num_classes=2,
                 num_heads=8,
                 dropout=0):
        super(GAT, self).__init__()

        self.conv1 = GATConv(num_features,
                             hidden_size,
                             heads=num_heads,
                             dropout=dropout)
        self.conv2 = GATConv(hidden_size * num_heads,
                             num_classes,
                             heads=1,
                             dropout=dropout)

        self.dropout = dropout
        self.activation = F.relu
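
Examples #4, #5, #7, and #10 share the same two-layer pattern: a concatenating first layer whose output width is hidden_size * num_heads, followed by a single-head output layer. The snippets only show constructors, so the forward pass below is an assumed sketch of how such a module is typically driven, not code from any of the projects above:

import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv

class TwoLayerGAT(torch.nn.Module):
    def __init__(self, num_features, hidden_size, num_classes, num_heads=8, dropout=0.6):
        super().__init__()
        # First layer concatenates heads: output width is hidden_size * num_heads.
        self.conv1 = GATConv(num_features, hidden_size, heads=num_heads, dropout=dropout)
        # Single-head second layer maps to the class logits.
        self.conv2 = GATConv(hidden_size * num_heads, num_classes, heads=1, dropout=dropout)
        self.dropout = dropout

    def forward(self, x, edge_index):
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = F.elu(self.conv1(x, edge_index))
        x = F.dropout(x, p=self.dropout, training=self.training)
        return self.conv2(x, edge_index)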
Code Example #11
    def init_conv(self, in_channels: int, out_channels: int,
                  **kwargs) -> MessagePassing:

        kwargs = copy.copy(kwargs)
        # Fall back to a single head if `out_channels` cannot be split evenly
        # across the requested heads.
        if 'heads' in kwargs and out_channels % kwargs['heads'] != 0:
            kwargs['heads'] = 1
        # When heads are concatenated (the default), each head produces an
        # equal share of `out_channels`.
        if 'concat' not in kwargs or kwargs['concat']:
            out_channels = out_channels // kwargs.get('heads', 1)

        return GATConv(in_channels, out_channels, dropout=self.dropout,
                       **kwargs)
Code Example #12
    def init_conv(self, in_channels: int, out_channels: int,
                  **kwargs) -> MessagePassing:

        heads = kwargs.pop('heads', 1)
        concat = kwargs.pop('concat', True)

        # Do not use concatenation in case the `GATConv` layer maps to the
        # desired output channels (out_channels != None and jk != None):
        if getattr(self, '_is_conv_to_out', False):
            concat = False

        if concat and out_channels % heads != 0:
            raise ValueError(f"Ensure that the number of output channels of "
                             f"'GATConv' (got '{out_channels}') is divisible "
                             f"by the number of heads (got '{heads}')")

        if concat:
            out_channels = out_channels // heads

        return GATConv(in_channels, out_channels, heads=heads, concat=concat,
                       dropout=self.dropout, **kwargs)
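
Examples #3, #11, and #12 all enforce the same invariant: when heads are concatenated, each head must produce out_channels // heads features so that the concatenation recovers the requested width. A small standalone illustration of that rule (a hypothetical helper, not part of any snippet above):

def per_head_channels(out_channels: int, heads: int, concat: bool = True) -> int:
    if not concat:
        # Averaged heads: every head already produces the full output width.
        return out_channels
    if out_channels % heads != 0:
        raise ValueError(
            f"out_channels ({out_channels}) must be divisible by heads ({heads})")
    # Concatenated heads: each head contributes an equal share.
    return out_channels // heads

assert per_head_channels(64, 8) == 8                # 8 heads x 8 channels -> 64
assert per_head_channels(7, 8, concat=False) == 7   # averaging keeps the full width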
Code Example #13
    def __init__(self,
                 in_channels_primal,
                 in_channels_dual,
                 out_channels_primal,
                 out_channels_dual,
                 single_dual_nodes,
                 undirected_dual_edges,
                 heads=1,
                 concat_primal=True,
                 concat_dual=True,
                 negative_slope_primal=0.2,
                 negative_slope_dual=0.2,
                 dropout_primal=0,
                 dropout_dual=0,
                 bias_primal=True,
                 bias_dual=True,
                 add_self_loops_to_dual_graph=False):
        if (single_dual_nodes):
            assert (undirected_dual_edges), (
                "The dual-graph configuration with single dual nodes and "
                "directed dual edges is not valid. Please specify a different "
                "configuration.")
        super(DualPrimalConv, self).__init__()
        # Save input parameters as an attribute.
        self.__input_parameters = {
            k: v
            for k, v in locals().items() if (k[0] != '_' and k != 'self')
        }

        # The operation performed on the dual graph is a standard GAT
        # convolution.
        if (add_self_loops_to_dual_graph):
            # NOTE: Self-loops are added in the dual graph, and are used both
            # when computing attention coefficients and when performing feature
            # aggregation.
            self._dual_layer = GATConv(in_channels=in_channels_dual,
                                       out_channels=out_channels_dual,
                                       heads=heads,
                                       concat=concat_dual,
                                       negative_slope=negative_slope_dual,
                                       dropout=dropout_dual,
                                       bias=bias_dual)
        else:
            # NOTE: No self-loops are added in the dual graph.
            self._dual_layer = GATConvNoSelfLoops(
                in_channels=in_channels_dual,
                out_channels=out_channels_dual,
                heads=heads,
                concat=concat_dual,
                negative_slope=negative_slope_dual,
                dropout=dropout_dual,
                bias=bias_dual)

        # The operation performed on the primal graph is a modified version of
        # GAT convolution that uses dual features to compute attention
        # coefficients. NOTE: PrimalConv has a modified forward() method that
        # does not insert self-loops (as they would not find a correspondence in
        # the dual graph).
        self._primal_layer = PrimalConv(
            in_channels=in_channels_primal,
            out_channels=out_channels_primal,
            out_channels_dual=out_channels_dual,
            heads=heads,
            concat=concat_primal,
            concat_dual=concat_dual,
            negative_slope=negative_slope_primal,
            dropout=dropout_primal,
            bias=bias_primal,
            single_dual_nodes=single_dual_nodes,
            undirected_dual_edges=undirected_dual_edges)