Example #1
    def __init__(
            self, in_dim, hidden_dim, out_dim, num_heads, dropout,
            residual=False, activation=None):
        """GAT模型

        :param in_dim: int 输入特征维数
        :param hidden_dim: int 隐含特征维数
        :param out_dim: int 输出特征维数
        :param num_heads: List[int] 每一层的注意力头数,长度等于层数
        :param dropout: float Dropout概率
        :param residual: bool, optional 是否使用残差连接,默认为False
        :param activation: callable, optional 输出层激活函数
        :raise ValueError: 如果层数(即num_heads的长度)小于2
        """
        super().__init__()
        num_layers = len(num_heads)
        if num_layers < 2:
            raise ValueError('At least 2 layers are required, got {}'.format(num_layers))
        self.layers = nn.ModuleList()
        self.layers.append(GATConv(
            in_dim, hidden_dim, num_heads[0], dropout, dropout, residual=residual, activation=F.elu
        ))
        for i in range(1, num_layers - 1):
            self.layers.append(GATConv(
                num_heads[i - 1] * hidden_dim, hidden_dim, num_heads[i], dropout, dropout,
                residual=residual, activation=F.elu
            ))
        self.layers.append(GATConv(
            num_heads[-2] * hidden_dim, out_dim, num_heads[-1], dropout, dropout,
            residual=residual, activation=activation
        ))
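The snippet shows only the constructor. A minimal forward sketch under the usual DGL conventions (the graph argument g and the head handling are assumptions, not part of the original): GATConv returns a tensor of shape (N, num_heads, out_feats), so the hidden layers concatenate their heads while the output layer averages them.

    def forward(self, g, feats):
        h = feats
        for layer in self.layers[:-1]:
            # concatenate the attention heads: (N, K, d) -> (N, K * d)
            h = layer(g, h).flatten(start_dim=1)
        # average the heads of the output layer: (N, K, d_out) -> (N, d_out)
        return self.layers[-1](g, h).mean(dim=1)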
Example #2
    def __init__(self,
                 g,                # DGL graph object
                 n_layers,         # number of layers
                 in_feats,         # input feature dimension
                 n_hidden,         # hidden feature dimension
                 n_classes,        # number of classes
                 heads,            # number of attention heads per layer
                 activation,       # activation function
                 in_drop,          # dropout rate on the input features
                 at_drop,          # dropout rate on the attention weights
                 negative_slope,   # slope a of the Leaky ReLU used in the attention
                 ):
        super(GAT, self).__init__()
        self.g = g
        self.num_layers = n_layers
        self.activation = activation

        self.gat_layers = nn.ModuleList()

        self.gat_layers.append(GATConv(
            in_feats, n_hidden, heads[0],
            in_drop, at_drop, negative_slope, activation=self.activation))

        for l in range(1, n_layers):
            self.gat_layers.append(GATConv(
                n_hidden * heads[l-1], n_hidden, heads[l],
                in_drop, at_drop, negative_slope, activation=self.activation))

        self.gat_layers.append(GATConv(
            n_hidden * heads[-2], n_classes, heads[-1],
            in_drop, at_drop, negative_slope, activation=None))
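Note that this constructor builds n_layers + 1 GATConv layers (one input, n_layers - 1 hidden, one output), so heads needs n_layers + 1 entries for the n_hidden * heads[l-1] input sizes to line up. A usage sketch with assumed values (the dataset dimensions and F.elu are illustrative, not from the original):

# one input layer, n_layers - 1 hidden layers, one output layer
heads = [8] * 2 + [1]  # n_layers = 2 -> 3 head counts
model = GAT(g, n_layers=2, in_feats=1433, n_hidden=8, n_classes=7,
            heads=heads, activation=F.elu, in_drop=0.6, at_drop=0.6,
            negative_slope=0.2)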
Example #3
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, gnn_type):
        super(GNN, self).__init__()

        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.gnn_type = gnn_type

        self.convs = nn.ModuleList()

        if gnn_type == "gcn":
            self.convs.append(GraphConv(input_dim, hidden_dim))
        elif gnn_type == "sage":
            self.convs.append(SAGEConv(input_dim, hidden_dim, "gcn"))
        elif gnn_type == "gat":
            self.convs.append(GATConv(input_dim, hidden_dim, num_heads=3))
        else:
            raise ValueError("Invalid gnn_type")

        for i in range(num_layers - 2):
            if gnn_type == "gcn":
                self.convs.append(GraphConv(hidden_dim, hidden_dim))
            elif gnn_type == "sage":
                self.convs.append(SAGEConv(hidden_dim, hidden_dim, "gcn"))
            elif gnn_type == "gat":
                self.convs.append(GATConv(hidden_dim, hidden_dim, num_heads=3))

        if gnn_type == "gcn":
            self.convs.append(GraphConv(hidden_dim, output_dim))
        elif gnn_type == "sage":
            self.convs.append(SAGEConv(hidden_dim, output_dim, "gcn"))
        elif gnn_type == "gat":
            self.convs.append(GATConv(hidden_dim, output_dim, num_heads=3))
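The "gat" branch keeps hidden_dim -> hidden_dim with num_heads=3 on every layer, which only works if the forward pass reduces over the heads. A minimal forward sketch under that assumption (the graph argument g, F.relu, and the head averaging are assumptions):

    def forward(self, g, x):
        h = x
        for i, conv in enumerate(self.convs):
            h = conv(g, h)
            if self.gnn_type == "gat":
                # GATConv returns (N, num_heads, out_feats); average the heads so
                # the feature size stays compatible with the next layer
                h = h.mean(dim=1)
            if i < len(self.convs) - 1:
                h = F.relu(h)
        return h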
Example #4
 def __init__(self,
              num_layers,
              in_dim,
              num_hidden,
              num_classes,
              heads,
              activation,
              feat_drop=0,
              attn_drop=0,
              negative_slope=0.2,
              residual=False):
     super(GAT, self).__init__()
     self.num_layers = num_layers
     self.gat_layers = nn.ModuleList()
     self.activation = activation
     # input projection (no residual)
     self.gat_layers.append(
         GATConv(in_dim, num_hidden, heads[0], feat_drop, attn_drop,
                 negative_slope, False, self.activation))
     # hidden layers
     for l in range(1, num_layers):
         # due to multi-head, the in_dim = num_hidden * num_heads
         self.gat_layers.append(
             GATConv(num_hidden * heads[l - 1], num_hidden, heads[l],
                     feat_drop, attn_drop, negative_slope, residual,
                     self.activation))
     # output projection
     self.gat_layers.append(
         GATConv(num_hidden * heads[-2], num_classes, heads[-1], feat_drop,
                 attn_drop, negative_slope, residual, None))
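The forward pass that usually accompanies this constructor in the DGL GAT example, reproduced here as a sketch (the snippet itself shows only __init__, and the graph argument g is assumed): heads of the input and hidden layers are concatenated, heads of the output projection are averaged.

    def forward(self, g, inputs):
        h = inputs
        for l in range(self.num_layers):
            # concatenate the heads: (N, K, d) -> (N, K * d)
            h = self.gat_layers[l](g, h).flatten(1)
        # output projection: average the heads instead of concatenating them
        logits = self.gat_layers[-1](g, h).mean(1)
        return logits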
Example #5
    def __init__(self, model_config):
        super(GAT, self).__init__()

        # adj = np.load("/root/workdir/input/fnc_adj.npy")
        self.G = dgl.DGLGraph(np.where(np.ones([53, 53]) == 1))
        self.linear1 = nn.Linear(400, 256)

        self.gat_conv1 = GATConv(256,
                                 32,
                                 num_heads=8,
                                 feat_drop=model_config.dropout_rate,
                                 residual=True)
        self.bn1 = nn.BatchNorm1d(64)

        self.gat_conv2 = GATConv(256,
                                 32,
                                 num_heads=8,
                                 feat_drop=model_config.dropout_rate,
                                 residual=True)
        self.bn2 = nn.BatchNorm1d(64)

        self.gat_conv3 = GATConv(256,
                                 32,
                                 num_heads=8,
                                 feat_drop=model_config.dropout_rate,
                                 residual=True)
        self.bn3 = nn.BatchNorm1d(64)

        self.loading_mlp = nn.Sequential(
            nn.Linear(26, 128),
            nn.LeakyReLU(),
            nn.Dropout(model_config.dropout_rate),
        )

        self.last_linear = nn.Linear(256 * 3 + 128, model_config.num_classes)
Example #6
 def __init__(self,
              num_layers,
              in_dim,
              num_hidden,
              heads,
              feat_drop=0,
              attn_drop=0,
              negative_slope=0.2,
              activation=None,
              residual=False):
     super(GAT, self).__init__()
     self.num_layers = num_layers
     self.gat_layers = nn.ModuleList()
     self.activation = activation
     # input projection (no residual)
     self.gat_layers.append(
         GATConv(in_dim,
                 num_hidden,
                 heads[0],
                 feat_drop,
                 attn_drop,
                 negative_slope,
                 False,
                 None,
                 allow_zero_in_degree=True))
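Passing allow_zero_in_degree=True silences the error GATConv raises for nodes with no incoming edges (such nodes receive no messages, so their output from the layer is not meaningful). The common alternative, shown below as a sketch, is to add self-loops to the graph before calling the model:

# alternative to allow_zero_in_degree=True: guarantee every node has an in-edge
g = dgl.remove_self_loop(g)
g = dgl.add_self_loop(g)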
Example #7
 def __init__(self, in_dim, hid_dim, out_dim, num_heads=1, residual=False):
     super(GeniePathConv, self).__init__()
     self.breadth_func = GATConv(in_dim,
                                 hid_dim,
                                 num_heads=num_heads,
                                 residual=residual)
     self.depth_func = LSTM(hid_dim, out_dim)
Example #8
File: gat.py  Project: isratnisa/dgl
    def __init__(self,
                 data_info: dict,
                 embed_size: int = -1,
                 num_layers: int = 2,
                 hidden_size: int = 8,
                 heads: List[int] = [8, 8],
                 activation: str = "elu",
                 feat_drop: float = 0.6,
                 attn_drop: float = 0.6,
                 negative_slope: float = 0.2,
                 residual: bool = False):
        """Graph Attention Networks

        Parameters
        ----------
        data_info : dict
            The information about the input dataset.
        embed_size : int
            The dimension of the created embedding table. -1 means using the original node embedding.
        num_layers : int
            Number of layers.
        hidden_size : int
            Hidden size.
        heads : List[int]
            Number of attention heads for each layer.
        activation : str
            Activation function.
        feat_drop : float
            Dropout rate for features.
        attn_drop : float
            Dropout rate for attentions.
        negative_slope : float
            Negative slope of the leaky ReLU in GATConv.
        residual : bool
            If True, GATConv will use a residual connection.
        """
        super(GAT, self).__init__()
        self.data_info = data_info
        self.embed_size = embed_size
        self.num_layers = num_layers
        self.gat_layers = nn.ModuleList()
        self.activation = getattr(torch.nn.functional, activation)

        if embed_size > 0:
            self.embed = nn.Embedding(data_info["num_nodes"], embed_size)
            in_size = embed_size
        else:
            in_size = data_info["in_size"]

        for i in range(num_layers):
            in_hidden = hidden_size * heads[i - 1] if i > 0 else in_size
            out_hidden = hidden_size if i < num_layers - \
                1 else data_info["out_size"]
            # no residual on the input projection; no activation on the output layer
            use_residual = residual if i > 0 else False
            activation = None if i == num_layers - 1 else self.activation

            self.gat_layers.append(
                GATConv(in_hidden, out_hidden, heads[i], feat_drop, attn_drop,
                        negative_slope, use_residual, activation))
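A usage sketch for this constructor; the data_info keys follow the code above, while the concrete numbers are assumptions for illustration:

# data_info supplies the dataset dimensions used by the model
data_info = {"num_nodes": 2708, "in_size": 1433, "out_size": 7}
model = GAT(data_info, embed_size=-1, num_layers=2, hidden_size=8,
            heads=[8, 1], activation="elu", feat_drop=0.6, attn_drop=0.6)

With embed_size=-1 the model consumes the original node features; a positive embed_size instead creates a learnable nn.Embedding over data_info["num_nodes"] nodes.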
Example #9
    def __init__(self,
                 in_dim,
                 hidden_dim,
                 layer_num,
                 sample_size,
                 device,
                 GNN_name="GIN"):
        super(GNNStructEncoder, self).__init__()
        self.n_distribution = 7  # number of Gaussian distributions to use
        self.out_dim = hidden_dim
        if GNN_name == "GIN":
            self.linear1 = MLP(layer_num, hidden_dim, hidden_dim, hidden_dim)
            self.graphconv1 = GINConv(apply_func=self.linear1,
                                      aggregator_type='sum')
            self.linear2 = MLP(layer_num, hidden_dim, hidden_dim, hidden_dim)
            self.graphconv2 = GINConv(apply_func=self.linear2,
                                      aggregator_type='sum')
        elif GNN_name == "GCN":
            self.graphconv1 = GraphConv(hidden_dim, hidden_dim)
            self.graphconv2 = GraphConv(hidden_dim, hidden_dim)
        else:
            self.graphconv = GATConv(hidden_dim, hidden_dim, num_heads=10)
        # self.neighbor_num_list = neighbor_num_list
        self.linear_classifier = MLP(1, hidden_dim, hidden_dim,
                                     self.n_distribution)
        self.neighbor_generator = MLP_generator(hidden_dim, hidden_dim,
                                                sample_size).to(device)
        # Gaussian Means, and std
        self.gaussian_mean = nn.Parameter(
            torch.FloatTensor(sample_size, self.n_distribution,
                              hidden_dim).uniform_(-0.5 / hidden_dim, 0.5 /
                                                   hidden_dim)).to(device)
        self.gaussian_log_sigma = nn.Parameter(
            torch.FloatTensor(sample_size, self.n_distribution,
                              hidden_dim).uniform_(-0.5 / hidden_dim, 0.5 /
                                                   hidden_dim)).to(device)
        self.m = torch.distributions.Normal(
            torch.zeros(sample_size, self.n_distribution, hidden_dim),
            torch.ones(sample_size, self.n_distribution, hidden_dim))

        # Before MLP Gaussian Means, and std
        self.mlp_gaussian_mean = nn.Parameter(
            torch.FloatTensor(hidden_dim).uniform_(-0.5 / hidden_dim, 0.5 /
                                                   hidden_dim)).to(device)
        self.mlp_gaussian_log_sigma = nn.Parameter(
            torch.FloatTensor(hidden_dim).uniform_(-0.5 / hidden_dim, 0.5 /
                                                   hidden_dim)).to(device)
        self.mlp_m = torch.distributions.Normal(torch.zeros(hidden_dim),
                                                torch.ones(hidden_dim))

        # Decoders
        self.degree_decoder = FNN(hidden_dim, hidden_dim, 1, 4)
        # self.degree_loss_func = FocalLoss(int(max_degree_num) + 1)
        self.degree_loss_func = nn.MSELoss()
        self.pool = mp.Pool(1)
        self.in_dim = in_dim
        self.sample_size = sample_size
Example #10
    def __init__(self, d_model, d_inner, dropout=0.1):
        super(GATEncoder, self).__init__()
        # self.proj1 = nn.Linear(d_model, d_model)
        # self.proj2 = nn.Linear(d_inner, d_model)
        # self.layer_norm = LayerNormalization(d_model)

        self.proj1 = GATConv(d_model, d_model, num_heads=3)

        init.xavier_normal_(self.proj1.weight)
Example #11
    def __init__(self, G, hid_dims, num_layers, num_heads=3, multihot=True):
        super().__init__(G, hid_dims, num_layers, multihot)

        self.num_heads = num_heads
        self.input_layer = GATConv(self.in_dims,
                                   hid_dims,
                                   num_heads=num_heads,
                                   residual=True)
        # wrap the hidden layers in a ModuleList so they are registered as submodules
        self.hidden_layers = nn.ModuleList([
            GATConv(hid_dims * num_heads,
                    hid_dims,
                    num_heads=num_heads,
                    residual=True) for _ in range(num_layers)
        ])
        self.output_layer = GATConv(hid_dims * num_heads,
                                    self.out_dims,
                                    num_heads=1,
                                    residual=True)
Example #12
 def __init__(self, in_dim, out_dim, hid_dim=16, num_layers=2, num_heads=1, residual=False):
     super(GeniePathLazy, self).__init__()
     self.hid_dim = hid_dim
     self.linear1 = nn.Linear(in_dim, hid_dim)
     self.linear2 = th.nn.Linear(hid_dim, out_dim)
     self.breaths = nn.ModuleList()
     self.depths = nn.ModuleList()
     for i in range(num_layers):
         self.breaths.append(GATConv(hid_dim, hid_dim, num_heads=num_heads, residual=residual))
         self.depths.append(LSTM(hid_dim*2, hid_dim))
Example #13
    def __init__(self, num_metapaths, in_dim, out_dim, num_heads, dropout):
        """HAN层

        :param num_metapaths: int 元路径个数
        :param in_dim: int 输入特征维数
        :param out_dim: int 输出特征维数
        :param num_heads: int 注意力头数K
        :param dropout: float Dropout概率
        """
        super().__init__()
        # node-level attention: one GAT layer per metapath
        self.gats = nn.ModuleList([
            GATConv(in_dim, out_dim, num_heads, dropout, dropout, activation=F.elu)
            for _ in range(num_metapaths)
        ])
        # semantic-level attention
        self.semantic_attention = SemanticAttention(in_dim=num_heads * out_dim)
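A forward sketch for this layer (assumed, not part of the snippet): each metapath-specific graph goes through its own GATConv, the K heads are concatenated, and the per-metapath embeddings are fused by the semantic-level attention.

    def forward(self, gs, h):
        # gs: one graph per metapath
        zp = [gat(g, h).flatten(start_dim=1) for gat, g in zip(self.gats, gs)]
        zp = torch.stack(zp, dim=1)          # (N, num_metapaths, K * out_dim)
        return self.semantic_attention(zp)   # (N, K * out_dim)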
Example #14
 def __init__(self, in_dim, hidden_dim, device):
     super(GNNAnomalyDetctor, self).__init__()
     self.in_dim = in_dim
     self.gatconv = GATConv(in_dim, hidden_dim, num_heads=3)
     self.nwr_gae = GNNStructEncoder(in_dim, hidden_dim, layer_num=2, sample_size=5, device=device)
     self.linear = torch.nn.Linear(10, 10)
Example #15
 def __init__(self):
     super(Net, self).__init__()
     self.layer1 = NNConv(300, 150, edge_f, 'mean')
     # GATConv requires in_feats, out_feats and num_heads; the original snippet passed
     # no arguments, so the values below are assumed for illustration
     self.layer2 = GATConv(150, 150, num_heads=1)
Example #16
 def __init__(self, nfeat, nhid, nclass=2, dropout=False):
     super(GAT, self).__init__()
     self.conv1 = GATConv(nfeat, nhid, num_heads=4)
     self.conv2 = GATConv(4 * nhid, nclass, num_heads=1)
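A two-layer forward sketch (the graph argument g and F.elu are assumptions): the 4 heads of conv1 are concatenated to match the 4 * nhid input of conv2, and the single head of conv2 is averaged out.

    def forward(self, g, x):
        h = F.elu(self.conv1(g, x).flatten(start_dim=1))   # (N, 4 * nhid)
        return self.conv2(g, h).mean(dim=1)                # (N, nclass)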
Example #17
 def __init__(self, in_dim, out_dim, num_heads=4, is_final_layer=False):
     super(GAT_Layer, self).__init__()
     self.gat = GATConv(in_dim, out_dim, num_heads=num_heads)
     self.batchnorm = nn.BatchNorm1d(out_dim * num_heads)
     self.activation = nn.LeakyReLU()
     self.is_final_layer = is_final_layer
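One plausible forward for this wrapper, sketched here since the original does not show it: the heads are concatenated so that BatchNorm1d(out_dim * num_heads) matches, and batch norm plus LeakyReLU are skipped when is_final_layer is set (that reading of the flag is an assumption).

    def forward(self, g, h):
        h = self.gat(g, h).flatten(start_dim=1)   # (N, num_heads * out_dim)
        if self.is_final_layer:
            return h
        return self.activation(self.batchnorm(h))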
Example #18
 def __init__(self, in_feats, h_feats):
     super(GAT, self).__init__()
     self.conv1 = GATConv(in_feats, h_feats // 8, 8)
     self.conv2 = GATConv(h_feats, h_feats // 8, 8)
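A forward sketch (g and F.elu assumed): each layer uses 8 heads of size h_feats // 8, so concatenating the heads restores h_feats between and after the layers.

    def forward(self, g, in_feat):
        h = self.conv1(g, in_feat).flatten(start_dim=1)   # (N, h_feats)
        h = F.elu(h)
        h = self.conv2(g, h).flatten(start_dim=1)         # (N, h_feats)
        return h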