Example #1
 def __init__(self,
              in_features,
              hidden_features,
              out_features,
              in_head,
              out_head,
              activation,
              feat_drop=0.6,
              attn_drop=0.6,
              negative_slope=0.2,
              residual=False,
              is_out_layer=True):
     super().__init__()
     # FIXME: handle zero-in-degree nodes properly (allow_zero_in_degree=True is a workaround)
     heads = [in_head, out_head]
     self.conv1 = GATConv(in_features,
                          hidden_features,
                          heads[0],
                          residual=residual,
                          allow_zero_in_degree=True)
     self.conv2 = GATConv(hidden_features * heads[0],
                          out_features,
                          heads[-1],
                          residual=residual,
                          allow_zero_in_degree=True)
     self.is_out_layer = is_out_layer
     self.activation = F.elu_ if activation == 'Elu' else F.relu
     self.input_dropout = nn.Dropout(p=feat_drop)
     self.dropout = nn.Dropout(p=attn_drop)
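The constructor above only wires the two GATConv layers together; a matching forward pass would typically flatten the heads of the first convolution and average those of the second. The sketch below is an assumption based on the attributes defined above, not the original author's forward method:

    def forward(self, graph, feat):
        # Hedged sketch, not from the source: dropout, conv1, activation, dropout, conv2.
        h = self.input_dropout(feat)
        h = self.conv1(graph, h)            # (N, in_head, hidden_features)
        h = self.activation(h.flatten(1))   # concatenate heads -> (N, in_head * hidden_features)
        h = self.dropout(h)
        h = self.conv2(graph, h)            # (N, out_head, out_features)
        if self.is_out_layer:
            return h.mean(1)                # average heads for the output layer
        return self.activation(h.flatten(1))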
Example #2
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_heads,
                 dropout,
                 batch_norm,
                 residual=False,
                 activation=F.elu):
        super(GATLayerV7, self).__init__(in_dim, out_dim, num_heads, dropout,
                                         batch_norm, residual, activation)
        self.use_second_loop = (in_dim == out_dim * num_heads)
        factor = 2 if self.use_second_loop else 1
        self.out_dim = out_dim
        self.in_dim = in_dim
        self.num_heads = num_heads

        self.gatconv = GATConv(in_dim, out_dim, num_heads * factor, dropout,
                               dropout)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_dim * num_heads * factor)

        if self.use_second_loop:
            self.gatconv2 = GATConv(in_dim, out_dim, num_heads, dropout,
                                    dropout)
            if self.batch_norm:
                self.batchnorm_h2 = nn.BatchNorm1d(out_dim * num_heads)
Example #3
    def __init__(self, in_dim, hidden_dim):
        super(Net, self).__init__()
        #self.conv1 = GraphConv(in_dim, hidden_dim)
        #self.conv2 = GraphConv(hidden_dim, hidden_dim)
        #self.conv3 = GraphConv(hidden_dim, hidden_dim)

        self.conv1 = GATConv(in_dim,
                             hidden_dim,
                             4,
                             residual=True,
                             activation=F.relu)
        self.conv2 = GATConv(4 * hidden_dim,
                             hidden_dim,
                             4,
                             residual=True,
                             activation=F.relu)
        self.conv3 = GATConv(4 * hidden_dim, hidden_dim, 4, residual=True)

        self.w_group_mlp = nn.Sequential(nn.Linear(hidden_dim,
                                                   hidden_dim), nn.ReLU(True),
                                         nn.Linear(hidden_dim, 1),
                                         nn.Sigmoid())
        self.entity_linear = nn.Linear(hidden_dim, hidden_dim)
        self.entity_link_mlp = nn.Sequential(nn.Linear(hidden_dim, hidden_dim),
                                             nn.ReLU(True),
                                             nn.Linear(hidden_dim, 1),
                                             nn.Sigmoid())
        self.training = True
        self.thresh = 0.5
        self.entity_classify = nn.Sequential(nn.Linear(hidden_dim, 4),
                                             nn.Sigmoid())
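Example #4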
    def __init__(self,
                 in_dim,
                 out_dim,
                 num_heads,
                 dropout,
                 batch_norm,
                 residual=False,
                 activation=F.elu):
        super().__init__()
        self.residual = residual
        self.activation = activation
        self.batch_norm = batch_norm

        if in_dim != (out_dim * num_heads):
            self.residual = False

        if dgl.__version__ < "0.5":
            self.gatconv = GATConv(in_dim, out_dim, num_heads, dropout,
                                   dropout)
        else:
            self.gatconv = GATConv(in_dim,
                                   out_dim,
                                   num_heads,
                                   dropout,
                                   dropout,
                                   allow_zero_in_degree=True)

        if self.batch_norm:
            self.batchnorm_h = nn.BatchNorm1d(out_dim * num_heads)
Example #5
    def __init__(self, in_feats, n_hidden, n_classes, n_layers, n_heads,
                 activation, feat_drop, attn_drop, negative_slope, residual):
        super().__init__()

        self.n_layers = n_layers
        self.activation = activation
        self.n_hidden = n_hidden
        self.n_heads = n_heads
        self.n_classes = n_classes
        self.convs = nn.ModuleList()

        # input layer
        self.convs.append(
            GATConv((in_feats, in_feats), n_hidden, n_heads, feat_drop,
                    attn_drop, negative_slope, residual, self.activation))
        # hidden layer
        for _ in range(1, n_layers - 1):
            # due to multi-head, the in_dim = num_hidden * num_heads
            self.convs.append(
                GATConv((n_hidden * n_heads, n_hidden * n_heads), n_hidden,
                        n_heads, feat_drop, attn_drop, negative_slope,
                        residual, self.activation))
        # output layer
        self.convs.append(
            GATConv((n_hidden * n_heads, n_hidden * n_heads), n_classes,
                    n_heads, feat_drop, attn_drop, negative_slope, residual,
                    None))
Example #6
 def __init__(self,
              g,
              n_layer,
              in_dim,
              num_hidden,
              num_classes,
              heads,
              activation,
              feat_drop=0.6,
              attn_drop=0.6,
              negative_slope=0.2,
              residual=False):
     super(GAT, self).__init__()
     self.n_layer = n_layer
     self.gat_layers = nn.ModuleList()
     self.bns = torch.nn.ModuleList()
     self.activation = activation
     # input projection (no residual)
     self.gat_layers.append(
         GATConv(in_dim, num_hidden, heads[0], feat_drop, attn_drop,
                 negative_slope, False))
     self.bns.append(torch.nn.BatchNorm1d(num_hidden * heads[0]))
     # hidden layers
     for l in range(1, n_layer):
         # due to multi-head, the in_dim = num_hidden * n_head
         self.gat_layers.append(
             GATConv(num_hidden * heads[l - 1], num_hidden, heads[l],
                     feat_drop, attn_drop, negative_slope, residual))
         self.bns.append(torch.nn.BatchNorm1d(num_hidden * heads[l]))
     # output projection
     self.gat_layers.append(
         GATConv(num_hidden * heads[-2], num_classes, heads[-1], feat_drop,
                 attn_drop, negative_slope, residual, None))
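Only the constructor is shown; a forward that matches the layer and BatchNorm lists built above would plausibly look like the following. This is a hedged sketch, not part of the source:

    def forward(self, g, feat):
        h = feat
        for l in range(self.n_layer):
            h = self.gat_layers[l](g, h).flatten(1)   # (N, heads[l] * num_hidden)
            h = self.bns[l](h)
            h = self.activation(h)
        h = self.gat_layers[-1](g, h)                 # output projection
        return h.mean(1)                              # average heads -> (N, num_classes)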
Example #7
 def __init__(self,
              num_layers,
              in_dim,
              num_hidden,
              num_classes,
              num_heads=1,
              feat_drop=0.1,
              attn_drop=0.1,
              negative_slope=None,
              residual=True,
              activation=None):
     super(GAT, self).__init__()
     self.num_layers = num_layers
     self.gat_layers = nn.ModuleList()
     self.activation = activation
     # input projection (no residual)
     self.gat_layers.append(GATConv(in_dim, num_hidden, num_heads,
                                    feat_drop=feat_drop,
                                    attn_drop=attn_drop,
                                    residual=residual,
                                    activation=activation))
     # hidden layers
     for l in range(1, num_layers):
         # due to multi-head, the in_dim = num_hidden * num_heads
         self.gat_layers.append(GATConv(num_hidden * num_heads, num_hidden, num_heads,
                                        feat_drop=feat_drop,
                                        attn_drop=attn_drop,
                                        residual=residual,
                                        activation=activation))
     # output projection (maps to num_classes, which was otherwise unused)
     self.gat_layers.append(GATConv(num_hidden * num_heads, num_classes, num_heads,
                                    feat_drop=feat_drop,
                                    attn_drop=attn_drop,
                                    residual=residual,
                                    activation=activation))
Example #8
    def __init__(self, g, num_layers, in_dim, num_hidden, heads, activation,
                 feat_drop, attn_drop, negative_slope, residual, num_atom_type,
                 num_bond_type):
        super(GATZinc, self).__init__()
        self.g = g
        self.num_layers = num_layers
        self.gat_layers = nn.ModuleList()
        self.BNs = nn.ModuleList()
        self.norm_layers = nn.ModuleList()
        self.activation = activation
        self.num_atom_type = num_atom_type
        self.num_bond_type = num_bond_type

        # atom_type embedding
        self.embed = nn.Embedding(num_atom_type, in_dim)

        # input projection (no residual)
        self.gat_layers.append(
            GATConv(in_dim, num_hidden, heads[0], feat_drop, attn_drop,
                    negative_slope, False, None))
        self.BNs.append(nn.BatchNorm1d(num_hidden * heads[0]))

        # hidden layers
        for l in range(1, num_layers):
            # due to multi-head, the in_dim = num_hidden * num_heads
            self.gat_layers.append(
                GATConv(num_hidden * heads[l - 1], num_hidden, heads[l],
                        feat_drop, attn_drop, negative_slope, residual,
                        self.activation))
            self.BNs.append(nn.BatchNorm1d(num_hidden * heads[l]))

        hidden_dim = num_hidden * heads[-2]
        self.regressor1 = nn.Linear(hidden_dim, hidden_dim // 2)
        self.regressor2 = nn.Linear(hidden_dim // 2, 1)
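Example #9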
 def __init__(self,
              num_layers,
              in_dim,
              num_hidden,
              heads,
              activation,
              feat_drop,
              attn_drop,
              negative_slope,
              residual):
     super(GAT, self).__init__()
     self.num_layers = num_layers
     self.gat_layers = nn.ModuleList()
     self.activation = activation
     # input projection (no residual)
     self.gat_layers.append(GATConv(
         in_dim, num_hidden, heads[0],
         feat_drop, attn_drop, negative_slope, False, self.activation))
     # hidden layers
     for l in range(1, num_layers):
         # due to multi-head, the in_dim = num_hidden * num_heads
         self.gat_layers.append(GATConv(
             num_hidden * heads[l-1], num_hidden, heads[l],
             feat_drop, attn_drop, negative_slope, residual, self.activation))
     # output projection
     self.gat_layers.append(GATConv(
         num_hidden * heads[-2], num_hidden, heads[-1],
         feat_drop, attn_drop, negative_slope, residual, None))
Example #10
    def __init__(self, g, num_layers, in_dim, num_hidden, num_classes, heads,
                 activation, feat_drop, attn_drop, negative_slope, residual):
        super(GAT, self).__init__()
        self.g = g
        self.num_layers = num_layers
        self.gat_layers = nn.ModuleList()
        self.activation = activation
        # node embeddings
        # embed_dict = {ntype: nn.Parameter(torch.Tensor(g.number_of_nodes(ntype), in_dim))
        #               for ntype in g.ntypes}
        self.embed = nn.Parameter(torch.Tensor(g.number_of_nodes(), in_dim))
        # input projection (no residual)
        self.gat_layers.append(
            GATConv(in_dim, num_hidden, heads[0], feat_drop, attn_drop,
                    negative_slope, False, self.activation))
        # hidden layers
        for l in range(1, num_layers):
            # due to multi-head, the in_dim = num_hidden * num_heads
            self.gat_layers.append(
                GATConv(num_hidden * heads[l - 1], num_hidden, heads[l],
                        feat_drop, attn_drop, negative_slope, residual,
                        self.activation))
        # output projection
        self.gat_layers.append(
            GATConv(num_hidden * heads[-2], num_classes, heads[-1], feat_drop,
                    attn_drop, negative_slope, residual, None))

        self.emb_size = num_classes
Example #11
class MultiGATBaseConvs(nn.Module):

    def __init__(self, input_feat_channel=512, n_head=16):
        super(MultiGATBaseConvs, self).__init__()
        self.n_head = n_head
        self.l1 = GATConv(in_feats=input_feat_channel, out_feats=256, num_heads=int(n_head / 2), residual=True)
        self.l2 = GATConv(in_feats=int(n_head / 2) * 256, out_feats=256, num_heads=n_head, residual=True)
        self.l3 = GATConv(in_feats=n_head * 256, out_feats=256, num_heads=n_head, residual=True)
        self.l4 = GATConv(in_feats=n_head * 256, out_feats=256, num_heads=n_head, residual=True)
        self.l5 = GATConv(in_feats=n_head * 256, out_feats=512, num_heads=1, residual=True)

    #         self.l = GATConv(in_feats=[n_head, 512], out_feats=[1, 512], num_heads=1)

    def forward(self, graph, feat):
        N = feat.shape[0]
        #         print(feat.shape)
        x = self.l1.forward(graph, feat)
        x1 = F.relu(x)
        #         print(x.shape)
        x = self.l2.forward(graph, x1.view(N, -1))
        x = F.relu(x)

        x = self.l3.forward(graph, x.view(N, -1))
        x = F.relu(x)

        x = self.l4.forward(graph, x.view(N, -1))
        x = F.relu(x)

        x = self.l5.forward(graph, x.view(N, -1))
        x = F.relu(x)

        return x.view(N, -1)
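A minimal usage sketch for the module above (illustrative only; the graph, sizes, and dgl/torch imports are assumptions, not from the source). Self-loops are added so that no node has zero in-degree, which GATConv rejects by default in recent DGL versions:

import dgl
import torch

g = dgl.add_self_loop(dgl.rand_graph(num_nodes=32, num_edges=128))
feat = torch.randn(32, 512)                        # input_feat_channel = 512
model = MultiGATBaseConvs(input_feat_channel=512, n_head=16)
out = model(g, feat)                               # (32, 512) after the single-head last layer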
Example #12
 def __init__(self,
              in_features,
              hidden_features,
              out_features,
              in_head,
              out_head,
              activation,
              feat_drop=0.6,
              attn_drop=0.6,
              negative_slope=0.2,
              residual=False):
     super().__init__()
     # FIXME: handle zero-in-degree nodes properly (allow_zero_in_degree=True is a workaround)
     heads = [in_head, in_head, out_head]
     self.conv1 = GATConv(in_features,
                          hidden_features,
                          heads[0],
                          residual=residual,
                          allow_zero_in_degree=True)
     self.conv2 = GATConv(hidden_features * heads[0],
                          hidden_features,
                          heads[1],
                          residual=residual,
                          allow_zero_in_degree=True)
     self.conv3 = GATConv(hidden_features * heads[1],
                          out_features,
                          heads[-1],
                          residual=residual,
                          allow_zero_in_degree=True)
     self.bn1 = torch.nn.BatchNorm1d(hidden_features * in_head)
     self.bn2 = torch.nn.BatchNorm1d(hidden_features * in_head)
     self.input_drop = nn.Dropout(feat_drop)
     self.dropout = nn.Dropout(attn_drop)
     self.activation = F.elu_ if activation == 'Elu' else F.relu
Example #13
 def __init__(self, input_feat_channel=512, n_head=16):
     super(MultiGATBaseConvs, self).__init__()
     self.n_head = n_head
     self.l1 = GATConv(in_feats=input_feat_channel, out_feats=256, num_heads=int(n_head / 2), residual=True)
     self.l2 = GATConv(in_feats=int(n_head / 2) * 256, out_feats=256, num_heads=n_head, residual=True)
     self.l3 = GATConv(in_feats=n_head * 256, out_feats=256, num_heads=n_head, residual=True)
     self.l4 = GATConv(in_feats=n_head * 256, out_feats=256, num_heads=n_head, residual=True)
     self.l5 = GATConv(in_feats=n_head * 256, out_feats=512, num_heads=1, residual=True)
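Example #14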
 def __init__(self, f_in):
     super(GAT, self).__init__()
     self.f_in = f_in
     self.GATlayers = nn.ModuleList()
     self.GATlayers.extend(
         [GATConv(f_in, 20, 4),
          GATConv(80, 25, 2),
          GATConv(50, 10, 1)])
Example #15
 def __init__(self, num_features, num_classes):
     super(GAT, self).__init__()
     self.conv1 = GATConv(num_features,
                          8,
                          8,
                          feat_drop=.6,
                          attn_drop=.6,
                          activation=F.relu)
     self.conv2 = GATConv(8 * 8, num_classes, 1, feat_drop=.6, attn_drop=.6)
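A plausible forward for the two-layer model above (a sketch under that assumption, not code from the source): concatenate the eight heads of conv1, then squeeze the single head of conv2 to get per-node logits.

    def forward(self, g, feat):
        h = self.conv1(g, feat).flatten(1)   # (N, 8 * 8), ReLU already applied inside conv1
        h = self.conv2(g, h)                 # (N, 1, num_classes)
        return h.squeeze(1)

Example #16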
 def __init__(self):
     super(Decoder, self).__init__()
     self.GATlayers = nn.ModuleList()
     self.tanh = nn.Tanh()
     self.GATlayers.extend([
         GATConv(10, 10, 4),
         GATConv(40, 20, 4),
         GATConv(80, 93, 1),
     ])
Example #17
    def __init__(self, g, n_layers, input_size, hidden_size, output_size, nonlinearity, **kwargs):
        super().__init__()

        self.g = g
        self.layers = nn.ModuleList()
        self.layers.append(GATConv(input_size, hidden_size, activation=None, num_heads=kwargs["num_heads"]))
        for i in range(n_layers - 1):
            self.layers.append(GATConv(hidden_size, hidden_size, activation=None, num_heads=kwargs["num_heads"]))
        self.layers.append(GATConv(hidden_size*kwargs["num_heads"], output_size, num_heads=1))
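Example #18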
 def __init__(self):
     super(Classifier, self).__init__()
     self.GATlayers = nn.ModuleList()
     self.sigmoid = nn.Sigmoid()
     self.GATlayers.extend([
         GATConv(93, 25, 4),
         GATConv(100, 25, 2),
         GATConv(50, 25, 1),
     ])
     self.fc = nn.Linear(25, 1)
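Example #19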
 def __init__(self):
     super(VisualDiscriminator, self).__init__()
     self.GATlayers = nn.ModuleList()
     self.sigmoid = nn.Sigmoid()
     self.GATlayers.extend([
         GATConv(93, 25, 4, activation=nn.ReLU()),
         GATConv(100, 25, 2, activation=nn.ReLU()),
         GATConv(50, 25, 1, activation=nn.ReLU()),
     ])
     self.fc = nn.Sequential(nn.Linear(25, 1), nn.Sigmoid())
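Example #20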
 def __init__(self):
     super(Decoder, self).__init__()
     self.GATlayers = nn.ModuleList()
     self.tanh = nn.Tanh()
     self.GATlayers.extend([
         GATConv(25, 10, 4, activation=nn.ReLU()),
         GATConv(40, 25, 4, activation=nn.ReLU()),
         GATConv(100, 50, 1, activation=nn.ReLU()),
     ])
     self.fc = nn.Linear(50, 93)
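Example #21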
 def __init__(self, f_in):
     super(GAT, self).__init__()
     self.f_in = f_in
     self.GATlayers = nn.ModuleList()
     self.tanh = nn.Tanh()
     self.GATlayers.extend([
         GATConv(f_in, 25, 4, activation=nn.ReLU()),
         GATConv(100, 25, 2, activation=nn.ReLU()),
         GATConv(50, 25, 1),
     ])
Example #22
 def __init__(self, in_feats, hidden_size, num_classes):
     super(GCN, self).__init__()
     self.conv1 = GATConv(in_feats,
                          hidden_size,
                          num_heads=2,
                          feat_drop=0.2,
                          attn_drop=0.2)
     self.conv2 = GATConv(hidden_size,
                          num_classes,
                          num_heads=1,
                          feat_drop=0.2,
                          attn_drop=0.2)
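conv2 expects hidden_size input features, so a forward for this model would need to average (rather than concatenate) the two heads of conv1. A hedged sketch, not from the source (F is torch.nn.functional, assumed imported as in the other examples):

    def forward(self, g, feat):
        h = self.conv1(g, feat).mean(1)      # (N, hidden_size), average the 2 heads
        h = F.relu(h)
        h = self.conv2(g, h)                 # (N, 1, num_classes)
        return h.squeeze(1)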
Example #23
 def __init__(self,
              num_layers,
              in_dim,
              num_hidden,
              num_classes,
              heads,
              activation=None,
              feat_drop=0.,
              attn_drop=0.,
              negative_slope=0.2,
              residual=False):
     super(GAT, self).__init__()
     self.num_layers = num_layers
     self.gat_layers = nn.ModuleList()
     self.activation = activation
     # input projection (no residual)
     #self.gat_layers.append(GATConv(in_dim, num_hidden, heads[0],feat_drop, attn_drop, negative_slope, False, self.activation))
     self.gat_layers.append(
         GATConv(in_feats=in_dim,
                 out_feats=num_hidden,
                 num_heads=heads[0],
                 feat_drop=feat_drop,
                 attn_drop=attn_drop,
                 negative_slope=negative_slope,
                 residual=False,
                 activation=self.activation))
     # hidden layers
     for l in range(1, num_layers):
         # due to multi-head, the in_dim = num_hidden * num_heads
         #self.gat_layers.append(GATConv(num_hidden * heads[l-1], num_hidden, heads[l],feat_drop, attn_drop, negative_slope, residual, self.activation))
         self.gat_layers.append(
             GATConv(in_feats=num_hidden * heads[l - 1],
                     out_feats=num_hidden,
                     num_heads=heads[l],
                     feat_drop=feat_drop,
                     attn_drop=attn_drop,
                     negative_slope=negative_slope,
                     residual=residual,
                     activation=self.activation))
     # output projection
     #self.gat_layers.append(GATConv(num_hidden * heads[-2], num_classes, heads[-1],feat_drop, attn_drop, negative_slope, residual, None))
     self.gat_layers.append(
         GATConv(in_feats=num_hidden * heads[-2],
                 out_feats=num_classes,
                 num_heads=heads[-1],
                 feat_drop=feat_drop,
                 attn_drop=attn_drop,
                 negative_slope=negative_slope,
                 residual=residual,
                 activation=None))
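Because heads[-2] and heads[-1] are read separately, the heads list needs num_layers + 1 entries. An illustrative instantiation (the sizes are assumptions, roughly Cora-like, not from the source):

model = GAT(num_layers=2,
            in_dim=1433,
            num_hidden=8,
            num_classes=7,
            heads=[8, 8, 1],       # heads[0..num_layers-1] for GAT layers, heads[-1] for the output projection
            activation=F.elu,
            feat_drop=0.6,
            attn_drop=0.6)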
Example #24
class MultiGATBaseConvs(nn.Module):
    def __init__(self, input_feat_channel=512):
        super(MultiGATBaseConvs, self).__init__()
        n_head = 16
        self.l1 = GATConv(in_feats=input_feat_channel,
                          out_feats=512,
                          num_heads=n_head,
                          residual=True)
        self.l2 = GATConv(in_feats=n_head * 512,
                          out_feats=512,
                          num_heads=n_head,
                          residual=True)
        self.l3 = GATConv(in_feats=n_head * 512,
                          out_feats=512,
                          num_heads=n_head,
                          residual=True)
        self.l4 = GATConv(in_feats=n_head * 512,
                          out_feats=512,
                          num_heads=1,
                          residual=True)
        self.l1.forward = MethodType(forward2, self.l1)
        self.l2.forward = MethodType(forward2, self.l2)
        self.l3.forward = MethodType(forward2, self.l3)
        self.l4.forward = MethodType(forward2, self.l4)

#         self.l = GATConv(in_feats=[n_head, 512], out_feats=[1, 512], num_heads=1)

    def forward(self, graph, feat):
        N = feat.shape[0]
        #         print(feat.shape)
        x, _, _ = self.l1.forward(graph, feat)
        x1 = F.relu(x)
        #         print(x.shape)
        x, _, _ = self.l2.forward(graph, x1.view(N, -1))
        x = F.relu(x)

        x, _, _ = self.l3.forward(graph, x.view(N, -1))
        x = F.relu(x)

        x, attn, bef = self.l4.forward(graph, x.view(N, -1))
        x = F.relu(x)
        #         bef = F.relu(bef)
        #         print(x1-x)

        diff = (x1 - x).detach().cpu().numpy()
        plt.hist(np.asarray(diff).ravel(), bins=100)
        plt.ylabel('Freq.')
        plt.show()

        return x.view(N, -1), attn, bef.view(N, -1)
Example #25
    def __init__(self,
                 graph,
                 n_heads,
                 n_layers,
                 input_size,
                 hidden_size,
                 output_size,
                 nonlinearity,
                 dropout=0.6):
        super().__init__()

        self.n_layers = n_layers
        self.g = graph
        self.convs = nn.ModuleList()
        self.linear = nn.ModuleList()
        self.bns = nn.ModuleList()

        for i in range(n_layers):
            in_hidden = n_heads * hidden_size if i > 0 else input_size
            out_hidden = hidden_size if i < n_layers - 1 else output_size
            out_channels = n_heads

            self.convs.append(
                GATConv(in_hidden, out_hidden, num_heads=n_heads, attn_drop=0))
            self.linear.append(
                nn.Linear(in_hidden, out_channels * out_hidden, bias=False))
            if i < n_layers - 1:
                self.bns.append(nn.BatchNorm1d(out_channels * out_hidden))

        self.dropout0 = nn.Dropout(min(0.1, dropout))
        self.dropout = nn.Dropout(dropout)
        self.activation = nonlinearity
Example #26
    def __init__(self,
                 in_features=64,
                 out_features=2,
                 hidden_gat_sizes=None,
                 dropout_p=0.2):
        super(NetGATConv, self).__init__()

        if hidden_gat_sizes is None:
            hidden_gat_sizes = [10]

        self.dropout = nn.Dropout(p=dropout_p)

        prev_hidden = in_features
        self.hidden_gat_layers = []
        self.hidden_b_norms = []
        for out_hidden in hidden_gat_sizes:
            num_heads = 2
            self.hidden_gat_layers.append(
                GATConv(prev_hidden, out_hidden, num_heads=num_heads))
            self.hidden_b_norms.append(nn.BatchNorm1d(out_hidden))
            prev_hidden = out_hidden
        self.hidden_gat_layers = nn.ModuleList(self.hidden_gat_layers)
        self.hidden_b_norms = nn.ModuleList(self.hidden_b_norms)

        self.last_linear_layer = Linear(prev_hidden, out_features)
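A forward consistent with these shapes would average the two heads of each GATConv so the width stays at out_hidden, matching the BatchNorm1d layers. A hedged sketch, not the source implementation (F is torch.nn.functional, assumed imported):

    def forward(self, g, feat):
        h = feat
        for gat, bn in zip(self.hidden_gat_layers, self.hidden_b_norms):
            h = gat(g, h).mean(1)            # (N, out_hidden), average the 2 heads
            h = F.relu(bn(h))
            h = self.dropout(h)
        return self.last_linear_layer(h)     # (N, out_features)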
Example #27
 def __init__(self, f_in, f_out, num_heads, num_layers, feat_drop,
              attn_drop):
     super(GAT, self).__init__()
     self.f_in = f_in
     self.f_out = f_out
     self.num_heads = num_heads
     self.GATlayers = nn.ModuleList()
     self.fc_class = nn.Sequential(
         nn.Linear(self.f_out * self.num_heads, 10),
         nn.ReLU(),
         nn.Linear(10, 1),
         # nn.Sigmoid(),
     )
     self.fc_confidence = nn.Sequential(
         nn.Linear(self.f_out * self.num_heads, 10),
         nn.ReLU(),
         nn.Linear(10, 1),
         nn.Sigmoid(),
     )
     for i in range(num_layers):
         self.GATlayers.append(
             GATConv(f_in,
                     f_out,
                     num_heads,
                     feat_drop=feat_drop,
                     attn_drop=attn_drop,
                     activation=nn.ReLU()))
         f_in = f_out * num_heads
Example #28
    def __init__(self):
        super(Model, self).__init__()

        #lift features of the nodes
        self.lifting_layer = nn.Embedding(hyperparams["num_features"],
                                          hyperparams["hsz"])

        #latent representations of the nodes
        self.sageConv1 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')

        self.sageConv2 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')

        self.sageConv3 = SAGEConv(in_feats=hyperparams["hsz"],
                                  out_feats=hyperparams["hsz"],
                                  aggregator_type='lstm')

        self.GAT_conv1 = GATConv(in_feats=hyperparams["hsz"],
                                 out_feats=hyperparams["hsz"],
                                 num_heads=hyperparams["num_heads"])

        #readout layer (also task specific layer  during pretraining)
        self.output_layer = nn.Linear(hyperparams["hsz"], 3)
Example #29
    def __init__(self, in_dim, out_dim, num_heads, dropout, graph_norm, batch_norm, residual=False, activation=None, dgl_builtin=False):

        super().__init__()
        self.dgl_builtin = dgl_builtin

        if not dgl_builtin:
            self.in_channels = in_dim
            self.out_channels = out_dim
            self.num_heads = num_heads
            self.residual = residual
            
            if in_dim != (out_dim*num_heads):
                self.residual = False
            
            self.heads = nn.ModuleList()
            for i in range(num_heads):
                self.heads.append(GATHeadLayer(in_dim, out_dim, dropout, graph_norm, batch_norm))
            self.merge = 'cat' 

        else:
            self.in_channels = in_dim
            self.out_channels = out_dim
            self.num_heads = num_heads
            self.residual = residual
            self.activation = activation
            self.graph_norm = graph_norm
            self.batch_norm = batch_norm
            
            if in_dim != (out_dim*num_heads):
                self.residual = False

            # Both feat and weighting dropout tied together here
            self.conv = GATConv(in_dim, out_dim, num_heads, dropout, dropout)
            self.batchnorm_h = nn.BatchNorm1d(out_dim)
Example #30
    def __init__(self,
                 num_nodes,
                 num_layers,
                 in_dim,
                 num_hidden,
                 heads,
                 feat_drop,
                 attn_drop,
                 negative_slope,
                 residual,
                 embed=None,
                 embed_connect="residual",
                 graph=None):
        super(GraphAttentionEncoder, self).__init__()

        # Embedding layer
        if embed is not None:
            if embed.shape[1] > num_hidden:
                raise Exception('Pretrained embedding dimension mismatch: '
                                'required {}-d, but got {}-d'.format(
                                    num_hidden, embed.shape[1]))
                # svd = decomposition.TruncatedSVD(n_components=num_hidden)
                # embed = svd.fit_transform(embed)
                # embed = torch.tensor(embed, dtype=torch.float)
            self.emb_node = nn.Embedding.from_pretrained(embed)
        else:
            self.emb_node = nn.Embedding(num_nodes, num_hidden)

        # Hidden layers
        layers = []
        for idx in range(num_layers):
            activation = None if idx == num_layers - 1 else F.elu
            if idx == 0:
                layer = GATConv(in_dim, num_hidden, heads[idx], feat_drop,
                                attn_drop, negative_slope, False, activation)
            else:
                layer = GATConv(num_hidden * heads[idx - 1], num_hidden,
                                heads[idx], feat_drop, attn_drop,
                                negative_slope, residual, activation)
            layers.append(layer)
        self.gat_layers = nn.ModuleList(layers)

        # Record full graph for propagation
        self.graph = graph

        # Connection from the node embedding to the relational decoder
        self.emb_connect = embed_connect
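How the encoder is applied is not shown; a plausible encoding step (an assumption, including the method name) would look up the node embeddings and run them through the stacked GAT layers on the stored graph, concatenating heads between layers. This assumes in_dim == num_hidden and that a graph was supplied:

    def encode(self, node_ids):
        h = self.emb_node(node_ids)               # (N, num_hidden)
        for layer in self.gat_layers:
            h = layer(self.graph, h).flatten(1)   # (N, heads[idx] * num_hidden)
        return h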