Example #1
    def __init__(self, num_layers, in_dim, num_hidden, num_classes, heads,
                 activation, feat_drop, attn_drop, negative_slope, residual,
                 readout, device):
        super(Classifier, self).__init__()
        self.num_layers = num_layers
        self.layers = nn.ModuleList()
        self.activation = activation
        self.readout = readout
        self.device = device

        # input projection (no residual)
        self.layers.append(
            conv.GATConv(in_dim, num_hidden, heads[0], feat_drop, attn_drop,
                         negative_slope, False, self.activation))

        # hidden layers
        for l in range(1, num_layers):
            # due to multi-head, the in_dim = num_hidden * num_heads
            self.layers.append(
                conv.GATConv(num_hidden * heads[l - 1], num_hidden, heads[l],
                             feat_drop, attn_drop, negative_slope, residual,
                             self.activation))
        # output projection
        self.layers.append(
            conv.GATConv(num_hidden * heads[-2], num_hidden, heads[-1],
                         feat_drop, attn_drop, negative_slope, residual, None))

        # last layer
        self.classify = nn.Linear(num_hidden, num_classes)
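
The constructor above only defines the layers; a minimal forward pass matching this layout (a sketch, not part of the original example, assuming heads are concatenated between layers, averaged at the output layer, and that dgl.mean_nodes serves as the graph readout) could look like:

    def forward(self, g, h):
        # hypothetical forward pass; GATConv returns (N, num_heads, out_feats)
        for l in range(self.num_layers):
            h = self.layers[l](g, h).flatten(1)    # concatenate the heads
        h = self.layers[-1](g, h).mean(1)          # average heads of the output layer
        with g.local_scope():
            g.ndata['h'] = h
            hg = dgl.mean_nodes(g, 'h')            # assumed graph-level readout
        return self.classify(hg)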
Example #2
 def __init__(self, in_feats, n_hidden, n_classes, n_layers, num_heads,
              activation, dropout):
     super().__init__()
     self.n_layers = n_layers
     self.n_hidden = n_hidden
     self.n_classes = n_classes
     self.layers = nn.ModuleList()
     self.layers.append(
         dglnn.GATConv((in_feats, in_feats),
                       n_hidden,
                       num_heads=num_heads,
                       feat_drop=0.,
                       attn_drop=0.,
                       activation=activation,
                       negative_slope=0.2))
     for i in range(1, n_layers - 1):
         self.layers.append(
             dglnn.GATConv((n_hidden * num_heads, n_hidden * num_heads),
                           n_hidden,
                           num_heads=num_heads,
                           feat_drop=0.,
                           attn_drop=0.,
                           activation=activation,
                           negative_slope=0.2))
     self.layers.append(
         dglnn.GATConv((n_hidden * num_heads, n_hidden * num_heads),
                       n_classes,
                       num_heads=num_heads,
                       feat_drop=0.,
                       attn_drop=0.,
                       activation=None,
                       negative_slope=0.2))
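
As above, only the constructor is shown; a plausible minibatch forward pass over DGL message-flow graphs (blocks) for this layer stack, sketched here rather than taken from the original example, would be:

 def forward(self, blocks, x):
     # hypothetical forward pass: one block per GATConv layer
     h = x
     for l, (layer, block) in enumerate(zip(self.layers, blocks)):
         h_dst = h[:block.number_of_dst_nodes()]   # dst nodes come first in the src set
         h = layer(block, (h, h_dst))
         if l != self.n_layers - 1:
             h = h.flatten(1)                      # (N, num_heads * n_hidden)
         else:
             h = h.mean(1)                         # (N, n_classes)
     return h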
Example #3
 def __init__(self, in_feat, hidden_feat, out_feat, rel_names):
     super().__init__()
     self.conv1 = dglnn.HeteroGraphConv({
         rel: dglnn.GATConv(in_feat, hidden_feat, num_heads=4)
         for rel in rel_names
     })
     self.conv2 = dglnn.HeteroGraphConv({
         rel: dglnn.GATConv(hidden_feat, out_feat, num_heads=4)
         for rel in rel_names
     })
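
A forward pass is not shown in this example; since conv2 expects hidden_feat input features (not hidden_feat * num_heads), the four heads of conv1 are presumably averaged rather than concatenated. A sketch under that assumption, with a ReLU between the two layers as a further assumption:

 def forward(self, g, inputs):
     # hypothetical forward pass; inputs is a dict: node type -> feature tensor
     h = self.conv1(g, inputs)
     h = {ntype: feat.mean(1).relu() for ntype, feat in h.items()}   # average heads
     h = self.conv2(g, h)
     return {ntype: feat.mean(1) for ntype, feat in h.items()}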
Example #4
def test_gat_conv():
    ctx = F.ctx()
    g = dgl.rand_graph(100, 1000)
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((100, 5))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (100, 4, 2)

    g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
    gat = nn.GATConv((5, 10), 2, 4)
    feat = (F.randn((100, 5)), F.randn((200, 10)))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (200, 4, 2)
Example #5
def test_gat_conv():
    ctx = F.ctx()
    g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True)
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((100, 5))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape[-1] == 2 and h.shape[-2] == 4
Example #6
def test_gat_conv(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((g.number_of_nodes(), 5))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (g.number_of_nodes(), 4, 2)
Example #7
 def __init__(self,
              in_feats,
              n_hidden,
              n_classes,
              n_layers,
              num_heads,
              activation):
     super().__init__()
     self.n_layers = n_layers
     self.n_hidden = n_hidden
     self.n_classes = n_classes
     self.layers = nn.ModuleList()
     self.layers.append(dglnn.GATConv((in_feats, in_feats), n_hidden, num_heads=num_heads, activation=activation))
     for i in range(1, n_layers - 1):
         self.layers.append(dglnn.GATConv((n_hidden * num_heads, n_hidden * num_heads), n_hidden,
                                          num_heads=num_heads, activation=activation))
     self.layers.append(dglnn.GATConv((n_hidden * num_heads, n_hidden * num_heads), n_classes,
                                      num_heads=num_heads, activation=None))
Example #8
def test_gat_conv_bi(g, idtype, out_dim, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gat = nn.GATConv(5, out_dim, num_heads)
    feat = (F.randn((g.number_of_src_nodes(), 5)), F.randn((g.number_of_dst_nodes(), 5)))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, a = gat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), num_heads, 1)
Example #9
def test_gat_conv(g, idtype):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((g.number_of_nodes(), 5))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (g.number_of_nodes(), 4, 2)
    _, a = gat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), 4, 1)
Example #10
def test_gat_conv(g, idtype, out_dim, num_heads):
    g = g.astype(idtype).to(F.ctx())
    ctx = F.ctx()
    gat = nn.GATConv(5, out_dim, num_heads)
    feat = F.randn((g.number_of_src_nodes(), 5))
    gat = gat.to(ctx)
    h = gat(g, feat)

    # test pickle
    th.save(gat, tmp_buffer)

    assert h.shape == (g.number_of_dst_nodes(), num_heads, out_dim)
    _, a = gat(g, feat, get_attention=True)
    assert a.shape == (g.number_of_edges(), num_heads, 1)

    # test residual connection
    gat = nn.GATConv(5, out_dim, num_heads, residual=True)
    gat = gat.to(ctx)
    h = gat(g, feat)
Example #11
    def __init__(self,
                 layer_type,
                 block_type,
                 activation,
                 normalization=None,
                 **core_layer_hyperparms):
        super(GNNBasicBlock, self).__init__()
        self.layer_type = layer_type
        self.block_type = block_type

        if self.layer_type in ['gcn', 'gcn_res']:
            self.core_layer_type = 'gcn'
            self.core_layer = dglnn.GraphConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=core_layer_hyperparms['out_channels'],
                bias=core_layer_hyperparms['bias'])
        elif self.layer_type in ['gat', 'gat_res']:
            self.core_layer_type = 'gat'
            self.core_layer = dglnn.GATConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=int(core_layer_hyperparms['out_channels'] /
                              core_layer_hyperparms['num_heads']),
                num_heads=core_layer_hyperparms['num_heads'],
                feat_drop=core_layer_hyperparms['feat_drop'],
                attn_drop=core_layer_hyperparms['attn_drop'])
        elif self.layer_type in ['sage', 'sage_res']:
            self.core_layer_type = 'sage'
            self.core_layer = dglnn.SAGEConv(
                in_feats=core_layer_hyperparms['in_channels'],
                out_feats=core_layer_hyperparms['out_channels'],
                aggregator_type='mean',
                bias=core_layer_hyperparms['bias'])

        else:
            raise NotImplementedError

        acti_type, acti_hyperparam = activation
        if acti_type == 'relu':
            self.activation = nn.ReLU(inplace=acti_hyperparam)
        elif acti_type == 'lkrelu':
            self.activation = nn.LeakyReLU(negative_slope=acti_hyperparam)
        elif acti_type == 'elu':
            self.activation = nn.ELU(inplace=acti_hyperparam)
        elif acti_type == 'no':
            self.activation = None
        else:
            raise NotImplementedError

        if 'n' in block_type.split('_'):
            self.node_norm = get_normalization(
                norm_type=normalization,
                num_channels=core_layer_hyperparms['out_channels'])
        self.block_type_str = self.get_block_type_str()
Example #12
    def __init__(self, input_size: int, hidden_size: int):
        """
        Parameters:
        input_size   input size
        hidden_size  output size / hidden size
        For a GAT there is additionally at least num_heads.

        A linear transformation is applied between the output and the hidden state.
        """
        super(GRUCell_GAT, self).__init__()  # initialize the parent class
        # build the convolution layers; num_heads is 1, i.e. no multi-head attention
        self.ConvIR = dglnn.GATConv(input_size + hidden_size,
                                    hidden_size,
                                    1,
                                    allow_zero_in_degree=True)
        self.ConvIZ = dglnn.GATConv(input_size + hidden_size,
                                    hidden_size,
                                    1,
                                    allow_zero_in_degree=True)
        self.ConvIH = dglnn.GATConv(input_size + hidden_size,
                                    hidden_size,
                                    1,
                                    allow_zero_in_degree=True)
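
The gate convolutions above can be combined into a standard GRU update; the following is a sketch of such a forward step (not part of the original example), squeezing away the single attention head after each convolution:

    def forward(self, g, x, h):
        # hypothetical GRU-style update; GATConv with num_heads=1 returns (N, 1, hidden_size)
        xh = torch.cat([x, h], dim=1)
        r = torch.sigmoid(self.ConvIR(g, xh).squeeze(1))        # reset gate
        z = torch.sigmoid(self.ConvIZ(g, xh).squeeze(1))        # update gate
        h_tilde = torch.tanh(self.ConvIH(g, torch.cat([x, r * h], dim=1)).squeeze(1))
        return z * h + (1.0 - z) * h_tilde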
Example #13
    def __init__(self, in_feats, hidden_size, n_head_list, extra_feats):
        super().__init__()
        self.n_conv = len(n_head_list)

        self.gat_list = nn.ModuleList()
        for i in range(self.n_conv):
            n_head = n_head_list[i]
            if i == 0:
                layer = dglnn.GATConv(in_feats, hidden_size, n_head)
            else:
                n_head_last = n_head_list[i - 1]
                layer = dglnn.GATConv(n_head_last * hidden_size, hidden_size,
                                      n_head)

            torch.nn.init.normal_(layer.attn_l, std=0.5)
            torch.nn.init.normal_(layer.attn_r, std=0.5)
            torch.nn.init.normal_(layer.fc.weight, std=0.5)
            self.gat_list.append(layer)

        self.readout = WeightedAverage(hidden_size * n_head_list[-1])

        self.mlp = MLPModel(hidden_size * n_head_list[-1] + extra_feats, 1,
                            [2 * hidden_size, hidden_size])
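
Only the constructor is given; a rough forward pass for this model is sketched below (WeightedAverage and MLPModel are project-specific helpers, so their call signatures here are assumptions):

    def forward(self, g, feats, extra_feats):
        # hypothetical forward pass; heads are concatenated between GAT layers
        h = feats
        for layer in self.gat_list:
            h = layer(g, h).flatten(1)            # (N, n_head * hidden_size)
        hg = self.readout(g, h)                   # assumed graph-level weighted average
        return self.mlp(torch.cat([hg, extra_feats], dim=1))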
Example #14
    def __init__(self):
        super(StochasticNetwork, self).__init__()
        if config.NETWORK == 'SAGE':
            self.layers = [
                dglnn.SAGEConv(config.IN_FEATURES,
                               config.HIDDEN_FEATURES,
                               aggregator_type='mean',
                               feat_drop=config.DROPOUT),
                dglnn.SAGEConv(config.HIDDEN_FEATURES,
                               config.HIDDEN_FEATURES,
                               aggregator_type='mean',
                               feat_drop=config.DROPOUT)
            ]
        elif config.NETWORK == 'GAT':
            self.layers = [
                dglnn.GATConv(config.IN_FEATURES,
                              config.HIDDEN_FEATURES,
                              feat_drop=config.DROPOUT,
                              attn_drop=config.ATTN_DROPOUT,
                              num_heads=config.ATTN_HEADS),
                dglnn.GATConv(config.ATTN_HEADS * config.HIDDEN_FEATURES,
                              config.HIDDEN_FEATURES,
                              feat_drop=config.DROPOUT,
                              num_heads=1)
            ]
        elif config.NETWORK == 'GIN':
            self.mlp1 = MLP(1, config.IN_FEATURES, config.HIDDEN_FEATURES,
                            config.HIDDEN_FEATURES)
            self.mlp2 = MLP(1, config.HIDDEN_FEATURES, config.HIDDEN_FEATURES,
                            config.HIDDEN_FEATURES)
            self.layers = [
                dglnn.GINConv(apply_func=self.mlp1, aggregator_type='mean'),
                dglnn.GINConv(apply_func=self.mlp2, aggregator_type='mean'),
            ]

        self.layers = torch.nn.ModuleList(self.layers)
        self.final = nn.Linear(config.HIDDEN_FEATURES, 2)
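
A matching forward pass is not included in the example; a sketch (with an assumed full-graph signature and ReLU nonlinearity) that reconciles the GAT branch's (N, heads, features) outputs with the final linear layer might be:

    def forward(self, g, x):
        # hypothetical forward pass
        h = x
        for i, layer in enumerate(self.layers):
            h = layer(g, h)
            if config.NETWORK == 'GAT':
                # concatenate heads after the first layer, squeeze the single
                # head of the second layer
                h = h.flatten(1) if i == 0 else h.squeeze(1)
            h = torch.relu(h)
        return self.final(h)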
Example #15
def test_gat_conv():
    ctx = F.ctx()
    g = dgl.rand_graph(100, 1000)
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((100, 5))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (100, 4, 2)

    g = dgl.bipartite(sp.sparse.random(100, 200, density=0.1))
    gat = nn.GATConv((5, 10), 2, 4)
    feat = (F.randn((100, 5)), F.randn((200, 10)))
    gat = gat.to(ctx)
    h = gat(g, feat)
    assert h.shape == (200, 4, 2)

    g = dgl.graph(sp.sparse.random(100, 100, density=0.001))
    seed_nodes = th.unique(g.edges()[1])
    block = dgl.to_block(g, seed_nodes)
    gat = nn.GATConv(5, 2, 4)
    feat = F.randn((block.number_of_src_nodes(), 5))
    gat = gat.to(ctx)
    h = gat(block, feat)
    assert h.shape == (block.number_of_dst_nodes(), 4, 2)
Example #16
 def __init__(self,
              dim_in,
              dim_out,
              dim_t,
              numr,
              nume,
              g,
              dropout=0,
              deepth=2,
              sampling=None,
              granularity=1,
              r_limit=None):
     super(EmbModule, self).__init__()
     self.dim_in = dim_in
     self.dim_out = dim_out
     self.dim_t = dim_t
     self.numr = numr
     self.nume = nume
     self.deepth = deepth
     self.g = g
     self.granularity = granularity
     mods = dict()
     mods['time_enc'] = TimeEnc(dim_t, nume)
     mods['entity_emb'] = nn.Embedding(nume, dim_in)
     if r_limit is None:
         r_limit = numr
     for l in range(self.deepth):
         mods['norm' + str(l)] = nn.LayerNorm(dim_in + dim_t)
         # mods['dropout' + str(l)] = nn.Dropout(dropout)
         conv_dict = dict()
         for r in range(r_limit):
             conv_dict['r' + str(r)] = dglnn.GATConv(dim_in + dim_t,
                                                     dim_out // 4,
                                                     4,
                                                     feat_drop=dropout,
                                                     attn_drop=dropout,
                                                     residual=False)
             conv_dict['-r' + str(r)] = dglnn.GATConv(dim_in + dim_t,
                                                      dim_out // 4,
                                                      4,
                                                      feat_drop=dropout,
                                                      attn_drop=dropout,
                                                      residual=False)
             conv_dict['self'] = dglnn.GATConv(dim_in + dim_t,
                                               dim_out // 4,
                                               4,
                                               feat_drop=dropout,
                                               attn_drop=dropout,
                                               residual=False)
             # conv_dict['r' + str(r)] = dglnn.GraphConv(dim_in + dim_t, dim_out)
             # conv_dict['-r' + str(r)] = dglnn.GraphConv(dim_in + dim_t, dim_out)
             # conv_dict['self'] = dglnn.GraphConv(dim_in + dim_t, dim_out)
         mods['conv' + str(l)] = dglnn.HeteroGraphConv(conv_dict,
                                                       aggregate='mean')
         mods['act' + str(l)] = nn.ReLU()
         dim_in = dim_out
     self.mods = nn.ModuleDict(mods)
     if sampling is not None:
         fanouts = [int(d) for d in sampling.split('/')]
         self.sampler = dgl.dataloading.MultiLayerNeighborSampler(
             fanouts=fanouts)
     else:
         self.sampler = dgl.dataloading.MultiLayerFullNeighborSampler(
             self.deepth)
Example #17
    def __init__(self, args, config):
        super(NumericHGN, self).__init__()
        self.args = args
        self.config = config
        self.encoder = ContextEncoder(self.args, config)
        self.bi_attn = BiAttention(args, self.config.hidden_size)
        self.bi_attn_linear = nn.Linear(self.config.hidden_size * 4,
                                        self.config.hidden_size)
        self.bi_lstm = nn.LSTM(self.config.hidden_size,
                               self.config.hidden_size,
                               bidirectional=True)
        self.para_node_mlp = nn.Linear(self.config.hidden_size * 2,
                                       self.config.hidden_size)
        self.sent_node_mlp = nn.Linear(self.config.hidden_size * 2,
                                       self.config.hidden_size)
        self.ent_node_mlp = nn.Linear(self.config.hidden_size * 2,
                                      self.config.hidden_size)

        # https://docs.dgl.ai/api/python/nn.pytorch.html#dgl.nn.pytorch.HeteroGraphConv
        self.gat = dglnn.HeteroGraphConv(
            {
                "ps":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "sp":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "se":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "es":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "pp":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "ss":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "qp":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "pq":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "qe":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                "eq":
                dglnn.GATConv(self.config.hidden_size,
                              self.config.hidden_size,
                              num_heads=1),
                # TODO: Need (i) bi-directional edges and (ii) more edge types (e.g., question-paragraph, paragraph-paragraph, etc.)
            },
            aggregate='sum'
        )  # TODO: May need to change aggregate function (test it!) - ‘sum’, ‘max’, ‘min’, ‘mean’, ‘stack’.

        self.gated_attn = GatedAttention(self.args, self.config)

        self.para_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, args.num_paragraphs))
        self.sent_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, args.num_sentences))
        self.ent_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, args.num_entities))
        self.span_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size * 4, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, self.config.num_labels))
        self.answer_type_mlp = nn.Sequential(
            nn.Linear(self.config.hidden_size * 4, self.config.hidden_size),
            nn.Linear(self.config.hidden_size, 3))