Example #1
    def __init__(self,
                 node_input_dim=15,
                 num_edge_type=5,
                 output_dim=12,
                 node_hidden_dim=64,
                 num_basis=-1,
                 num_step_prop=6,
                 num_step_set2set=6):
        super(RGCN, self).__init__()
        self.num_step_prop = num_step_prop
        self.lin0 = nn.Linear(node_input_dim, node_hidden_dim)
        if num_basis < 0:
            self.conv = RGCNConv(node_hidden_dim, node_hidden_dim,
                                 num_edge_type, num_edge_type)
        else:
            self.conv = RGCNConv(node_hidden_dim, node_hidden_dim,
                                 num_edge_type, num_basis)

        self.set2set = Set2Set(node_hidden_dim,
                               processing_steps=num_step_set2set)
        self.lin1 = nn.Linear(2 * node_hidden_dim, node_hidden_dim)
        self.lin2 = nn.Linear(node_hidden_dim, output_dim)
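
For reference, a minimal forward pass consistent with this constructor could look like the sketch below; the original forward() is not shown, and data.x, data.edge_index, data.edge_type, and data.batch are assumed to be standard PyG Data attributes.

    import torch.nn.functional as F

    def forward(self, data):
        out = F.relu(self.lin0(data.x))              # project to node_hidden_dim
        for _ in range(self.num_step_prop):          # repeated relational propagation with the shared conv
            out = F.relu(self.conv(out, data.edge_index, data.edge_type))
        out = self.set2set(out, data.batch)          # graph readout -> 2 * node_hidden_dim
        out = F.relu(self.lin1(out))
        return self.lin2(out)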
Example #2
    def _build_kg_layer(self):
        # db encoder
        self.entity_encoder = RGCNConv(self.n_entity, self.kg_emb_dim, self.n_relation, self.num_bases)
        self.entity_self_attn = SelfAttentionSeq(self.kg_emb_dim, self.kg_emb_dim)

        # concept encoder
        self.word_encoder = GCNConv(self.kg_emb_dim, self.kg_emb_dim)
        self.word_self_attn = SelfAttentionSeq(self.kg_emb_dim, self.kg_emb_dim)

        # gate mechanism
        self.gate_layer = GateLayer(self.kg_emb_dim)

        logger.debug('[Finish build kg layer]')
Example #3
 def __init__(self, num_features, num_classes, num_relations, max_seq_len,
              hidden_size=64, dropout=0.5, no_cuda=False):
     """
     The Speaker-level context encoder in the form of a 2 layer GCN.
     """
     super(GraphNet, self).__init__()
     self.conv1 = RGCNConv(num_features, hidden_size, num_relations, num_bases=30)
     self.conv2 = GraphConv(hidden_size, hidden_size)
     self.matchatt = MatchingAttention(num_features + hidden_size, num_features + hidden_size, att_type='general2')
     self.linear = nn.Linear(num_features + hidden_size, hidden_size)
     self.dropout = nn.Dropout(dropout)
     self.softmax_fc = nn.Linear(hidden_size, num_classes)
     self.no_cuda = no_cuda
Example #4
File: pg_rgcn.py  Project: robocomp/sngnn
 def __init__(self,
              num_features,
              n_classes,
              num_rels,
              num_bases,
              num_hidden,
              num_hidden_layers,
              dropout,
              activation,
              bias=True):
     super(PRGCN, self).__init__()
     # dropout
     if dropout:
         self.dropout = nn.Dropout(p=dropout)
     else:
         self.dropout = nn.Dropout(p=0.)
     # activation
     self.activation = activation
     # input layer
     self.rgcn_input = RGCNConv(
         num_features, num_hidden, num_rels, num_bases,
          bias=bias)  # aggr values: 'add', 'mean', 'max'; default: 'add'
     # Hidden layers
     self.layers = nn.ModuleList()
     for _ in range(num_hidden_layers):
         self.layers.append(
             RGCNConv(num_hidden,
                      num_hidden,
                      num_rels,
                      num_bases,
                      bias=bias))
     # output layer
     self.rgcn_output = RGCNConv(num_hidden,
                                 n_classes,
                                 num_rels,
                                 num_bases,
                                 bias=bias)
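
A forward pass matching this stack, sketched under the assumption that the configured activation and dropout are applied between layers (the project's actual forward is not shown):

     def forward(self, x, edge_index, edge_type):
         h = self.dropout(self.activation(self.rgcn_input(x, edge_index, edge_type)))
         for layer in self.layers:
             h = self.dropout(self.activation(layer(h, edge_index, edge_type)))
         return self.rgcn_output(h, edge_index, edge_type)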
Example #5
    def __init__(self, config):
        super(Encoder, self).__init__()
#        self.initializer = Initializer(config)
        layer = EncoderLayer(config)
#        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        self.layer = nn.ModuleList([layer])
#        self.conv = FastRGCNConv(config.hidden_size,config.hidden_size)
        self.conv3 = RGCNConv(config.hidden_size, config.hidden_size, 25, num_bases=128)
        self.conv2 = torch.nn.ModuleList()
        for i in range(5):
            self.conv2.append(DNAConv(config.hidden_size, 32, 2, 0.1))
        self.hidden_size = config.hidden_size
#        self.conv2 = DNAConv(config.hidden_size, 32, 16, 0.1)
#        self.conv2 = AGNNConv(config.hidden_size, config.hidden_size)
        self.norm = nn.LayerNorm([512, config.hidden_size], eps=1e-05)
Example #6
    def __init__(self, config):
        super(Encoder, self).__init__()
        #        self.initializer = Initializer(config)
        #        layer = EncoderLayer(config)
        #        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        #        self.layer = nn.ModuleList([layer])
        #        self.conv = FastRGCNConv(config.hidden_size,config.hidden_size)
        self.conv3 = RGCNConv(config.hidden_size,
                              config.hidden_size,
                              25,
                              num_bases=128)
        self.conv2 = torch.nn.ModuleList()
        self.conv22 = torch.nn.ModuleList()

        for i in range(3):
            self.conv2.append(DNAConv(config.hidden_size, 32, 4, 0.1))
            self.conv22.append(DNAConv(config.hidden_size, 32, 4, 0.1))

        self.hidden_size = config.hidden_size
Example #7
 def __init__(self, conv_name, in_hid, out_hid, num_types, num_relations,
              n_heads, dropout):
     super(GeneralConv, self).__init__()
     self.conv_name = conv_name
     if self.conv_name == 'hgt':
         self.base_conv = HGTConv(in_hid, out_hid, num_types, num_relations,
                                  n_heads, dropout)
     elif self.conv_name == 'gcn':
         self.base_conv = GCNConv(in_hid, out_hid)
     elif self.conv_name == 'gat':
         self.base_conv = GATConv(in_hid, out_hid // n_heads, heads=n_heads)
     elif self.conv_name == 'rgcn':
         self.base_conv = RGCNConv(in_hid,
                                   out_hid,
                                   num_relations,
                                   num_bases=5)
     else:
         raise NotImplementedError("conv_name=%s is not implemented" %
                                   conv_name)
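
A matching forward has to dispatch on the wrapped convolution, since only some layer types consume relation or type information. A sketch (an assumption; the project's actual forward, and the exact HGTConv call signature, may differ):

     def forward(self, x, edge_index, edge_type=None, node_type=None):
         if self.conv_name == 'rgcn':
             return self.base_conv(x, edge_index, edge_type)
         if self.conv_name == 'hgt':
             # HGTConv additionally needs per-node type information
             return self.base_conv(x, node_type, edge_index, edge_type)
         return self.base_conv(x, edge_index)  # 'gcn' and 'gat'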
Example #8
    def __init__(self, opt, emb_matrix=None):
        super(SynGCN, self).__init__()
        self.drop = nn.Dropout(opt['dropout'])
        self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
        if opt['pos_dim'] > 0:
            self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim'],
                    padding_idx=constant.PAD_ID)
        if opt['ner_dim'] > 0:
            self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim'],
                    padding_idx=constant.PAD_ID)

        input_size = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
        self.rnn = nn.LSTM(input_size, opt['hidden_dim'], opt['num_layers'], batch_first=True,\
                dropout=opt['dropout'], bidirectional=True)

        if opt['sgcn']:
            self.deprel_emb = nn.Embedding(len(constant.DEPREL_TO_ID), opt['deprel_dim'],
                    padding_idx=constant.PAD_ID)
            self.attn = Attention(opt['deprel_dim'], 2*opt['hidden_dim'])
            self.sgcn2 = GCNConv(2*opt['hidden_dim'], opt['hidden_dim'])
        if opt['rgcn']:
            self.rgcn = RGCNConv(2*opt['hidden_dim'], opt['hidden_dim'], len(constant.DEPREL_TO_ID)-1, num_bases=len(constant.DEPREL_TO_ID)-1)
        if opt['gcn']:
            self.gcn = GCNConv(2*opt['hidden_dim'], opt['hidden_dim'])
        if opt['gat']:
            self.deprel_emb = nn.Embedding(len(constant.DEPREL_TO_ID), opt['deprel_dim'],
                    padding_idx=constant.PAD_ID)
            self.gat = GATConv((2*opt['hidden_dim'], 2*opt['hidden_dim']+opt['deprel_dim']), opt['hidden_dim'])

        # output mlp layers
        in_dim = opt['hidden_dim']*3
        layers = [nn.Linear(in_dim, opt['hidden_dim']), nn.ReLU()]
        for _ in range(opt['mlp_layers']-1):
            layers += [nn.Linear(opt['hidden_dim'], opt['hidden_dim']), nn.ReLU()]
        self.out_mlp = nn.Sequential(*layers)
        self.linear = nn.Linear(opt['hidden_dim'], opt['num_class'])

        self.opt = opt
        self.topn = self.opt.get('topn', 1e10)
        self.use_cuda = opt['cuda']
        self.emb_matrix = emb_matrix
        self.init_weights()
Example #9
def test_rgcn_conv_equality(conf):
    num_bases, num_blocks = conf

    x1 = torch.randn(4, 4)
    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3], [0, 0, 1, 0, 1, 1]])
    edge_type = torch.tensor([0, 1, 1, 0, 0, 1])

    edge_index = torch.tensor([
        [0, 1, 1, 2, 2, 3, 0, 1, 1, 2, 2, 3],
        [0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1],
    ])
    edge_type = torch.tensor([0, 1, 1, 0, 0, 1, 2, 3, 3, 2, 2, 3])

    torch.manual_seed(12345)
    conv1 = RGCNConv(4, 32, 4, num_bases, num_blocks)

    torch.manual_seed(12345)
    conv2 = FastRGCNConv(4, 32, 4, num_bases, num_blocks)

    out1 = conv1(x1, edge_index, edge_type)
    out2 = conv2(x1, edge_index, edge_type)
    assert torch.allclose(out1, out2, atol=1e-6)
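
With basis decomposition, RGCNConv and FastRGCNConv share num_bases basis matrices plus per-relation mixing coefficients, so each relation's effective weight is W_r = sum_b comp[r, b] * basis_b. A sketch of that reconstruction, using the attribute names conv.weight (shape [num_bases, in_ch, out_ch]) and conv.comp (shape [num_relations, num_bases]) that the to_hetero test in Example #14 also relies on:

    import torch

    def relation_weights(comp, basis):
        # comp: [num_relations, num_bases]; basis: [num_bases, in_ch, out_ch]
        # W_r = sum_b comp[r, b] * basis[b]
        return torch.einsum('rb,bio->rio', comp, basis)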
Example #10
    def __init__(self,
                 in_channels,
                 num_relations,
                 ratio=0.5,
                 min_score=None,
                 multiplier=1,
                 nonlinearity=torch.tanh,
                 rgcn_func="FastRGCNConv",
                 **kwargs):
        super(RGCNSAGPooling, self).__init__()

        self.in_channels = in_channels
        self.ratio = ratio
        self.gnn = FastRGCNConv(
            in_channels, 1, num_relations, **
            kwargs) if rgcn_func == "FastRGCNConv" else RGCNConv(
                in_channels, 1, num_relations, **kwargs)
        self.min_score = min_score
        self.multiplier = multiplier
        self.nonlinearity = nonlinearity

        self.reset_parameters()
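
Here the 1-channel relational GNN acts as the node scorer for SAG-style top-k pooling. A hypothetical usage sketch, assuming the full class definition (its reset_parameters and forward are not shown here):

    import torch

    x = torch.randn(8, 64)
    edge_index = torch.tensor([[0, 1, 2, 3], [1, 2, 3, 0]])
    edge_type = torch.tensor([0, 1, 2, 0])

    pool = RGCNSAGPooling(in_channels=64, num_relations=3)
    score = pool.nonlinearity(pool.gnn(x, edge_index, edge_type).view(-1))
    perm = score.topk(int(pool.ratio * x.size(0))).indices  # indices of the retained nodes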
Example #11
 def __init__(self,
              obj_n: int,
              action_n: int,
              input_dims: List[int],
              type: str,
              embedding_size=16,
              net_code="2g0f",
              mp_rounds=1):
     super().__init__()
     nb_edge_types = input_dims[2]
     nb_layers, nb_dense_layers, self.max_reduce = parse_code(net_code)
     self.embedding_linear = nn.Linear(input_dims[1], embedding_size)
     gnn_layers = []
     for i in range(nb_layers):
         gnn_layers.append(
             RGCNConv(embedding_size, embedding_size, nb_edge_types))
     self.gnn_layers = nn.ModuleList(gnn_layers)
     dense_layers = []
     for i in range(nb_dense_layers):
         if i == 0:
             if self.max_reduce:
                 dense_layers.append(nn.Linear(embedding_size, 128))
             else:
                 dense_layers.append(nn.Linear(embedding_size * obj_n, 128))
         else:
             dense_layers.append(nn.Linear(128, 128))
         dense_layers.append(nn.ReLU())
     self.dense = nn.Sequential(*dense_layers)
     self.num_actions = action_n
     if nb_dense_layers == 0:
         self.policy_linear = nn.Linear(embedding_size, self.num_actions)
         self.baseline_linear = nn.Linear(embedding_size, 1)
     else:
         self.policy_linear = nn.Linear(128, self.num_actions)
         self.baseline_linear = nn.Linear(128, 1)
     self.mp_rounds = mp_rounds
     self.nb_dense_layers = nb_dense_layers
Example #12
    def __init__(self,
                 in_channels=1,
                 hidden_channels=1,
                 out_channels=1,
                 normalize=False,
                 add_loop=False,
                 gnn_k=1,
                 gnn_type=1):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)
        self.k = gnn_k  # number of repetitions of the gnn
        self.gnn_type=gnn_type
        if gnn_type==0:
            self.conv1 = DenseSAGEConv(in_channels=1, out_channels=hidden_channels, normalize=False)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels, out_channels=out_channels, normalize=False)
        if gnn_type==1:
            self.conv1 = DenseSAGEConv(in_channels=1, out_channels=hidden_channels, normalize=True)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels, out_channels=out_channels, normalize=True)
        
        if gnn_type==2:
            self.conv1 = GCNConv(in_channels=1, out_channels=hidden_channels, cached=True)
            self.conv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels, cached=True)
        if gnn_type==3:
            self.conv1 = GCNConv(in_channels=1, out_channels=hidden_channels,improved=True, cached=True)
            self.conv2 = GCNConv(in_channels=hidden_channels, out_channels=out_channels,improved=True, cached=True)
        if gnn_type==4:
            self.conv1 = ChebConv(in_channels=1, out_channels=hidden_channels,K=2)
            self.conv2 = ChebConv(in_channels=hidden_channels, out_channels=out_channels,K=2)
        if gnn_type==5:
            self.conv1 = ChebConv(in_channels=1, out_channels=hidden_channels,K=4)
            self.conv2 = ChebConv(in_channels=hidden_channels, out_channels=out_channels,K=4)
        if gnn_type==6:
            self.conv1 = GraphConv(in_channels=1, out_channels=hidden_channels,aggr='add')
            self.conv2 = GraphConv(in_channels=hidden_channels, out_channels=out_channels,aggr='add')
        if gnn_type==7:
            # GatedGraphConv takes no in_channels argument; inputs are zero-padded up to out_channels
            self.conv1 = GatedGraphConv(out_channels=hidden_channels, num_layers=3, aggr='add', bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels, num_layers=3, aggr='add', bias=True)
        if gnn_type==8:
            self.conv1 = GatedGraphConv(out_channels=hidden_channels, num_layers=7, aggr='add', bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels, num_layers=7, aggr='add', bias=True)
        if gnn_type==9:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=1, concat=True, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=1, concat=True, negative_slope=0.2,dropout=0.6)
        if gnn_type==10:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=6, concat=False, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=6, concat=False, negative_slope=0.2,dropout=0.6)
            
        if gnn_type==11:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=4, concat=True, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=4, concat=True, negative_slope=0.2,dropout=0.6)
        
        if gnn_type==12:
            self.conv1 =GATConv(in_channels=1,out_channels=hidden_channels, heads=4, concat=False, negative_slope=0.2,dropout=0.6)
            self.conv2 =GATConv(in_channels=hidden_channels,out_channels=out_channels, heads=4, concat=False, negative_slope=0.2,dropout=0.6)
            
        if gnn_type==13:
            self.conv1 = AGNNConv(requires_grad=True)
            self.conv2 = AGNNConv(requires_grad=True)
        if gnn_type==14:
            self.conv1 = ARMAConv(in_channels=1, out_channels=hidden_channels, num_stacks=1, num_layers=1, \
                                  shared_weights=False, act=F.relu, dropout=0.5, bias=True)
            self.conv2 = ARMAConv(in_channels=hidden_channels, out_channels=out_channels, num_stacks=1, num_layers=1, \
                                  shared_weights=False, act=F.relu, dropout=0.5, bias=True)
        if gnn_type==15:
            self.conv1 = SGConv(in_channels=1, out_channels=hidden_channels, K=1, cached=True, bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels, out_channels=out_channels, K=1, cached=True, bias=True)
        if gnn_type==16:
            self.conv1 = SGConv(in_channels=1, out_channels=hidden_channels, K=3, cached=True, bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels, out_channels=out_channels, K=3, cached=True, bias=True)
        if gnn_type==17:
            self.conv1 = APPNP(K=1, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=1, alpha=0.2, bias=True)
        if gnn_type==18:
            self.conv1 = APPNP(K=3, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=3, alpha=0.2, bias=True)
        if gnn_type==19:
            self.conv1 =RGCNConv(in_channels=1, out_channels=hidden_channels, num_relations=3, num_bases=2, bias=True)
            self.conv2 =RGCNConv(in_channels=hidden_channels, out_channels=out_channels, num_relations=3, num_bases=2, bias=True)
# =============================================================================
#         if gnn_type==20:
#             self.conv1 = SignedConv(in_channels=1, out_channels=hidden_channels, first_aggr=True, bias=True)
#             self.conv2 = SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=True, bias=True)
#         if gnn_type==21:
#             self.conv1 =SignedConv(in_channels=1, out_channels=hidden_channels, first_aggr=False, bias=True)
#             self.conv2 =SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=False, bias=True)
#         if gnn_type==22:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#         if gnn_type==23:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=5, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#         if gnn_type==24:
#             self.conv1 = GMMConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
# =============================================================================
        if gnn_type==25:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=2, kernel_size=3, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==26:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=3, is_open_spline=False, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=3, is_open_spline=False, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==27:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=1, norm=True, root_weight=True, bias=True)
        if gnn_type==28:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=3, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=3, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
        if gnn_type==29:
            self.conv1 = SplineConv(in_channels=1, out_channels=hidden_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels, out_channels=out_channels, dim=3, kernel_size=6, is_open_spline=True, \
                                    degree=3, norm=True, root_weight=True, bias=True)
Example #13
 def __init__(self):
     super(Net, self).__init__()
     self.conv1 = RGCNConv(
         data.num_nodes, 16, dataset.num_relations, num_bases=30)
     self.conv2 = RGCNConv(
         16, dataset.num_classes, dataset.num_relations, num_bases=30)
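
This is the entity-classification setup from the PyG examples: in_channels equals data.num_nodes, and RGCNConv accepts x=None in that case, treating each node as a one-hot index into its weight. A forward following that example (a sketch; data and dataset are module-level, as in the constructor):

     import torch.nn.functional as F

     def forward(self, edge_index, edge_type):
         x = F.relu(self.conv1(None, edge_index, edge_type))
         x = self.conv2(x, edge_index, edge_type)
         return F.log_softmax(x, dim=1)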
Example #14
def test_to_hetero_with_bases_and_rgcn_equal_output():
    torch.manual_seed(1234)

    # Run `RGCN` with basis decomposition:
    x = torch.randn(10, 16)  # 6 paper nodes, 4 author nodes
    adj = (torch.rand(10, 10) > 0.5)
    adj[6:, 6:] = False
    edge_index = adj.nonzero(as_tuple=False).t().contiguous()
    row, col = edge_index

    # 0 = paper<->paper, 1 = paper->author, 2 = author->paper
    edge_type = torch.full((edge_index.size(1), ), -1, dtype=torch.long)
    edge_type[(row < 6) & (col < 6)] = 0
    edge_type[(row < 6) & (col >= 6)] = 1
    edge_type[(row >= 6) & (col < 6)] = 2
    assert edge_type.min() == 0

    num_bases = 4
    conv = RGCNConv(16, 32, num_relations=3, num_bases=num_bases, aggr='add')
    out1 = conv(x, edge_index, edge_type)

    # Run `to_hetero_with_bases`:
    x_dict = {
        'paper': x[:6],
        'author': x[6:],
    }
    edge_index_dict = {
        ('paper', '_', 'paper'):
        edge_index[:, edge_type == 0],
        ('paper', '_', 'author'):
        edge_index[:, edge_type == 1] - torch.tensor([[0], [6]]),
        ('author', '_', 'paper'):
        edge_index[:, edge_type == 2] - torch.tensor([[6], [0]]),
    }

    adj_t_dict = {
        key: SparseTensor.from_edge_index(edge_index).t()
        for key, edge_index in edge_index_dict.items()
    }

    metadata = (list(x_dict.keys()), list(edge_index_dict.keys()))
    model = to_hetero_with_bases(RGCN(16, 32),
                                 metadata,
                                 num_bases=num_bases,
                                 debug=False)

    # Set model weights:
    for i in range(num_bases):
        model.conv.convs[i].lin.weight.data = conv.weight[i].data.t()
        model.conv.convs[i].edge_type_weight.data = conv.comp[:, i].data.t()

    model.lin.weight.data = conv.root.data.t()
    model.lin.bias.data = conv.bias.data

    out2 = model(x_dict, edge_index_dict)
    out2 = torch.cat([out2['paper'], out2['author']], dim=0)
    assert torch.allclose(out1, out2, atol=1e-6)

    out3 = model(x_dict, adj_t_dict)
    out3 = torch.cat([out3['paper'], out3['author']], dim=0)
    assert torch.allclose(out1, out3, atol=1e-6)
Example #15
    def __init__(self,
                 num_features,
                 n_classes,
                 num_heads,
                 num_rels,
                 num_bases,
                 num_hidden,
                 num_hidden_layer_pairs,
                 dropout,
                 activation,
                 alpha,
                 bias=True):
        super(PRGAT2, self).__init__()
        self.neg_slope = alpha
        self.num_hidden_layer_pairs = num_hidden_layer_pairs
        # dropout
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = nn.Dropout(p=0.)
        # activation
        self.activation = activation

        if num_bases < 0:
            num_bases = num_rels

        self.layers = nn.ModuleList()
        for num_layer in range(num_hidden_layer_pairs):
            # RGCN
            if num_layer == 0:
                self.layers.append(
                    RGCNConv(num_features,
                             num_hidden,
                             num_rels,
                             num_bases,
                             bias=bias))
            else:
                self.layers.append(
                    RGCNConv(num_hidden * num_heads,
                             num_hidden,
                             num_rels,
                             num_bases,
                             bias=bias))
            # GAT
            if num_layer == num_hidden_layer_pairs - 1:
                self.layers.append(
                    GATConv(num_hidden,
                            n_classes,
                            heads=num_heads,
                            concat=False,
                            negative_slope=self.neg_slope,
                            dropout=dropout,
                            bias=bias))
            else:
                self.layers.append(
                    GATConv(num_hidden,
                            num_hidden,
                            heads=num_heads,
                            concat=True,
                            negative_slope=self.neg_slope,
                            dropout=0,
                            bias=bias))
Example #16
    def __init__(
            self,
            in_channels=1,
            hidden_channels=1,
            out_channels=1,
            normalize=False,
            add_loop=False,
            gnn_k=1,
            gnn_type=1,
            jump=None,  # one of: None, 'max', 'lstm'
            res=False,
            activation='leaky'):
        super(GNN, self).__init__()

        self.add_loop = add_loop

        self.in_channels = in_channels
        self.bn1 = torch.nn.BatchNorm1d(hidden_channels)
        self.bn2 = torch.nn.BatchNorm1d(out_channels)
        self.k = gnn_k  # number of repetitions of the gnn
        self.gnn_type = gnn_type

        self.jump = jump
        if jump is not None:
            if jump != 'lstm':
                self.jk = JumpingKnowledge(jump)
            else:
                self.jk = JumpingKnowledge(jump, out_channels, gnn_k)
        if activation == 'leaky':
            self.activ = F.leaky_relu
        elif activation == 'elu':
            self.activ = F.elu
        elif activation == 'relu':
            self.activ = F.relu
        self.res = res
        if self.gnn_type in [10, 12] and self.res == True:
            raise Exception('res must be false when gnn_type==10 or 12!')
        if self.k == 1 and self.res == True:
            raise Exception('res must be false when gnn_k==1!')
        if self.k == 1 and self.jump is not None:
            raise Exception(
                'jumping knowledge only serves for the case where k>1!')
        if gnn_type == 0:
            self.conv1 = DenseSAGEConv(in_channels=self.in_channels,
                                       out_channels=out_channels,
                                       normalize=False)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels,
                                       out_channels=out_channels,
                                       normalize=False)
        if gnn_type == 1:
            self.conv1 = DenseSAGEConv(in_channels=self.in_channels,
                                       out_channels=out_channels,
                                       normalize=True)
            self.conv2 = DenseSAGEConv(in_channels=hidden_channels,
                                       out_channels=out_channels,
                                       normalize=True)

        if gnn_type == 2:
            self.conv1 = GCNConv(in_channels=1,
                                 out_channels=out_channels,
                                 cached=False)
            self.conv2 = GCNConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 cached=False)
        if gnn_type == 3:
            self.conv1 = GCNConv(in_channels=1,
                                 out_channels=out_channels,
                                 improved=True,
                                 cached=False)
            self.conv2 = GCNConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 improved=True,
                                 cached=False)
        if gnn_type == 4:
            self.conv1 = ChebConv(in_channels=1,
                                  out_channels=out_channels,
                                  K=2)
            self.conv2 = ChebConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  K=2)
        if gnn_type == 5:
            self.conv1 = ChebConv(in_channels=1,
                                  out_channels=out_channels,
                                  K=4)
            self.conv2 = ChebConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  K=4)
        if gnn_type == 6:
            self.conv1 = GraphConv(in_channels=1,
                                   out_channels=out_channels,
                                   aggr='add')
            self.conv2 = GraphConv(in_channels=hidden_channels,
                                   out_channels=out_channels,
                                   aggr='add')
        if gnn_type == 7:
            self.conv1 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=3,
                                        aggr='add',
                                        bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=3,
                                        aggr='add',
                                        bias=True)
        if gnn_type == 8:
            self.conv1 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=7,
                                        aggr='add',
                                        bias=True)
            self.conv2 = GatedGraphConv(out_channels=out_channels,
                                        num_layers=7,
                                        aggr='add',
                                        bias=True)
        if gnn_type == 9:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=1,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=1,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0.6)
        if gnn_type == 10:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=6,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=6,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 11:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=True,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 12:
            self.conv1 = GATConv(in_channels=1,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)
            self.conv2 = GATConv(in_channels=hidden_channels,
                                 out_channels=out_channels,
                                 heads=4,
                                 concat=False,
                                 negative_slope=0.2,
                                 dropout=0.6)

        if gnn_type == 13:
            self.conv1 = AGNNConv(requires_grad=True)
            self.conv2 = AGNNConv(requires_grad=True)
        if gnn_type == 14:
            self.conv1 = ARMAConv(in_channels=1,
                                  out_channels=hidden_channels,
                                  num_stacks=1,
                                  num_layers=1,
                                  shared_weights=False,
                                  act=F.relu,
                                  dropout=0.5,
                                  bias=True)
            self.conv2 = ARMAConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  num_stacks=1,
                                  num_layers=1,
                                  shared_weights=False,
                                  act=F.relu,
                                  dropout=0.5,
                                  bias=True)
        if gnn_type == 15:
            self.conv1 = SGConv(in_channels=1,
                                out_channels=out_channels,
                                K=1,
                                cached=True,
                                bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels,
                                out_channels=out_channels,
                                K=1,
                                cached=True,
                                bias=True)
        if gnn_type == 16:
            self.conv1 = SGConv(in_channels=1,
                                out_channels=out_channels,
                                K=3,
                                cached=True,
                                bias=True)
            self.conv2 = SGConv(in_channels=hidden_channels,
                                out_channels=out_channels,
                                K=3,
                                cached=True,
                                bias=True)
        if gnn_type == 17:
            self.conv1 = APPNP(K=1, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=1, alpha=0.2, bias=True)
        if gnn_type == 18:
            self.conv1 = APPNP(K=3, alpha=0.2, bias=True)
            self.conv2 = APPNP(K=3, alpha=0.2, bias=True)
        if gnn_type == 19:
            self.conv1 = RGCNConv(in_channels=1,
                                  out_channels=out_channels,
                                  num_relations=3,
                                  num_bases=2,
                                  bias=True)
            self.conv2 = RGCNConv(in_channels=hidden_channels,
                                  out_channels=out_channels,
                                  num_relations=3,
                                  num_bases=2,
                                  bias=True)
# =============================================================================
#         if gnn_type==20:
#             self.conv1 = SignedConv(in_channels=1, out_channels=out_channels, first_aggr=True, bias=True)
#             self.conv2 = SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=True, bias=True)
#         if gnn_type==21:
#             self.conv1 =SignedConv(in_channels=1, out_channels=out_channels, first_aggr=False, bias=True)
#             self.conv2 =SignedConv(in_channels=hidden_channels, out_channels=out_channels, first_aggr=False, bias=True)
#         if gnn_type==22:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#         if gnn_type==23:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=5, kernel_size=3, bias=True)
#         if gnn_type==24:
#             self.conv1 = GMMConv(in_channels=1, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
#             self.conv2 = GMMConv(in_channels=hidden_channels, out_channels=out_channels, dim=2, kernel_size=3, bias=True)
# =============================================================================
        if gnn_type == 25:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=2,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=2,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 26:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=False,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=False,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 27:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=1,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 28:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=3,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
        if gnn_type == 29:
            self.conv1 = SplineConv(in_channels=1,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
            self.conv2 = SplineConv(in_channels=hidden_channels,
                                    out_channels=out_channels,
                                    dim=3,
                                    kernel_size=6,
                                    is_open_spline=True,
                                    degree=3,
                                    norm=True,
                                    root_weight=True,
                                    bias=True)
Example #17
def test_to_hetero_and_rgcn_equal_output():
    torch.manual_seed(1234)

    # Run `RGCN`:
    x = torch.randn(10, 16)  # 6 paper nodes, 4 author nodes
    adj = (torch.rand(10, 10) > 0.5)
    adj[6:, 6:] = False
    edge_index = adj.nonzero(as_tuple=False).t().contiguous()
    row, col = edge_index

    # 0 = paper<->paper, 1 = paper->author, 2 = author->paper
    edge_type = torch.full((edge_index.size(1), ), -1, dtype=torch.long)
    edge_type[(row < 6) & (col < 6)] = 0
    edge_type[(row < 6) & (col >= 6)] = 1
    edge_type[(row >= 6) & (col < 6)] = 2
    assert edge_type.min() == 0

    conv = RGCNConv(16, 32, num_relations=3)
    out1 = conv(x, edge_index, edge_type)

    # Run `to_hetero`:
    node_types = ['paper', 'author']
    edge_types = [('paper', '_', 'paper'), ('paper', '_', 'author'),
                  ('author', '_', 'paper')]

    x_dict = {
        'paper': x[:6],
        'author': x[6:],
    }
    edge_index_dict = {
        ('paper', '_', 'paper'):
        edge_index[:, edge_type == 0],
        ('paper', '_', 'author'):
        edge_index[:, edge_type == 1] - torch.tensor([[0], [6]]),
        ('author', '_', 'paper'):
        edge_index[:, edge_type == 2] - torch.tensor([[6], [0]]),
    }

    adj_t_dict = {
        key: SparseTensor.from_edge_index(edge_index).t()
        for key, edge_index in edge_index_dict.items()
    }

    metadata = (list(x_dict.keys()), list(edge_index_dict.keys()))
    model = to_hetero(RGCN(16, 32), metadata)

    # Set model weights:
    for i, edge_type in enumerate(edge_types):
        weight = model.conv['__'.join(edge_type)].lin.weight
        weight.data = conv.weight[i].data.t()
    for i, node_type in enumerate(node_types):
        model.lin[node_type].weight.data = conv.root.data.t()
        model.lin[node_type].bias.data = conv.bias.data

    out2 = model(x_dict, edge_index_dict)
    out2 = torch.cat([out2['paper'], out2['author']], dim=0)
    assert torch.allclose(out1, out2, atol=1e-6)

    out3 = model(x_dict, adj_t_dict)
    out3 = torch.cat([out3['paper'], out3['author']], dim=0)
    assert torch.allclose(out1, out3, atol=1e-6)
Example #18
    def __init__(self,
                 num_features,
                 n_classes,
                 num_heads,
                 num_rels,
                 num_bases,
                 num_hidden,
                 num_hidden_layer_pairs,
                 dropout,
                 activation,
                 neg_slope,
                 bias=True):
        super(PRGAT3, self).__init__()
        self.neg_slope = neg_slope
        self.num_hidden_layer_pairs = num_hidden_layer_pairs
        # dropout
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = nn.Dropout(p=0.)
        # activation
        self.activation = activation

        if num_bases < 0:
            num_bases = num_rels

        self.layers = nn.ModuleList()

        layer_size = lambda i: int(
            (1. - i / num_hidden_layer_pairs) * num_hidden + 0.5)

        for i in range(num_hidden_layer_pairs):
            # RGCN
            if i == 0:
                self.layers.append(
                    RGCNConv(num_features,
                             layer_size(i),
                             num_rels,
                             num_bases,
                             bias=bias))
            else:
                self.layers.append(
                    RGCNConv(layer_size(i) * num_heads,
                             layer_size(i),
                             num_rels,
                             num_bases,
                             bias=bias))
            # GAT
            if i == num_hidden_layer_pairs - 1:
                self.layers.append(
                    GATConv(layer_size(i),
                            n_classes,
                            heads=num_heads,
                            concat=False,
                            negative_slope=self.neg_slope,
                            dropout=dropout,
                            bias=bias))
            else:
                self.layers.append(
                    GATConv(layer_size(i),
                            layer_size(i),
                            heads=num_heads,
                            concat=True,
                            negative_slope=self.neg_slope,
                            dropout=0,
                            bias=bias))
Example #19
 def __init__(self, in_channels, out_channels, num_relations):
     super(RGCN, self).__init__()
     self.conv1 = RGCNConv(in_channels, 16, num_relations, num_bases=30)
     self.conv2 = RGCNConv(16, out_channels, num_relations, num_bases=30)
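
A two-layer forward consistent with this wrapper (a sketch; the original forward is not shown):

     import torch.nn.functional as F

     def forward(self, x, edge_index, edge_type):
         x = F.relu(self.conv1(x, edge_index, edge_type))
         return self.conv2(x, edge_index, edge_type)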
Example #20
    def __init__(self,
                 num_features,
                 n_classes,
                 num_heads,
                 num_rels,
                 num_bases,
                 num_hidden,
                 num_hidden_layers_rgcn,
                 num_hidden_layers_gat,
                 dropout,
                 activation,
                 neg_slope,
                 bias=True):
        super(PRGAT, self).__init__()

        self.concat = True
        self.neg_slope = neg_slope
        self.num_hidden_layers_rgcn = num_hidden_layers_rgcn
        self.num_hidden_layers_gat = num_hidden_layers_gat

        # dropout
        if dropout:
            self.dropout = nn.Dropout(p=dropout)
        else:
            self.dropout = nn.Dropout(p=0.)
        # activation
        self.activation = activation
        # RGCN input layer
        self.rgcn_input = RGCNConv(
            num_features, num_hidden[0], num_rels, num_bases,
            bias=bias)  # aggr values: 'add', 'mean', 'max'; default: 'add'
        # RGCN Hidden layers
        self.layers = nn.ModuleList()
        for l in range(1, num_hidden_layers_rgcn - 1):
            self.layers.append(
                RGCNConv(num_hidden[l - 1],
                         num_hidden[l],
                         num_rels,
                         num_bases,
                         bias=bias))

        # GAT input layer
        self.layers.append(
            GATConv(num_hidden[l],
                    num_hidden[l + 1],
                    heads=num_heads[l + 1],
                    concat=self.concat,
                    negative_slope=self.neg_slope,
                    dropout=dropout,
                    bias=bias))

        # GAT Hidden layers
        for ll in range(l + 2,
                        num_hidden_layers_rgcn + num_hidden_layers_gat - 2):
            if self.concat:
                self.layers.append(
                    GATConv(num_hidden[ll - 1] * num_heads[ll - 1],
                            num_hidden[ll],
                            heads=num_heads[ll],
                            concat=self.concat,
                            negative_slope=self.neg_slope,
                            dropout=dropout,
                            bias=bias))
            else:
                self.layers.append(
                    GATConv(num_hidden[ll - 1],
                            num_hidden[ll],
                            heads=num_heads[ll],
                            concat=self.concat,
                            negative_slope=self.neg_slope,
                            dropout=dropout,
                            bias=bias))
        # GAT output layer
        if self.concat:
            self.gat_output = GATConv(num_hidden[ll] * num_heads[ll],
                                      n_classes,
                                      heads=num_heads[ll + 1],
                                      concat=self.concat,
                                      negative_slope=self.neg_slope,
                                      dropout=dropout,
                                      bias=bias)
        else:
            self.gat_output = GATConv(num_hidden[ll],
                                      n_classes,
                                      heads=num_heads[ll + 1],
                                      concat=self.concat,
                                      negative_slope=self.neg_slope,
                                      dropout=dropout,
                                      bias=bias)
Example #21
    def __init__(self,
                 config: BertConfig,
                 cls_token: int,
                 relation_types: int,
                 entity_types: int,
                 size_embedding: int,
                 prop_drop: float,
                 freeze_transformer: bool,
                 device: torch.device,
                 syn_graph=True,
                 sema_graph=True,
                 fusion_rgcn=True,
                 tw_grad_flow_token=True,
                 tw_grad_flow_subword=True,
                 tw_rel_atten_token=True,
                 tw_ent_atten_token=True,
                 tw_rel_atten_subword=False,
                 tw_ent_atten_subword=False,
                 trigger_attn=True,
                 trigger_grad_flow=False,
                 full_graph_retain_rate=0.8,
                 dt_graph_retain_rate=0.8,
                 max_pairs: int = 100,
                 split_epoch=18):
        super(TriMF, self).__init__(config)

        if fusion_rgcn:
            assert syn_graph and sema_graph

        # if tw_atten_token:
        #     assert tw_rel_atten_token and tw_ent_atten_token

        # if tw_atten_subword:
        #     assert tw_rel_atten_subword and tw_ent_atten_subword

        self._cls_token = cls_token
        self._relation_types = relation_types
        self._entity_types = entity_types
        self._max_pairs = max_pairs
        self._device = device

        self._syn_graph = syn_graph
        self._sema_graph = sema_graph
        self._fusion_rgcn = fusion_rgcn
        self._tw_grad_flow_token = tw_grad_flow_token
        self._tw_grad_flow_subword = tw_grad_flow_subword
        # self._tw_atten_token=tw_atten_token
        self._tw_rel_atten_token = tw_rel_atten_token
        self._tw_ent_atten_token = tw_ent_atten_token
        # self._tw_atten_subword=tw_atten_subword
        self._tw_rel_atten_subword = tw_rel_atten_subword
        self._tw_ent_atten_subword = tw_ent_atten_subword
        self._trigger_attn = trigger_attn
        self._trigger_grad_flow = trigger_grad_flow

        self._full_graph_retain_rate = full_graph_retain_rate
        self._dt_graph_retain_rate = dt_graph_retain_rate

        self.bert = BertModel(config)

        # layers
        self.rel_linear = nn.Linear(
            config.hidden_size * 3 + size_embedding * 2, config.hidden_size,
            False)
        self.rel_classifier = nn.Linear(config.hidden_size, relation_types,
                                        False)
        self.entity_linear = nn.Linear(config.hidden_size * 2 + size_embedding,
                                       config.hidden_size, False)
        self.entity_classifier = nn.Linear(config.hidden_size, entity_types,
                                           False)
        self.size_embeddings = nn.Embedding(100, size_embedding)
        self.dropout = nn.Dropout(prop_drop)

        if self._tw_rel_atten_token:
            self.token_rel_attn_W = nn.Linear(config.hidden_size,
                                              config.hidden_size, False)
        if self._tw_ent_atten_token:
            self.token_ent_attn_W = nn.Linear(config.hidden_size,
                                              config.hidden_size, False)

        if self._tw_rel_atten_subword:
            self.subword_rel_attn_W = nn.Linear(config.hidden_size,
                                                config.hidden_size, False)
        if self._tw_ent_atten_subword:
            self.subword_ent_attn_W = nn.Linear(config.hidden_size,
                                                config.hidden_size, False)

        # if self._trigger_attn:
        # self.head_W=nn.Linear(config.hidden_size,config.hidden_size,False)
        # self.tail_W=nn.Linear(config.hidden_size,config.hidden_size,False)
        # self.rel_W=nn.Linear(config.hidden_size,config.hidden_size,False)

        if self._fusion_rgcn:
            self.node_W = nn.Linear(config.hidden_size, config.hidden_size,
                                    False)

        self.relu = nn.ReLU()

        self._split_epoch = split_epoch
        self.save_config = [
            tw_rel_atten_token, tw_ent_atten_token, tw_rel_atten_subword,
            tw_ent_atten_subword, trigger_attn
        ]

        if self._syn_graph:
            self.conv1 = RGCNConv(in_channels=config.hidden_size,
                                  out_channels=config.hidden_size,
                                  num_relations=46,
                                  num_bases=None,
                                  root_weight=True,
                                  bias=True)
            # self.conv2 = RGCNConv(in_channels = config.hidden_size, out_channels = config.hidden_size, num_relations = 46, num_bases= None, root_weight= True, bias = True)
        if self._sema_graph:
            self.gat = GATConv(in_channels=config.hidden_size,
                               out_channels=config.hidden_size,
                               bias=True)
            # self.conv3 = RGCNConv(in_channels = config.hidden_size, out_channels = config.hidden_size, num_relations = 1, num_bases= None, root_weight= True, bias = True)

        self.init_weights()

        if freeze_transformer:
            print("Freeze transformer weights")

            # freeze all transformer weights
            for param in self.bert.parameters():
                param.requires_grad = False