Example 1
    def __init__(self, in_features, hidd_dim, kge_dim, rel_total,
                 heads_out_feat_params, blocks_params):
        super().__init__()
        self.in_features = in_features
        self.hidd_dim = hidd_dim
        self.rel_total = rel_total
        self.kge_dim = kge_dim
        self.n_blocks = len(blocks_params)

        self.initial_norm = LayerNorm(self.in_features)
        # Blocks are kept in a plain list for indexed access and registered
        # via add_module() so their parameters are still tracked.
        self.blocks = []
        self.net_norms = ModuleList()
        for i, (head_out_feats,
                n_heads) in enumerate(zip(heads_out_feat_params,
                                          blocks_params)):
            block = SSI_DDI_Block(n_heads,
                                  in_features,
                                  head_out_feats,
                                  final_out_feats=self.hidd_dim)
            self.add_module(f"block{i}", block)
            self.blocks.append(block)
            self.net_norms.append(LayerNorm(head_out_feats * n_heads))
            # the concatenated head output becomes the next block's input size
            in_features = head_out_feats * n_heads

        self.co_attention = CoAttentionLayer(self.kge_dim)
        self.KGE = RESCAL(self.rel_total, self.kge_dim)
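The loop above chains block widths: each block consumes `in_features` and emits `head_out_feats * n_heads`, which then becomes the next block's input size. A minimal standalone sketch of that chaining, with made-up dimensions that are not from the original repo:

heads_out_feat_params = [32, 32, 32]  # per-head output features for each block
blocks_params = [2, 2, 2]             # number of attention heads per block

in_features = 70  # e.g. an initial atom-feature size
for head_out_feats, n_heads in zip(heads_out_feat_params, blocks_params):
    out_features = head_out_feats * n_heads
    print(f'{in_features} -> {out_features}')
    in_features = out_features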
Example 2
def test_layer_norm(affine):
    x = torch.randn(100, 16)
    batch = torch.zeros(100, dtype=torch.long)

    norm = LayerNorm(16, affine=affine)
    assert norm.__repr__() == 'LayerNorm(16)'
    torch.jit.script(norm)
    out1 = norm(x)
    assert out1.size() == (100, 16)
    assert torch.allclose(norm(x, batch), out1)

    out2 = norm(torch.cat([x, x], dim=0), torch.cat([batch, batch + 1], dim=0))
    assert torch.allclose(out1, out2[:100])
    assert torch.allclose(out1, out2[100:])
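This test relies on torch_geometric's LayerNorm normalizing each graph in a batch independently, which is why duplicating `x` into two graphs reproduces `out1` twice. A minimal standalone illustration using the standard PyG API, with made-up tensors:

import torch
from torch_geometric.nn import LayerNorm

x = torch.randn(6, 16)
batch = torch.tensor([0, 0, 0, 1, 1, 1])  # two graphs, three nodes each

norm = LayerNorm(16)
out = norm(x, batch)  # mean/std are computed separately for each graph
assert out.size() == (6, 16)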
Example 3
def test_layer_norm(affine, mode):
    x = torch.randn(100, 16)
    batch = torch.zeros(100, dtype=torch.long)

    norm = LayerNorm(16, affine=affine, mode=mode)
    assert norm.__repr__() == f'LayerNorm(16, mode={mode})'

    if is_full_test():
        torch.jit.script(norm)

    out1 = norm(x)
    assert out1.size() == (100, 16)
    assert torch.allclose(norm(x, batch), out1, atol=1e-6)

    out2 = norm(torch.cat([x, x], dim=0), torch.cat([batch, batch + 1], dim=0))
    assert torch.allclose(out1, out2[:100], atol=1e-6)
    assert torch.allclose(out1, out2[100:], atol=1e-6)
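This variant additionally parametrizes `mode`, which newer PyG releases expose on LayerNorm. A brief sketch of the two settings, assuming a PyG version that supports `mode`:

from torch_geometric.nn import LayerNorm

# mode='graph' (default): statistics are computed over all nodes and
# features of each graph, so the `batch` vector matters.
# mode='node': each node is normalized over its own features, like
# torch.nn.LayerNorm, and `batch` has no effect on the result.
norm_graph = LayerNorm(16, mode='graph')
norm_node = LayerNorm(16, mode='node')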
Example 4

    def __init__(self, n_input, n_hidden=64, K=8, p=0.5, bn=False):
        super(Kipfblock, self).__init__()
        self.conv1 = ChebConv(n_input, n_hidden, K=K)
        self.p = p
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.do_bn = bn
        if bn:
            self.bn = LayerNorm(n_hidden)
Example 5

    def __init__(self, n_input, n_hidden, K=3, bn=False):
        super(Sgblock, self).__init__()
        self.conv1 = SGConv(n_input, n_hidden, K, cached=False)
        # self.p = p
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.do_bn = bn
        if bn:
            self.bn = LayerNorm(n_hidden)
Example 6

    def __init__(self, n_input, n_hidden, bn=False):
        super(Sageblock, self).__init__()
        self.conv1 = SAGEConv(n_input, n_hidden)
        # self.p = p
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.do_bn = bn
        if bn:
            self.bn = LayerNorm(n_hidden)
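Examples 4-6 only build layers; the matching `forward` methods are not shown. A hedged sketch of how `Kipfblock` is presumably applied (conv, optional norm, nonlinearity, dropout; this is an assumption, not the original code):

import torch.nn.functional as F

# Hypothetical forward for Kipfblock (not in the original source):
def forward(self, x, edge_index):
    x = self.conv1(x, edge_index)
    if self.do_bn:
        x = self.bn(x)          # a LayerNorm, despite the `bn` attribute name
    x = F.relu(x)
    return F.dropout(x, p=self.p, training=self.training)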
Example 7
    def __init__(self, norm_type, in_channels):
        super(NormLayer, self).__init__()
        if norm_type == 'bn':
            self.norm = BatchNorm(in_channels)
        elif norm_type == 'ln':
            self.norm = LayerNorm(in_channels)
        elif norm_type == 'in':
            self.norm = InstanceNorm(in_channels)
        else:
            self.norm = NoNorm(in_channels)
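A short usage sketch for the dispatcher above. `BatchNorm`, `LayerNorm`, and `InstanceNorm` are the torch_geometric.nn classes; `NoNorm` is presumably a project-local identity wrapper. The class likely also defines a `forward` delegating to `self.norm` (an assumption), so the sketch calls the wrapped module directly:

import torch

ln = NormLayer('ln', in_channels=64)   # wraps torch_geometric.nn.LayerNorm
out = ln.norm(torch.randn(10, 64))
assert out.size() == (10, 64)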
Example 8

    def __init__(self, node_in_dim, node_h_dim,
                 edge_in_dim, edge_h_dim,
                 seq_in=False, num_layers=3, drop_rate=0.1):
        
        super(SyntheticGNN, self).__init__()

        self.node_in_dim = node_in_dim
        self.edge_in_dim = edge_in_dim
        
        if seq_in:
            self.W_s = nn.Embedding(20, 20)
            node_in_dim = (node_in_dim[0] + 20, node_in_dim[1])
        
        self.W_v = nn.Sequential(
            LayerNorm(node_in_dim),
            nn.Linear(node_in_dim, node_h_dim)
        )
        self.W_e = nn.Sequential(
            LayerNorm(edge_in_dim),
            nn.Linear(edge_in_dim, edge_h_dim)
        )
        
        self.layers = nn.ModuleList(
            GNNConvLayer(node_h_dim, edge_h_dim, drop_rate=drop_rate)
            for _ in range(num_layers))
        
        ns = node_h_dim
        self.W_out = nn.Sequential(
            LayerNorm(node_h_dim),
            nn.Linear(node_h_dim, ns),
            nn.Sigmoid()
        )
            
        self.dense = nn.Sequential(
            nn.Linear(ns, 2*ns), nn.ReLU(inplace=True),
            nn.Dropout(p=drop_rate),
            nn.Linear(2*ns, 1)
        )
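Note that the `seq_in` branch indexes `node_in_dim` as a (scalar, vector) tuple in the GVP style, while `nn.Linear` and PyG's `LayerNorm` expect plain integers, so the constructor as written only runs with scalar dims and `seq_in=False`. A hypothetical instantiation under that assumption (all dimension values made up):

model = SyntheticGNN(node_in_dim=6, node_h_dim=128,
                     edge_in_dim=32, edge_h_dim=32,
                     seq_in=False, num_layers=3, drop_rate=0.1)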
Example 9

    def __init__(self, node_dims, edge_dims,
                 n_message=3, n_feedforward=2, drop_rate=.1,
                 autoregressive=False):
        
        super(GNNConvLayer, self).__init__()
        self.conv = GNNConv(node_dims, node_dims, edge_dims, n_message,
                           aggr="add" if autoregressive else "mean")
        self.norm = nn.ModuleList([LayerNorm(node_dims) for _ in range(2)])
        self.dropout = nn.ModuleList([nn.Dropout(drop_rate) for _ in range(2)])

        ff_func = []
        if n_feedforward == 1:
            ff_func.append(nn.Linear(node_dims, node_dims))
        else:
            hid_dims = 4*node_dims
            ff_func.append(nn.Linear(node_dims, hid_dims))
            ff_func.append(nn.Sigmoid())
            for i in range(n_feedforward-2):
                ff_func.append(nn.Linear(hid_dims, hid_dims))
                ff_func.append(nn.Sigmoid())
            ff_func.append(nn.Linear(hid_dims, node_dims))
        self.ff_func = nn.Sequential(*ff_func)
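The layer's `forward` is not shown; with two norms and two dropouts, a residual conv step followed by a residual feed-forward step is the natural reading. A sketch under that assumption, not the original code:

# Hypothetical forward for GNNConvLayer (not in the original source):
def forward(self, x, edge_index, edge_attr):
    dh = self.conv(x, edge_index, edge_attr)
    x = self.norm[0](x + self.dropout[0](dh))   # residual + norm after conv
    dh = self.ff_func(x)
    x = self.norm[1](x + self.dropout[1](dh))   # residual + norm after FFN
    return x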
Example 10
import pytest
from itertools import product

import torch
import torch.nn.functional as F
from torch.nn import ReLU, BatchNorm1d
from torch_geometric.nn import LayerNorm
from torch_geometric.nn.models import GCN, GraphSAGE, GIN, GAT, PNA

out_dims = [None, 8]
dropouts = [0.0, 0.5]
acts = [None, torch.relu_, F.elu, ReLU()]
norms = [None, BatchNorm1d(16), LayerNorm(16)]
jks = ['last', 'cat', 'max', 'lstm']


@pytest.mark.parametrize('out_dim,dropout,act,norm,jk',
                         product(out_dims, dropouts, acts, norms, jks))
def test_gcn(out_dim, dropout, act, norm, jk):
    x = torch.randn(3, 8)
    edge_index = torch.tensor([[0, 1, 1, 2], [1, 0, 2, 1]])
    out_channels = 16 if out_dim is None else out_dim

    model = GCN(8, 16, num_layers=2, out_channels=out_dim, dropout=dropout,
                act=act, norm=norm, jk=jk)
    assert str(model) == f'GCN(8, {out_channels}, num_layers=2)'
    assert model(x, edge_index).size() == (3, out_channels)
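For reference, the parameter grid above expands to 2 x 2 x 4 x 3 x 4 = 192 combinations (out_dims x dropouts x acts x norms x jks), so each parametrized test function runs 192 times.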


@pytest.mark.parametrize('out_dim,dropout,act,norm,jk',
                         product(out_dims, dropouts, acts, norms, jks))