Example #1
    def __init__(self, config):
        super(Encoder, self).__init__()
        #        self.initializer = Initializer(config)
        #        layer = EncoderLayer(config)
        #        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        #        self.layer = nn.ModuleList([layer])
        #        self.conv = FastRGCNConv(config.hidden_size,config.hidden_size)
        #        self.conv3 = FastRGCNConv(config.hidden_size,config.hidden_size,25,num_bases=128)

        self.ctoq = MultiHeadedAttention(8, config.hidden_size)
        self.qtoc = MultiHeadedAttention(8, config.hidden_size)
        self.rnn = torch.nn.LSTM(config.hidden_size,
                                 config.hidden_size // 2,
                                 dropout=0.4,
                                 bidirectional=True,
                                 num_layers=2,
                                 batch_first=True)
        self.gelu = torch.nn.functional.gelu

        # self.conv3 = RGCNConv(config.hidden_size, config.hidden_size, 35, num_bases=30)
        self.conv2 = torch.nn.ModuleList()
        for i in range(4):
            self.conv2.append(DNAConv(config.hidden_size, 8, 1, 0.4))

        # self.conv = GraphConv(config.hidden_size, config.hidden_size,'max')

        self.lineSub = torch.nn.Linear(config.hidden_size * 2,
                                       config.hidden_size)
        self.hidden_size = config.hidden_size
        self.config = config
        self.dropout = nn.Dropout(0.2)
        self.TopNet = nn.ModuleList(
            [getMaxScore(self.hidden_size) for _ in range(2)])
        self.dnaAct = torch.tanh
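Each of these encoders feeds DNAConv with the stack of all previous layers' node representations rather than a single feature matrix. A minimal sketch of that calling convention (shapes follow the torch_geometric.nn.DNAConv documentation; the sizes and variable names here are illustrative only):

import torch
from torch_geometric.nn import DNAConv

conv = DNAConv(channels=64, heads=4, groups=8, dropout=0.1)
x = torch.randn(100, 64)                     # [num_nodes, channels] after an input projection
x_all = x.unsqueeze(1)                       # [num_nodes, 1, channels]: the layer stack so far
edge_index = torch.randint(0, 100, (2, 500))
out = conv(x_all, edge_index)                # [num_nodes, channels]: attention over the stack
x_all = torch.cat([x_all, out.unsqueeze(1)], dim=1)  # grow the stack for the next layer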
Example #2
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 heads=1, groups=1):
        super(DNANet, self).__init__()
        self.hidden_channels = hidden_channels
        self.lin1 = torch.nn.Linear(in_channels, hidden_channels)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.convs.append(
                DNAConv(hidden_channels, heads, groups, dropout=0.8, cached=True))
        self.lin2 = torch.nn.Linear(hidden_channels, out_channels)
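The snippet ends at the constructor; a plausible forward for this DNANet, mirroring the stacking pattern in the official PyTorch Geometric dna.py example (the dropout rate and log-softmax readout are assumptions, and F is torch.nn.functional):

    def forward(self, x, edge_index):
        x = F.relu(self.lin1(x))
        x = F.dropout(x, p=0.5, training=self.training)
        x_all = x.view(-1, 1, self.hidden_channels)   # start the layer stack
        for conv in self.convs:
            x = F.relu(conv(x_all, edge_index))
            x_all = torch.cat([x_all, x.view(-1, 1, self.hidden_channels)], dim=1)
        x = x_all[:, -1]                              # read out the final layer's state
        x = F.dropout(x, p=0.5, training=self.training)
        return F.log_softmax(self.lin2(x), dim=1)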
Example #3
    def __init__(self, config):
        super(Encoder, self).__init__()
        #        self.initializer = Initializer(config)
        #        layer = EncoderLayer(config)
        #        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        #        self.layer = nn.ModuleList([layer])
        #        self.conv = FastRGCNConv(config.hidden_size,config.hidden_size)
        self.conv3 = RGCNConv(config.hidden_size,
                              config.hidden_size,
                              25,
                              num_bases=128)
        self.conv2 = torch.nn.ModuleList()
        self.conv22 = torch.nn.ModuleList()

        for i in range(3):
            self.conv2.append(DNAConv(config.hidden_size, 32, 4, 0.1))
            self.conv22.append(DNAConv(config.hidden_size, 32, 4, 0.1))

        self.hidden_size = config.hidden_size
Example #4
    def __init__(self, features_num, num_class, num_layers, hidden, heads,
                 groups, dropout):
        super(DNA_Net, self).__init__()
        self.hidden = hidden
        self.dropout = dropout
        self.lin1 = Linear(features_num, hidden)
        self.convs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.convs.append(
                DNAConv(hidden, heads, groups, dropout=0.8, cached=True))
        self.lin2 = Linear(hidden, num_class)
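Note the two separate dropouts here: the dropout=0.8 handed to DNAConv drops normalized attention coefficients inside the convolution, while self.dropout is stored for feature dropout in the (omitted) forward pass.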
Example #5
    def __init__(self):
        super(Net, self).__init__()
        in_channels = 5
        hidden_channels = 60
        self.hidden_channels = hidden_channels
        num_layers = 6

        self.lin1 = torch.nn.Linear(in_channels, hidden_channels)
        self.DNAConvs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.DNAConvs.append(
                DNAConv(hidden_channels, heads=3, groups=1, dropout=0))
        self.lin2 = torch.nn.Linear(hidden_channels, 3)
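heads=3 works here only because hidden_channels=60 divides evenly; DNAConv's grouped linear layers require the channel count to be divisible by both heads and groups. A quick sanity check one might add (illustrative):

hidden_channels, heads, groups = 60, 3, 1
assert hidden_channels % heads == 0 and hidden_channels % groups == 0, \
    'DNAConv needs channels divisible by heads and groups'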
Example #6
def test_dna_conv():
    x = torch.randn((4, 3, 32))
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    row, col = edge_index
    value = torch.rand(row.size(0))
    adj2 = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))
    adj1 = adj2.set_value(None)

    conv = DNAConv(32, heads=4, groups=8, dropout=0.0)
    assert conv.__repr__() == 'DNAConv(32, heads=4, groups=8)'
    out1 = conv(x, edge_index)
    assert out1.size() == (4, 32)
    assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6)
    out2 = conv(x, edge_index, value)
    assert out2.size() == (4, 32)
    assert torch.allclose(conv(x, adj2.t()), out2, atol=1e-6)

    if is_full_test():
        t = '(Tensor, Tensor, OptTensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x, edge_index).tolist() == out1.tolist()
        assert jit(x, edge_index, value).tolist() == out2.tolist()

        t = '(Tensor, SparseTensor, OptTensor) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit(x, adj1.t()), out1, atol=1e-6)
        assert torch.allclose(jit(x, adj2.t()), out2, atol=1e-6)

    conv.cached = True
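    # with caching enabled, the second call reuses the propagation computed by the first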
    conv(x, edge_index)
    assert conv(x, edge_index).tolist() == out1.tolist()
    conv(x, adj1.t())
    assert torch.allclose(conv(x, adj1.t()), out1, atol=1e-6)
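The adj1.t() / adj2.t() calls are deliberate: PyTorch Geometric treats a SparseTensor adjacency as transposed (row = target, col = source), so the test confirms that the transposed sparse input reproduces the edge_index results exactly.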
Example #7
    def __init__(self, in_channels, hidden_channels, out_channels, num_layers,
                 heads=1, groups=1, training_method='dfa'):
        super(DFADNANet, self).__init__()
        self.hidden_channels = hidden_channels
        self.lin1 = torch.nn.Linear(in_channels, hidden_channels)
        self.dfa1 = DFALayer()
        self.convs = torch.nn.ModuleList()
        self.dfa_convs = []
        for i in range(num_layers):
            self.convs.append(
                DNAConv(hidden_channels, heads, groups, dropout=0., cached=True))
            self.dfa_convs.append(DFALayer())
        self.lin2 = torch.nn.Linear(hidden_channels, out_channels)
        self.dfa = DFA([self.dfa1, *self.dfa_convs],
                       no_training=training_method != 'dfa')
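Note that dfa_convs is a plain Python list rather than an nn.ModuleList, so the per-layer DFALayer instances are tracked only through the DFA wrapper, assuming that wrapper is itself an nn.Module that registers them.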
Example #8
    def __init__(
        self,
        num_layers: int = 2,
        hidden_channels: int = 128,
        heads: int = 8,
        groups: int = 16,
        dropout: float = 0.8,
        cached: bool = False,
        num_features: Optional[int] = None,
        num_classes: Optional[int] = None,
    ):
        super().__init__()

        assert num_features is not None
        assert num_classes is not None

        # utils from Lightning to save __init__ arguments
        self.save_hyperparameters()
        hparams = self.hparams

        # Instantiate metrics
        self.val_acc = Accuracy(hparams["num_classes"])
        self.test_acc = Accuracy(hparams["num_classes"])

        # Define DNA graph convolution model
        self.hidden_channels = hparams["hidden_channels"]
        self.lin1 = nn.Linear(hparams["num_features"],
                              hparams["hidden_channels"])

        # Create ModuleList to hold all convolutions
        self.convs = nn.ModuleList()

        # Iterate through the number of layers
        for _ in range(hparams["num_layers"]):

            # Create a DNA convolution. This graph convolution relies on a multi-head
            # attention mechanism to route information, similar to Transformers.
            # https://github.com/rusty1s/pytorch_geometric/blob/master/torch_geometric/nn/conv/dna_conv.py#L172
            self.convs.append(
                DNAConv(
                    hparams["hidden_channels"],
                    hparams["heads"],
                    hparams["groups"],
                    dropout=hparams["dropout"],
                    cached=False,
                ))
        # classification MLP
        self.lin2 = nn.Linear(hparams["hidden_channels"],
                              hparams["num_classes"],
                              bias=False)
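The LightningModule above only builds the model; a sketch of a matching training_step, assuming a standard PyG node-classification batch with a train_mask (the field names and loss are assumptions, not from the original, and F is torch.nn.functional):

    def training_step(self, batch, batch_idx):
        # assumes a forward() that returns log-probabilities, as in the DNANet pattern above
        y_hat = self(batch.x, batch.edge_index)
        loss = F.nll_loss(y_hat[batch.train_mask], batch.y[batch.train_mask])
        self.log('train_loss', loss)
        return loss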
Example #9
def test_dna_conv():
    channels = 32
    num_layers = 3
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, num_layers, channels))

    conv = DNAConv(channels, heads=4, groups=8, dropout=0.5)
    assert conv.__repr__() == 'DNAConv(32, heads=4, groups=8)'
    assert conv(x, edge_index).size() == (num_nodes, channels)

    conv = DNAConv(channels, heads=1, groups=1, dropout=0.5)
    assert conv.__repr__() == 'DNAConv(32, heads=1, groups=1)'
    assert conv(x, edge_index).size() == (num_nodes, channels)

    conv = DNAConv(channels, heads=1, groups=1, dropout=0.5, cached=True)
    # call twice so the second run exercises the cached path; assert both results
    assert conv(x, edge_index).size() == (num_nodes, channels)
    assert conv(x, edge_index).size() == (num_nodes, channels)
Example #10
    def __init__(self, config):
        super(Encoder, self).__init__()
        # self.initializer = Initializer(config)
        layer = EncoderLayer(config)
        # self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
        self.layer = nn.ModuleList([layer])
        # self.conv = FastRGCNConv(config.hidden_size, config.hidden_size)
        self.conv3 = RGCNConv(config.hidden_size, config.hidden_size, 25,
                              num_bases=128)
        self.conv2 = torch.nn.ModuleList()
        for i in range(5):
            self.conv2.append(DNAConv(config.hidden_size, 32, 2, 0.1))
        self.hidden_size = config.hidden_size
        # self.conv2 = DNAConv(config.hidden_size, 32, 16, 0.1)
        # self.conv2 = AGNNConv(config.hidden_size, config.hidden_size)
        self.norm = nn.LayerNorm([512, config.hidden_size], 1e-05)
Example #11
    def __init__(self, config):
        super(Encoder, self).__init__()
        self.att_heads = config.num_attention_heads
        
#        self.initializer = Initializer(config)
#        layer = EncoderLayer(config)
#        self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)])
#        self.layer = nn.ModuleList([layer])
#        self.conv = FastRGCNConv(config.hidden_size,config.hidden_size)
#        self.conv3 = FastRGCNConv(config.hidden_size,config.hidden_size,25,num_bases=128)
        
        # self.ctoq = MultiHeadedAttention(self.att_heads,config.hidden_size)
        self.qtoc = MultiHeadedAttention(self.att_heads, config.hidden_size)
        # self.rnn = torch.nn.LSTM(config.hidden_size,config.hidden_size // 2,dropout=0.4,
        #                          bidirectional=True, num_layers=2, batch_first=True)
        self.gelu = torch.nn.functional.gelu
        
        # self.conv3 = RGCNConv(config.hidden_size, config.hidden_size, 35, num_bases=30)
        self.conv2 = torch.nn.ModuleList()
        for i in range(2):
            self.conv2.append(
                DNAConv(config.hidden_size, self.att_heads, 1, 0.4))
        self.conv3 = torch.nn.ModuleList()
        # for i in range(2):
        #     self.conv3.append(
        #         DNAConv(config.hidden_size,self.att_heads,1,0,0.4))
            
        # self.conv = GraphConv(config.hidden_size, config.hidden_size,'max')
            
        # self.lineSub = torch.nn.Linear(config.hidden_size*3,config.hidden_size)
        # self.lineSub = torch.nn.Linear(config.hidden_size*2,config.hidden_size)
        
        self.hidden_size = config.hidden_size
        self.config = config
        self.dropout = nn.Dropout(0.1)

        # self.dropout = nn.Dropout(0.3)  # seems too high
        
        self.TopNet = nn.ModuleList([getMaxScore2(self.hidden_size) for _ in range(1)])
        self.TopNet[0].ql = self.qtoc.linears[0]
        self.TopNet[0].kl = self.qtoc.linears[1]
        
        # self.BoudSelect = nn.ModuleList([getThresScore(self.hidden_size) for _ in range(3)])
        self.dnaAct = torch.relu
Example #12
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.save_hyperparameters()

        self.hidden_channels = kwargs["hidden_channels"]
        self.lin1 = torch.nn.Linear(kwargs["num_features"], kwargs["hidden_channels"])
        self.convs = torch.nn.ModuleList()
        for _ in range(kwargs["num_layers"]):
            self.convs.append(
                DNAConv(
                    kwargs["hidden_channels"],
                    kwargs["heads"],
                    kwargs["groups"],
                    dropout=kwargs["dropout"],
                    cached=False,
                )
            )
        self.lin2 = torch.nn.Linear(kwargs["hidden_channels"], kwargs["num_classes"])
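This is a condensed variant of Example #8 that reads every hyperparameter from **kwargs; a hypothetical instantiation (Model stands in for whatever the class is actually named, and the values are illustrative):

model = Model(num_features=1433, num_classes=7, hidden_channels=128,
              num_layers=4, heads=8, groups=16, dropout=0.8)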
Example #13
    def __init__(self):
        super(Net, self).__init__()
        in_channels = 5
        hidden_channels = 30
        self.hidden_channels = hidden_channels
        num_layers = 2

        nn1 = torch.nn.Sequential(
            torch.nn.Linear(in_channels, hidden_channels), torch.nn.ReLU())
        nn2 = torch.nn.Sequential(
            torch.nn.Linear(hidden_channels, hidden_channels), torch.nn.ReLU())
        self.nnconv1 = GINConv(nn1)
        self.sconv1 = SGConv(hidden_channels, hidden_channels, K=5)
        self.sconv2 = SGConv(hidden_channels, hidden_channels, K=5)
        self.nnconv2 = GINConv(nn2)

        self.lin1 = torch.nn.Linear(in_channels, hidden_channels)
        self.DNAConvs = torch.nn.ModuleList()
        for i in range(num_layers):
            self.DNAConvs.append(
                DNAConv(hidden_channels, heads=3, groups=1, dropout=0))
        self.lin2 = torch.nn.Linear(hidden_channels, 2)
Example #14
def test_dna_conv():
    channels = 32
    num_layers = 3
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, num_layers, channels))

    conv = DNAConv(channels, heads=4, groups=8, dropout=0.0)
    assert conv.__repr__() == 'DNAConv(32, heads=4, groups=8)'
    out = conv(x, edge_index)
    assert out.size() == (num_nodes, channels)

    if is_full_test():
        jit = torch.jit.script(conv.jittable())
        assert jit(x, edge_index).tolist() == out.tolist()

    conv = DNAConv(channels, heads=1, groups=1, dropout=0.0)
    assert conv.__repr__() == 'DNAConv(32, heads=1, groups=1)'
    out = conv(x, edge_index)
    assert out.size() == (num_nodes, channels)

    if is_full_test():
        jit = torch.jit.script(conv.jittable())
        assert jit(x, edge_index).tolist() == out.tolist()

    conv = DNAConv(channels, heads=1, groups=1, dropout=0.0, cached=True)
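    # called twice on purpose: the second call exercises the cached code path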
    out = conv(x, edge_index)
    out = conv(x, edge_index)
    assert out.size() == (num_nodes, channels)

    if is_full_test():
        jit = torch.jit.script(conv.jittable())
        assert jit(x, edge_index).tolist() == out.tolist()
Example #15
    def __init__(self, channels, heads, groups, dropout):
        super(NormalizedRegularizedDNALayer, self).__init__()
        self.dna = DNAConv(channels, heads, groups, dropout)
        self.bn = nn.BatchNorm1d(channels)
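Only the constructor survives in this snippet; a minimal forward for the layer, assuming the usual [num_nodes, num_layers, channels] stacked input (hypothetical, not from the original source):

    def forward(self, x_all, edge_index):
        x = self.dna(x_all, edge_index)   # attention over the stacked representations
        return self.bn(x)                 # batch-normalize the [num_nodes, channels] output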