def test_dynamic_edge_conv_conv():
    x1 = torch.randn(8, 16)
    x2 = torch.randn(4, 16)
    batch1 = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])
    batch2 = torch.tensor([0, 0, 1, 1])

    nn = Seq(Lin(32, 16), ReLU(), Lin(16, 32))
    conv = DynamicEdgeConv(nn, k=2)
    assert conv.__repr__() == (
        'DynamicEdgeConv(nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=16, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=16, out_features=32, bias=True)\n'
        '), k=2)')
    out11 = conv(x1)
    assert out11.size() == (8, 32)

    out12 = conv(x1, batch1)
    assert out12.size() == (8, 32)

    out21 = conv((x1, x2))
    assert out21.size() == (4, 32)

    out22 = conv((x1, x2), (batch1, batch2))
    assert out22.size() == (4, 32)

    t = '(Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1).tolist() == out11.tolist()
    assert jit(x1, batch1).tolist() == out12.tolist()

    t = '(PairTensor, Optional[PairTensor]) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2)).tolist() == out21.tolist()
    assert jit((x1, x2), (batch1, batch2)).tolist() == out22.tolist()
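
For reference, a minimal standalone sketch of the DynamicEdgeConv API that the test above exercises; the point-cloud sizes, k, and layer widths here are illustrative assumptions, not taken from the original test:

import torch
from torch.nn import Linear as Lin, ReLU, Sequential as Seq
from torch_geometric.nn import DynamicEdgeConv

# Two small point clouds of 3-D coordinates, concatenated and described by a batch vector.
pos = torch.randn(10, 3)
batch = torch.tensor([0] * 6 + [1] * 4)

# The inner network receives concatenated [x_i, x_j - x_i] features, hence 2 * 3 input channels.
mlp = Seq(Lin(2 * 3, 64), ReLU(), Lin(64, 64))
conv = DynamicEdgeConv(mlp, k=3, aggr='max')

out = conv(pos, batch)  # the k-NN graph is rebuilt from `pos` on every call
assert out.size() == (10, 64)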
Example 2
    def __init__(self, in_feats, hidden_dim, out_feats, k=20, dropout=0.5):
        super(DGCNN, self).__init__()
        mlp1 = nn.Sequential(
            GINMLP(2 * in_feats, hidden_dim, hidden_dim, num_layers=3),
            nn.ReLU(), nn.BatchNorm1d(hidden_dim))
        mlp2 = nn.Sequential(
            GINMLP(2 * hidden_dim, 2 * hidden_dim, 2 * hidden_dim,
                   num_layers=1),
            nn.ReLU(),
            nn.BatchNorm1d(2 * hidden_dim),
        )
        self.conv1 = DynamicEdgeConv(mlp1, k, "max")
        self.conv2 = DynamicEdgeConv(mlp2, k, "max")
        self.linear = nn.Linear(hidden_dim + 2 * hidden_dim, 1024)
        self.final_mlp = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.Dropout(dropout),
            nn.Linear(512, 256),
            nn.BatchNorm1d(256),
            nn.Dropout(dropout),
            nn.Linear(256, out_feats),
        )
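
Only the constructor is shown above; a forward pass consistent with these layer sizes might look roughly like the sketch below. The concatenation, the global_max_pool readout, and the argument names are assumptions, not the original code:

    def forward(self, x, batch):
        # hypothetical forward, mirroring the shapes declared in __init__
        h1 = self.conv1(x, batch)                    # -> hidden_dim per node
        h2 = self.conv2(h1, batch)                   # -> 2 * hidden_dim per node
        h = self.linear(torch.cat([h1, h2], dim=1))  # hidden_dim + 2 * hidden_dim -> 1024
        h = global_max_pool(h, batch)                # graph-level readout (assumed)
        return self.final_mlp(h)                     # -> out_feats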
Example 3
    def __init__(self,
                 D,
                 C,
                 G=0,
                 k=4,
                 task='graph',
                 aggr='max',
                 conclayers=True):
        super(DECNet, self).__init__()

        self.D = D
        self.C = C
        self.G = G

        self.task = task
        self.conclayers = conclayers

        # Convolution layers
        self.conv1 = DynamicEdgeConv(MLP([2 * self.D, 32, 32]), k=k, aggr=aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 32, 64]), k=k, aggr=aggr)

        # "Fusion" layer taking in conv1 and conv2 outputs
        if self.conclayers:
            self.lin1 = MLP([32 + 64, 96])
        else:
            self.lin1 = MLP([64, 96])

        if (self.G > 0):
            self.Z = 96 + self.G
        else:
            self.Z = 96

        # Final layers concatenating everything
        self.mlp1 = MLP([self.Z, self.Z, self.C])
Example 4
    def __init__(self, k=30, aggr='max'):
        super().__init__()

        self.conv1 = DynamicEdgeConv(MLP([2 * 6, 64, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
        self.conv3 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
        self.lin1 = MLP([3 * 64, 1024])
Example 5
    def __init__(self, out_channels, k=20, aggr='max'):
        super().__init__()

        self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
        self.lin1 = Linear(128 + 64, 1024)

        self.mlp = MLP([1024, 512, 256, out_channels], dropout=0.5, norm=None)
Example 6
    def __init__(self, feature_num=1024, k=20, aggr='max'):
        super().__init__()
        self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64]), k, aggr)
        self.conv3 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
        self.conv4 = DynamicEdgeConv(MLP([2 * 128, 256]), k, aggr)

        self.lin1 = MLP([64 + 64 + 128 + 256, feature_num])
Example 7
    def __init__(self, out_channels, k=20, aggr='max'):
        super().__init__()

        self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
        self.lin1 = MLP([128 + 64, 1024])

        self.mlp = Seq(MLP([1024, 512]), Dropout(0.5), MLP([512, 256]),
                       Dropout(0.5), Lin(256, out_channels))
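
This two-layer pattern, shared by several snippets on this page, concatenates the outputs of both DynamicEdgeConv layers before pooling. A forward pass consistent with it could look like the sketch below; the global_max_pool readout and the log-softmax output are assumptions, not the original code:

    def forward(self, data):
        # F refers to torch.nn.functional (assumed import)
        pos, batch = data.pos, data.batch
        x1 = self.conv1(pos, batch)                  # 3 -> 64
        x2 = self.conv2(x1, batch)                   # 64 -> 128
        out = self.lin1(torch.cat([x1, x2], dim=1))  # 64 + 128 -> 1024
        out = global_max_pool(out, batch)            # one vector per point cloud (assumed)
        out = self.mlp(out)                          # 1024 -> ... -> out_channels
        return F.log_softmax(out, dim=1)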
Example 8
    def __init__(self, out_channels, k=30, aggr='max'):
        super().__init__()

        self.conv1 = DynamicEdgeConv(MLP([2 * 6, 64, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
        self.conv3 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)

        self.mlp = MLP([3 * 64, 1024, 256, 128, out_channels], dropout=0.5,
                       batch_norm=False)
Example 9
    def __init__(self, out_channels, k=30, aggr='max'):
        super(Net, self).__init__()

        self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
        self.conv3 = DynamicEdgeConv(MLP([2 * 64, 64, 64]), k, aggr)
        self.lin1 = MLP([3 * 64, 1024])

        self.mlp = Seq(MLP([1024, 256]), Dropout(0.5), MLP([256, 128]),
                       Dropout(0.5), Lin(128, out_channels))
Example 10
def test_dynamic_edge_conv_conv():
    x = torch.randn((4, 16))

    nn = Seq(Lin(32, 16), ReLU(), Lin(16, 32))
    conv = DynamicEdgeConv(nn, k=6)
    assert conv.__repr__() == (
        'DynamicEdgeConv(nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=16, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=16, out_features=32, bias=True)\n'
        '), k=6)')
    assert conv(x).size() == (4, 32)
Example 11
    def __init__(self, input_size, embedding_size, n_classes, aggr='max', k=5, pool_op='max', same_size=False):
        super(DEC, self).__init__()
        self.k = k
        self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64, 64]), self.k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), self.k, aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool

        self.mlp = Seq(
            MLP([1024, 512]), Dropout(0.5), MLP([512, 256]), Dropout(0.5),
            Lin(256, n_classes))
Example 12
def test_dynamic_edge_conv_conv():
    in_channels, out_channels = (16, 32)
    num_nodes = 20
    x = torch.randn((num_nodes, in_channels))

    nn = Seq(Lin(2 * in_channels, 32), ReLU(), Lin(32, out_channels))
    conv = DynamicEdgeConv(nn, k=6)
    assert conv.__repr__() == (
        'DynamicEdgeConv(nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '), k=6)')
    assert conv(x).size() == (num_nodes, out_channels)
Example 13
    def __init__(self, num_classes):
        super().__init__()

        nn = Seq(Lin(6, 64), ReLU(), Lin(64, 64), ReLU(), Lin(64, 64), ReLU())
        self.conv1 = DynamicEdgeConv(nn, k=20, aggr='max')

        nn = Seq(Lin(128, 128), ReLU(), Lin(128, 128), ReLU(), Lin(128, 256),
                 ReLU())
        self.conv2 = DynamicEdgeConv(nn, k=20, aggr='max')

        self.lin0 = Lin(256, 512)

        self.lin1 = Lin(512, 256)
        self.lin2 = Lin(256, 256)
        self.lin3 = Lin(256, num_classes)
Example 14
    def __init__(self, input_size, embedding_size, n_classes, fov=1, aggr='max', k=5, pool_op='max', same_size=False):
        super(DECSeq6, self).__init__()
        self.fov = fov
        self.bn0 = nn.BatchNorm1d(32)
        self.conv0 = nn.Sequential(
            nn.Conv1d(input_size, 32, kernel_size=fov),nn.ReLU())
        #self.bn0, nn.ReLU())
        self.conv1 = DynamicEdgeConv(MLP([2 * 32, 64, 64, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool

        self.mlp = Seq(
            MLP([1024, 512]), Dropout(0.5), MLP([512, 256]), Dropout(0.5),
            Lin(256, n_classes))
Example 15
    def __init__(self,
                 input_size,
                 embedding_size,
                 n_classes,
                 dropout=True,
                 k=5,
                 aggr='max',
                 pool_op='max'):
        super(DECSeq, self).__init__()
        # self.bn0 = BN(input_size)
        # self.bn1 = BN(64)
        # self.bn2 = BN(128)
        self.conv1 = EdgeConv(MLP([2 * 3, 64, 64, 64], batch_norm=True), aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128], batch_norm=True), k,
                                     aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool

        if dropout:
            self.mlp = Seq(MLP([1024, 512], batch_norm=True), Dropout(0.5),
                           MLP([512, 256], batch_norm=True), Dropout(0.5),
                           Lin(256, n_classes))
        else:
            self.mlp = Seq(MLP([1024, 512]), MLP([512, 256]),
                           Lin(256, n_classes))
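
Because conv1 here is a static EdgeConv, a forward pass would need a precomputed edge_index for the first layer, while the DynamicEdgeConv rebuilds its own k-NN graph from the learned features. Below is a sketch consistent with the layers above; the argument names and the final readout are assumptions:

    def forward(self, x, edge_index, batch):
        x1 = self.conv1(x, edge_index)               # static graph convolution
        x2 = self.conv2(x1, batch)                   # dynamic k-NN graph on features
        out = self.lin1(torch.cat([x1, x2], dim=1))
        out = self.pool(out, batch)                  # global_max_pool when pool_op == 'max'
        return self.mlp(out)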
Example 16
    def __init__(self,
                 n_features,
                 n_labels,
                 classification=False,
                 width=128,
                 conv_depth=3,
                 point_depth=3,
                 lin_depth=5,
                 aggr='max'):
        super(CEnsembleNet, self).__init__()
        self.classification = classification
        self.n_features = n_features
        self.n_labels = n_labels
        self.lin_depth = lin_depth
        self.conv_depth = conv_depth
        self.width = width
        self.point_depth = point_depth
        self.aggr = aggr
        n_intermediate = self.width

        self.conv1 = ChebConv(self.n_features, n_intermediate, 2)
        self.convfkt = torch.nn.ModuleList([
            ChebConv(n_intermediate, n_intermediate, 2)
            for i in range(self.conv_depth - 1)
        ])

        self.point1 = DynamicEdgeConv(
            LNN([2 * n_features, n_intermediate, n_intermediate]), 2,
            self.aggr)
        self.pointfkt = torch.nn.ModuleList([
            DynamicEdgeConv(LNN([2 * n_intermediate, n_intermediate]), 2,
                            self.aggr) for i in range(self.point_depth - 1)
        ])

        n_intermediate2 = 2 * self.conv_depth * n_intermediate + 2 * self.point_depth * n_intermediate
        self.dim2 = n_intermediate2
        self.batchnorm1 = BatchNorm1d(n_intermediate2)
        self.linearfkt = torch.nn.ModuleList([
            torch.nn.Linear(n_intermediate2, n_intermediate2)
            for i in range(self.lin_depth)
        ])
        self.drop = torch.nn.ModuleList(
            [torch.nn.Dropout(.3) for i in range(self.lin_depth)])
        self.out = torch.nn.Linear(n_intermediate2, self.n_labels)
        self.out2 = torch.nn.Linear(self.n_labels, self.n_labels)
Example 17
    def __init__(self, num_classes):
        super(Net, self).__init__()

        nn = Seq(Lin(6, 64), LeakyReLU(negative_slope=0.2), Lin(64, 64),
                 LeakyReLU(negative_slope=0.2), Lin(64, 64),
                 LeakyReLU(negative_slope=0.2))
        self.conv1 = DynamicEdgeConv(nn, k=20, aggr='max')

        nn = Seq(Lin(128, 128), LeakyReLU(negative_slope=0.2), Lin(128, 128),
                 LeakyReLU(negative_slope=0.2), Lin(128, 256),
                 LeakyReLU(negative_slope=0.2))
        self.conv2 = DynamicEdgeConv(nn, k=20, aggr='max')

        self.lin0 = Lin(256, 512)

        self.lin1 = Lin(512, 256)
        self.lin2 = Lin(256, 256)
        self.lin3 = Lin(256, num_classes)
Example 18
    def __init__(self, input_dim=4, big_dim=32, hidden_dim=2, aggr='mean'):
        super(EdgeNetDynamic, self).__init__()
        encoder_nn = nn.Sequential(
            nn.Linear(2 * (input_dim), big_dim),
            nn.ReLU(),
            nn.Linear(big_dim, big_dim),
            nn.ReLU(),
            nn.Linear(big_dim, hidden_dim),
            nn.ReLU(),
        )

        decoder_nn = nn.Sequential(nn.Linear(2 * (hidden_dim), big_dim),
                                   nn.ReLU(), nn.Linear(big_dim, big_dim),
                                   nn.ReLU(), nn.Linear(big_dim, input_dim))

        self.batchnorm = nn.BatchNorm1d(input_dim)

        self.encoder = DynamicEdgeConv(nn=encoder_nn, aggr=aggr, k=3)
        self.decoder = DynamicEdgeConv(nn=decoder_nn, aggr=aggr, k=3)
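
This snippet pairs two DynamicEdgeConv layers as an encoder and a decoder; a forward pass consistent with the declared shapes might be the following sketch (reading data.x and data.batch is an assumption):

    def forward(self, data):
        x = self.batchnorm(data.x)            # normalize input_dim features
        z = self.encoder(x, data.batch)       # compress to hidden_dim per node
        return self.decoder(z, data.batch)    # reconstruct input_dim per node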
Example 19
    def __init__(self, input_dim=3, big_dim=32, bigger_dim=256, global_dim=2, output_dim=1, k=16, aggr='mean'):
        super(DeeperDynamicEdgeNet, self).__init__()
        convnn = nn.Sequential(
            nn.Linear(2 * input_dim, big_dim),
            nn.BatchNorm1d(big_dim),
            nn.ReLU(),
            nn.Linear(big_dim, big_dim),
            nn.BatchNorm1d(big_dim),
            nn.ReLU(),
            nn.Linear(big_dim, big_dim),
            nn.BatchNorm1d(big_dim),
            nn.ReLU(),
        )
        convnn2 = nn.Sequential(
            nn.Linear(2 * (big_dim + input_dim), big_dim * 2),
            nn.BatchNorm1d(big_dim * 2),
            nn.ReLU(),
            nn.Linear(big_dim * 2, big_dim * 2),
            nn.BatchNorm1d(big_dim * 2),
            nn.ReLU(),
            nn.Linear(big_dim * 2, big_dim * 2),
            nn.BatchNorm1d(big_dim * 2),
            nn.ReLU(),
        )
        convnn3 = nn.Sequential(
            nn.Linear(2 * (big_dim * 2 + input_dim), big_dim * 4),
            nn.BatchNorm1d(big_dim * 4),
            nn.ReLU(),
            nn.Linear(big_dim * 4, big_dim * 4),
            nn.BatchNorm1d(big_dim * 4),
            nn.ReLU(),
            nn.Linear(big_dim * 4, big_dim * 4),
            nn.BatchNorm1d(big_dim * 4),
            nn.ReLU(),
        )

        self.batchnorm = nn.BatchNorm1d(input_dim)
        self.batchnormglobal = nn.BatchNorm1d(global_dim)
        self.outnn = nn.Sequential(
            nn.Linear(big_dim * 4 + input_dim + global_dim, bigger_dim),
            nn.ReLU(),
            nn.Linear(bigger_dim, output_dim),
        )

        self.conv = DynamicEdgeConv(nn=convnn, aggr=aggr, k=k)
        self.conv2 = DynamicEdgeConv(nn=convnn2, aggr=aggr, k=k)
        self.conv3 = DynamicEdgeConv(nn=convnn3, aggr=aggr, k=k)
Example 20
    def __init__(self, output_shape, args):
        torch.nn.Module.__init__(self)

        self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64, 64]), args.k,
                                     args.aggregation)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), args.k,
                                     args.aggregation)
        self.lin1 = MLP([128 + 64, 1024])

        self.mlp = torch.nn.Sequential(MLP([1024, 512]), torch.nn.Dropout(0.5),
                                       MLP([512, 256]), torch.nn.Dropout(0.5))

        self.lin = {
            key: torch.nn.Linear(256, output_shape[key][-1])
            for key in output_shape
        }

        for key in self.lin:
            self.add_module("lin_{}".format(key), self.lin[key])
Example 21
    def __init__(self, in_channels, classes):
        super(DGCNNClassifier, self).__init__()
        self.align = nn.Linear(in_channels, in_channels)
        self.ec1 = DynamicEdgeConv(
            MLP_EdgeConv(2 * 3, 64, hidden=128), k=16, aggr='max')
        self.ec2 = DynamicEdgeConv(
            MLP_EdgeConv(2 * 64, 64, hidden=128), k=16, aggr='max')
        self.ec3 = DynamicEdgeConv(
            MLP_EdgeConv(2 * 64, 128, hidden=256), k=16, aggr='max')
        self.mlp = MLP(256, 1024)  # fin = (128+64+64)=256
        self.fc = nn.Sequential(
            MLP(1024, 512), MLP(512, 256),
            MLP(256, classes, batchnorm=False, dropout=0.3,
                activation=nn.LogSoftmax))
Example 22
    def __init__(self, out_channels=40, k=20, BiLinear=BiLinear, pool='max'):
        super().__init__()

        if pool == 'mean':
            self.pool = 'mean'
            self.ema_max = False
        elif pool == 'max':
            self.pool = 'max'
            self.ema_max = False
        elif pool == 'ema-max':
            self.pool = 'max'
            self.ema_max = True
        self.conv1 = DynamicEdgeConv(
            Seq(Lin(2 * 3, 64),
                BiMLP([64, 64, 64], activation=ReLU, BiLinear=BiLinear)), k,
            self.pool)
        self.conv2 = DynamicEdgeConv(
            BiMLP([2 * 64, 128], activation=ReLU, BiLinear=BiLinear), k,
            self.pool)
        self.lin1 = BiMLP([128 + 64, 1024], activation=ReLU, BiLinear=BiLinear)

        self.mlp = Seq(BiMLP([1024, 512], activation=ReLU, BiLinear=BiLinear),
                       BiMLP([512, 256], activation=ReLU, BiLinear=BiLinear),
                       Lin(256, out_channels))
Example 23
    def __init__(self, input_size, embedding_size, n_classes, dropout=True, k=5, aggr='max', pool_op='max', k_global=5):
        super(DECSeqKnn, self).__init__()
        self.k_global = k_global
        self.conv1 = EdgeConv(MLP([2 * 3, 64, 64, 64]), aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool
        if dropout:
            self.mlp = Seq(
                MLP([1024, 512]), Dropout(0.5), MLP([512, 256]))
        else:
            self.mlp = Seq(
                MLP([1024, 512]), MLP([512, 256]))
        self.lin_global = Lin(256, k_global)
        self.lin2 = Lin(256, n_classes)
Example 24
    def __init__(self, fin, hidden_layers: List[int], activation: bool = True):
        super().__init__()
        self.fin = fin
        hidden_layers = [fin] + hidden_layers
        self.has_activation = activation
        self.mlps = nn.ModuleList(
            [MLP(2 * i, o) for i, o in layers(hidden_layers)])
        for i, o in layers(hidden_layers):
            sprint("Created layer (%d, %d)" % (i, o))
        self.filters = nn.ModuleList(
            [DynamicEdgeConv(mlp, k=32) for mlp in self.mlps])
        self.activation = nn.ModuleList([
            nn.Sequential(nn.ReLU(), nn.BatchNorm1d(o))
            if idx != len(hidden_layers) - 2 else nn.Identity()
            for idx, (i, o) in enumerate(layers(hidden_layers))
        ])
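
A forward pass consistent with this stack of paired filters and activation blocks could simply iterate over them in order; this is a hypothetical sketch, the original forward is not shown:

    def forward(self, x, batch):
        for conv, act in zip(self.filters, self.activation):
            x = act(conv(x, batch))  # DynamicEdgeConv, then ReLU + BatchNorm (Identity on the last layer)
        return x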
Example 25
    def __init__(self, input_size, embedding_size, n_classes, dropout=True, k=5, aggr='max', pool_op='max', k_global=25):
        super(DECSeqGlob, self).__init__()
        self.k_global = k_global
        self.conv1 = EdgeConv(MLP([2 * 3, 64, 64, 64]), aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool
        if dropout:
            self.mlp = Seq(
                MLP([1024, 512]), Dropout(0.5), MLP([512, 256]),
                Dropout(0.5), MLP([256, 32]))
        else:
            self.mlp = Seq(
                MLP([1024, 512]), MLP([512, 256]), MLP([256, 32]))
        self.lin = Lin(32, n_classes)
        # self.conv_glob = EdgeConv(MLP([2 * 32, 32]), aggr)
        self.conv_glob = GATConv(32, 32, heads=4, dropout=0.5, concat=False)
Example 26
    def __init__(self,
                 input_size,
                 embedding_size,
                 n_classes,
                 fov=4,
                 k=5,
                 aggr='max',
                 bn=True):
        super(DECSeq2, self).__init__()
        pad = int(fov / 2)
        self.pad = pad
        self.bn1 = nn.BatchNorm1d(64)
        self.conv1 = nn.Sequential(
            nn.Conv1d(2 * input_size, 64, kernel_size=fov, padding=pad),
            self.bn1, nn.ReLU())
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
        self.lin1 = MLP([128 + 64, 1024])

        self.mlp = Seq(MLP([1024, 512]), Dropout(0.5), MLP([512, 256]),
                       Dropout(0.5), Lin(256, n_classes))