Example No. 1
    def __init__(self,
                 class_count,
                 n_features=3,
                 num_points=1024,
                 sort_pool_k=32):
        super(PointNet2MRGSortPoolClass, self).__init__()

        nFeaturesL2 = 3 + 128

        shared_mlps = [
            SAModuleFullPoint(0.4, 16, MLP([n_features, 64, 64, 128])),
            SAModuleFullPoint(0.9, 32, MLP([nFeaturesL2, 128, 128, 256]))
        ]

        # the MLPs are shared across resolutions to lower the model's memory footprint
        self.high_resolution_module = SAModuleMRG(num_points, 512, shared_mlps)
        self.mid_resolution_module = SAModuleMRG(num_points, 256, shared_mlps)
        self.low_resolution_module = SAModuleMRG(num_points, 128, shared_mlps)

        self.readout = GlobalSortPool(MLP([789, 1024, 1024, 1024]),
                                      k=sort_pool_k)

        # Classification Layers
        sort_pool_out = 1024 * sort_pool_k
        self.lin1 = Lin(sort_pool_out, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.lin2 = Lin(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.lin3 = Lin(256, class_count)
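
A minimal sketch of the sharing trick flagged in the comment above, using only plain torch.nn (the modules here are illustrative, not the SAModuleFullPoint blocks): containers that hold the same submodule reuse one set of weights, so they are stored and trained once.

import torch
from torch import nn

shared = nn.Linear(8, 8)                # a single set of weights
a = nn.Sequential(shared)
b = nn.Sequential(shared)
assert a[0].weight.data_ptr() == b[0].weight.data_ptr()  # same underlying storage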
Example No. 2

def test_dense_gin_conv():
    in_channels, out_channels = (16, 32)
    nn = Seq(Lin(in_channels, 32), ReLU(), Lin(32, out_channels))
    sparse_conv = GINConv(nn)
    dense_conv = DenseGINConv(nn)
    # re-created with a trainable epsilon; eps is initialised to 0 either way,
    # so the dense output below still matches the sparse one
    dense_conv = DenseGINConv(nn, train_eps=True)
    assert dense_conv.__repr__() == (
        'DenseGINConv(nn=Sequential(\n'
        '  (0): Linear(in_features=16, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')

    x = torch.randn((5, in_channels))
    edge_index = torch.tensor([[0, 0, 1, 1, 2, 2, 3, 4],
                               [1, 2, 0, 2, 0, 1, 4, 3]])

    sparse_out = sparse_conv(x, edge_index)

    x = torch.cat([x, x.new_zeros(1, in_channels)],
                  dim=0).view(2, 3, in_channels)
    adj = torch.Tensor([
        [[0, 1, 1], [1, 0, 1], [1, 1, 0]],
        [[0, 1, 0], [1, 0, 0], [0, 0, 0]],
    ])
    mask = torch.tensor([[1, 1, 1], [1, 1, 0]], dtype=torch.bool)

    dense_out = dense_conv(x, adj, mask)
    assert dense_out.size() == (2, 3, out_channels)
    dense_out = dense_out.view(6, out_channels)[:-1]
    assert torch.allclose(sparse_out, dense_out, atol=1e-04)
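
The handcrafted adj and mask above are exactly what a dense conversion of this edge_index yields; a quick sketch, assuming torch_geometric.utils.to_dense_adj:

from torch_geometric.utils import to_dense_adj

batch = torch.tensor([0, 0, 0, 1, 1])    # nodes 0-2 form graph 0, nodes 3-4 graph 1
dense = to_dense_adj(edge_index, batch)  # shape [2, 3, 3], zero-padded like adj above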
Example No. 3
    def __init__(self, in_channels, out_channels, dim_model, k=16):
        super().__init__()
        self.k = k

        # a dummy feature is created if none is given
        in_channels = max(in_channels, 1)

        # first block
        self.mlp_input = MLP([in_channels, dim_model[0]])

        self.transformer_input = TransformerBlock(in_channels=dim_model[0],
                                                  out_channels=dim_model[0])
        # backbone layers
        self.transformers_down = torch.nn.ModuleList()
        self.transition_down = torch.nn.ModuleList()

        for i in range(len(dim_model) - 1):
            # Add Transition Down block followed by a Transformer block
            self.transition_down.append(
                TransitionDown(in_channels=dim_model[i],
                               out_channels=dim_model[i + 1],
                               k=self.k))

            self.transformers_down.append(
                TransformerBlock(in_channels=dim_model[i + 1],
                                 out_channels=dim_model[i + 1]))

        # class score computation
        self.mlp_output = Seq(Lin(dim_model[-1], 64), ReLU(), Lin(64, 64),
                              ReLU(), Lin(64, out_channels))
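
A hedged instantiation sketch; the class name PointTransformerNet, num_classes, and the dim_model values are illustrative, not taken from the excerpt:

model = PointTransformerNet(in_channels=3, out_channels=num_classes,
                            dim_model=[32, 64, 128, 256, 512], k=16)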
Example No. 4
    def __init__(self, cfg):
        super(BasicAttentionModel, self).__init__()

        if 'modules' in cfg:
            self.model_config = cfg['modules']['attention_gnn']
        else:
            self.model_config = cfg

        self.nheads = self.model_config['nheads']

        # first layer keeps the feature count at 16
        # (earlier revisions used 4- and 15-feature inputs)
        self.attn1 = GATConv(16, 16, heads=self.nheads, concat=False)

        # second layer increases number of features from 16 to 32
        self.attn2 = GATConv(16, 32, heads=self.nheads, concat=False)

        # third layer increases number of features from 32 to 64
        self.attn3 = GATConv(32, 64, heads=self.nheads, concat=False)

        # final prediction layer
        self.edge_pred_mlp = Seq(Lin(138, 64), Dropout(p=0.2), LeakyReLU(0.12),
                                 Dropout(p=0.2), Lin(64, 16), LeakyReLU(0.12),
                                 Lin(16, 1), Sigmoid())

        def edge_pred_model(source, target, edge_attr, u, batch):
            out = torch.cat([source, target, edge_attr], dim=1)
            out = self.edge_pred_mlp(out)
            return out

        self.edge_predictor = MetaLayer(edge_pred_model, None, None)
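
For context, a sketch of how this MetaLayer is typically invoked (tensor names are illustrative); with only an edge model set, the nodes and globals pass through unchanged and the per-edge sigmoid scores come back as the new edge attributes:

x, edge_pred, u = self.edge_predictor(x, edge_index, edge_attr, u, batch)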
Example No. 5
    def __init__(self,
                 class_count,
                 nfeatures,
                 batch_size,
                 bn_momentum=0.1,
                 nPoints=1024):
        super(PointNetInputEnhanced, self).__init__()

        self.nPoints = nPoints
        self.nFeatures = nfeatures + 4  # 4 = the number of radii used to count neighbouring points
        self.batch_size = batch_size

        self.de_layer = AddNeightboursCount(
            max_points=[1024, 1024, 1024, 1024], radii=[0.1, 0.2, 0.4, 0.8])

        self.net = Sequential(Conv1d(self.nFeatures, 64, 1, stride=1), ReLU(),
                              Conv1d(64, 64, 1, stride=1), ReLU(),
                              Conv1d(64, 128, 1, stride=1), ReLU(),
                              Conv1d(128, 1024, 1, stride=1), ReLU())

        self.sa3_module = GlobalSAModule()  # Maxpool

        # Classification Layers
        self.lin1 = Lin(1024, 512)
        self.bn1 = BatchNorm1d(512, momentum=bn_momentum)
        self.lin2 = Lin(512, 256)
        self.bn2 = BatchNorm1d(256, momentum=bn_momentum)
        self.lin3 = Lin(256, class_count)
Example No. 6
    def __init__(self, class_count, nfeatures=3):
        super(PointNet2MSGClass, self).__init__()

        self.sa1_module = SAModuleMSG(512, [0.1, 0.2, 0.4], [16, 32, 128], [
            MLP([nfeatures, 32, 32, 64]),
            MLP([nfeatures, 64, 64, 128]),
            MLP([nfeatures, 64, 96, 128])
        ])

        # Because we concatenate each scale's output as a feature of each point
        nFeaturesL2 = 3 + 64 + 128 + 128
        self.sa2_module = SAModuleMSG(128, [0.2, 0.4, 0.8], [32, 64, 128], [
            MLP([nFeaturesL2, 64, 64, 128]),
            MLP([nFeaturesL2, 128, 128, 256]),
            MLP([nFeaturesL2, 128, 128, 256])
        ])

        nFeaturesL3 = 3 + 128 + 256 + 256
        self.sa3_module = GlobalSAModule(MLP([nFeaturesL3, 256, 512, 1024]))

        # Classification Layers
        self.lin1 = Lin(1024, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.lin2 = Lin(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.lin3 = Lin(256, class_count)
Example No. 7
    def __init__(self,
                 in_channels,
                 hidden_channels,
                 out_channels,
                 num_layers,
                 heads,
                 dropout=0.5):
        super(GAT, self).__init__()

        self.num_layers = num_layers
        self.dropout = dropout
        self.convs = torch.nn.ModuleList()
        self.convs.append(GATConv(in_channels, hidden_channels, heads))
        for _ in range(num_layers - 2):
            self.convs.append(
                GATConv(heads * hidden_channels, hidden_channels, heads))
        self.convs.append(
            GATConv(heads * hidden_channels, out_channels, heads,
                    concat=False))

        self.skips = torch.nn.ModuleList()
        self.skips.append(Lin(in_channels, hidden_channels * heads))
        for _ in range(num_layers - 2):
            self.skips.append(
                Lin(hidden_channels * heads, hidden_channels * heads))
        self.skips.append(Lin(hidden_channels * heads, out_channels))
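
The forward pass is not part of the excerpt; a plausible sketch, assuming torch.nn.functional as F, in which each skip projection is summed with its GATConv output:

    def forward(self, x, edge_index):
        for i in range(self.num_layers):
            x = self.convs[i](x, edge_index) + self.skips[i](x)
            if i != self.num_layers - 1:
                x = F.elu(x)
                x = F.dropout(x, p=self.dropout, training=self.training)
        return x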
Example No. 8
    def __init__(self, hidden_channels):
        super(GINConv, self).__init__(aggr='mean')

        self.mlp = Seq(
            Lin(hidden_channels, 2 * hidden_channels), ReLU(),
            Lin(2 * hidden_channels, hidden_channels))
        self.eps = torch.nn.Parameter(torch.Tensor([0.]))
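
Only the constructor is shown; a hedged sketch of the usual message-passing forward for this layer (note that aggr='mean' deviates from the sum aggregation of the original GIN paper):

    def forward(self, x, edge_index):
        out = self.propagate(edge_index, x=x)      # mean over neighbours
        return self.mlp((1 + self.eps) * x + out)

    def message(self, x_j):
        return x_j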
Example No. 9
    def __init__(self):
        super(SaNet, self).__init__()

        self.sa1_module = SAModule(0.25, 0.2, MLP([3 + 3, 64, 64, 128]))
        self.sa2_module = SAModule(0.5, 0.4, MLP([128 + 3, 128, 128, 256]))
        self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 512]))

        self.skip_attn1 = SkipAttention(MLP([512 + 2, 128]), MLP([256, 128]),
                                        MLP([256, 512 + 2]),
                                        MLP([512 + 2, 512]))
        self.skip_attn2 = SkipAttention(MLP([256, 64]), MLP([128, 64]),
                                        MLP([128, 256]), MLP([256, 256]))

        self.folding1 = FoldingBlock(64, 256, [
            MLP([512 + 512, 256]),
            MLP([512 + 512, 256]),
            MLP([512 + 512, 512 + 512]),
            MLP([512 + 512, 512, 256])
        ], [512 + 2, 512], [1024, 512])

        self.folding2 = FoldingBlock(256, 512, [
            MLP([256 + 256, 64]),
            MLP([256 + 256, 64]),
            MLP([256 + 256, 256 + 256]),
            MLP([256 + 256, 256, 128])
        ], [256 + 2, 256], [256, 256])
        self.folding3 = FoldingBlock(512, 2048, [
            MLP([128 + 128, 64]),
            MLP([128 + 128, 64]),
            MLP([128 + 128, 128 + 128]),
            MLP([128 + 128, 128])
        ], [128 + 2, 128], [512, 256, 128])

        self.lin = Seq(Lin(128, 64), ReLU(), Lin(64, 3))
Example No. 10
def test_ppf_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))
    pos = torch.rand((num_nodes, 3))
    norm = torch.nn.functional.normalize(torch.rand((num_nodes, 3)), dim=1)

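    # in_channels + 4: PPFConv appends the four point-pair features
    # (distance and three angles) to each neighbour's features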
    local_nn = Seq(Lin(in_channels + 4, 32), ReLU(), Lin(32, out_channels))
    global_nn = Seq(Lin(out_channels, out_channels))
    conv = PPFConv(local_nn, global_nn)
    assert conv.__repr__() == (
        'PPFConv(local_nn=Sequential(\n'
        '  (0): Linear(in_features=20, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '), global_nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    out = conv(x, pos, norm, edge_index)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, pos=pos, norm=norm, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, pos, norm, edge_index).tolist() == out.tolist()
Example No. 11
    def __init__(self, node_size, input_feature, num_classes):
        super(NetGAT, self).__init__()
        self.node_per_graph = node_size

        hidden_size = 256
        gat_head = 8
        head_size = hidden_size // gat_head
        self.input_feature = input_feature

        # self.linprev = MLP([input_feature, 64, 64, 64])
        self.linprev = EdgeConv(MLP([input_feature * 2, 64, 64, 64]),
                                aggr='max')

        self.conv1 = GATConv(64, head_size, gat_head)
        self.bn1 = torch.nn.BatchNorm1d(hidden_size)
        self.lin1 = torch.nn.Linear(64, hidden_size)

        self.conv2 = GATConv(hidden_size, head_size, gat_head)
        self.bn2 = torch.nn.BatchNorm1d(hidden_size)
        self.lin2 = torch.nn.Linear(hidden_size, hidden_size)

        self.conv3 = GATConv(hidden_size, head_size, gat_head)
        self.bn3 = torch.nn.BatchNorm1d(hidden_size)
        self.lin3 = torch.nn.Linear(hidden_size, hidden_size)

        self.conv4 = GATConv(hidden_size, head_size, gat_head)
        self.bn4 = torch.nn.BatchNorm1d(hidden_size)
        self.lin4 = torch.nn.Linear(hidden_size, hidden_size)

        self.mlp = Seq(Lin(2048, 512), Dropout(0.4), Lin(512, 256),
                       Dropout(0.4), Lin(256, num_classes))
Example No. 12
 def __init__(self):
     super(EdgeModel_ONE, self).__init__()
     hidden = HIDDEN_EDGE_ONE
     in_channels = ENCODING_EDGE_1 + 2 * ENCODING_NODE_1
     self.edge_mlp = Seq(Lin(in_channels, hidden), LeakyReLU(),
                         LayerNorm(hidden),
                         Lin(hidden, ENCODING_EDGE_1)).apply(init_weights)
Example No. 13
    def __init__(self, NUM_CLASS):
        super(RotationEstimateNN, self).__init__()
        self.NUM_CLASS = NUM_CLASS
        self.cls_model = Classifier(NUM_CLASS)
        self.cls_model.load_state_dict(
            torch.load(os.path.join(BASE_DIR, 'models', 'classifier_1024',
                                    '117_0.98_1024'),
                       map_location='cpu'))
        # self.cls_model.load_state_dict(torch.load(os.path.join(BASE_DIR, 'models', 'classifier_1024', '117_0.98_1024')))
        self.cls_model.eval()
        self.pcnn1 = XConv(0, 48, dim=3, kernel_size=8, hidden_channels=32)
        # in_c, out_c, nei_c, spread, n_rep
        self.pcnn2 = XConv(48,
                           96,
                           dim=3,
                           kernel_size=12,
                           hidden_channels=64,
                           dilation=2)

        self.pcnn3 = XConv(96,
                           192,
                           dim=3,
                           kernel_size=16,
                           hidden_channels=128,
                           dilation=2)
        # self.pcnn4 = XConv(
        #     192, 384, dim=3, kernel_size=16, hidden_channels=256, dilation=2)
        # self.pcnn5 = XConv(
        #     384, 768, dim=3, kernel_size=16, hidden_channels=512, dilation=2)

        self.lin1 = Lin(200, 256)
        self.lin2 = Lin(256, 128)
        self.lin3 = Lin(128, 3)
Example No. 14
def test_nn_conv():
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.rand(row.size(0), 3)
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

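    # the edge network maps each 3-dim edge attribute to 8 * 32 = 256 values,
    # which NNConv reshapes into a per-edge (8, 32) weight matrix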
    nn = Seq(Lin(3, 32), ReLU(), Lin(32, 8 * 32))
    conv = NNConv(8, 32, nn=nn)
    assert conv.__repr__() == (
        'NNConv(8, 32, aggr=add, nn=Sequential(\n'
        '  (0): Linear(in_features=3, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=256, bias=True)\n'
        '))')
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)
    assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    if is_full_test():
        t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, edge_index, value).tolist() == out.tolist()
        assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()

        t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit(x1, adj.t()).tolist() == out.tolist()

    adj = adj.sparse_resize((4, 2))
    conv = NNConv((8, 16), 32, nn=nn)
    assert conv.__repr__() == (
        'NNConv((8, 16), 32, aggr=add, nn=Sequential(\n'
        '  (0): Linear(in_features=3, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=256, bias=True)\n'
        '))')
    out1 = conv((x1, x2), edge_index, value)
    out2 = conv((x1, None), edge_index, value, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    if is_full_test():
        t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), edge_index, value).tolist() == out1.tolist()
        assert jit((x1, x2), edge_index, value,
                   size=(4, 2)).tolist() == out1.tolist()
        assert jit((x1, None), edge_index, value,
                   size=(4, 2)).tolist() == out2.tolist()

        t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
        assert jit((x1, None), adj.t()).tolist() == out2.tolist()
Example No. 15
 def __init__(self):
     super(EdgeModel_TWO, self).__init__()
     hidden = HIDDEN_EDGE_TWO
     in_channels = NO_EDGE_FEATURES_TWO + 2 * NO_NODE_FEATURES_TWO
     self.edge_mlp = Seq(Lin(in_channels, hidden), LeakyReLU(), LayerNorm(hidden),
                         Lin(hidden, hidden), LeakyReLU(), LayerNorm(hidden),
                         Lin(hidden, NO_EDGE_FEATURES_TWO)).apply(init_weights)
Example No. 16

def test_dynamic_edge_conv():
    x1 = torch.randn(8, 16)
    x2 = torch.randn(4, 16)
    batch1 = torch.tensor([0, 0, 0, 0, 1, 1, 1, 1])
    batch2 = torch.tensor([0, 0, 1, 1])

    nn = Seq(Lin(32, 16), ReLU(), Lin(16, 32))
    conv = DynamicEdgeConv(nn, k=2)
    assert conv.__repr__() == (
        'DynamicEdgeConv(nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=16, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=16, out_features=32, bias=True)\n'
        '), k=2)')
    out11 = conv(x1)
    assert out11.size() == (8, 32)

    out12 = conv(x1, batch1)
    assert out12.size() == (8, 32)

    out21 = conv((x1, x2))
    assert out21.size() == (4, 32)

    out22 = conv((x1, x2), (batch1, batch2))
    assert out22.size() == (4, 32)

    t = '(Tensor, OptTensor) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1).tolist() == out11.tolist()
    assert jit(x1, batch1).tolist() == out12.tolist()

    t = '(PairTensor, Optional[PairTensor]) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2)).tolist() == out21.tolist()
    assert jit((x1, x2), (batch1, batch2)).tolist() == out22.tolist()
Example No. 17
def get_mlp(
    in_size,
    out_size,
    n_hidden,
    hidden_size,
    activation=nn.LeakyReLU,
    activate_last=True,
    layer_norm=True,
):
    arch = []
    l_in = in_size
    for l_idx in range(n_hidden):
        arch.append(Lin(l_in, hidden_size))
        arch.append(activation())
        l_in = hidden_size

    arch.append(Lin(l_in, out_size))

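    # note: LayerNorm is only appended when the last layer is activated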
    if activate_last:
        arch.append(activation())

        if layer_norm:
            arch.append(LayerNorm(out_size))

    return Seq(*arch)
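
A quick usage sketch (sizes are illustrative):

mlp = get_mlp(in_size=16, out_size=8, n_hidden=2, hidden_size=64)
# Lin(16, 64) -> LeakyReLU -> Lin(64, 64) -> LeakyReLU -> Lin(64, 8) -> LeakyReLU -> LayerNorm(8)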
Example No. 18
    def __init__(self, NUM_CLASS):
        super(Classifier, self).__init__()
        self.pcnn1 = XConv(0, 48, dim=3, kernel_size=8, hidden_channels=32)
        # in_c, out_c, nei_c, spread, n_rep
        self.pcnn2 = XConv(48,
                           96,
                           dim=3,
                           kernel_size=12,
                           hidden_channels=64,
                           dilation=2)

        self.pcnn3 = XConv(96,
                           192,
                           dim=3,
                           kernel_size=16,
                           hidden_channels=128,
                           dilation=2)
        # self.pcnn4 = XConv(
        #     192, 384, dim=3, kernel_size=16, hidden_channels=256, dilation=2)
        # self.pcnn5 = XConv(
        #     384, 768, dim=3, kernel_size=16, hidden_channels=512, dilation=2)

        self.lin1 = Lin(192, 256)
        self.lin2 = Lin(256, 128)
        self.lin3 = Lin(128, NUM_CLASS)
Example No. 19

    def __init__(self, class_count, n_feature=3, sort_pool_k=32):
        super(PointNet2MSGSortPoolClass, self).__init__()

        self.sa1_module = SAModuleMSG(512, [0.1, 0.2, 0.4], [16, 32, 128], [
            MLP([n_feature, 32, 32, 64]),
            MLP([n_feature, 64, 64, 128]),
            MLP([n_feature, 64, 96, 128])
        ])

        # Because we concatenate each scale's output as a feature of each point
        n_features_l2 = 3 + 64 + 128 + 128
        self.sa2_module = SAModuleMSG(128, [0.2, 0.4, 0.8], [32, 64, 128], [
            MLP([n_features_l2, 64, 64, 128]),
            MLP([n_features_l2, 128, 128, 256]),
            MLP([n_features_l2, 128, 128, 256])
        ])

        n_features_l3 = 3 + 128 + 256 + 256
        self.sa3_module = GlobalSortPool(MLP([n_features_l3, 256, 512, 1024]),
                                         sort_pool_k)

        # Classification Layers
        classification_point_feature = 1024 * sort_pool_k
        self.lin1 = Lin(classification_point_feature, 512)
        self.bn1 = nn.BatchNorm1d(512)
        self.lin2 = Lin(512, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.lin3 = Lin(256, class_count)
Example No. 20
  def __init__(self,
              in_channels,
              out_channels,
              beta=0.3):
    super().__init__()
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.beta = beta

    text_feature_size = 220
    # note: the image branch reads its input size from the global train_dataset
    # rather than from the in_channels argument
    self.conv1_img = GATConv(train_dataset.num_features, 512, heads=3, dropout=0.2)
    self.conv1_text = GATConv(text_feature_size, text_feature_size // 2, heads=3, dropout=0.2)

    self.conv2_img = GATConv(512 * 3, 512, heads=1, dropout=0.2)
    self.conv2_text = GATConv(text_feature_size // 2 * 3, text_feature_size // 2, heads=1, dropout=0.2)

    self.conv3_img = GATConv(512, 20, heads=1, dropout=0.2)
    self.conv3_text = GATConv(text_feature_size // 2, 20, heads=1, dropout=0.2)

    self.conv4 = GATConv(40, 20, heads=2, dropout=0.2)

    self.pool1 = EdgePooling(40)

    # global_mean_pool (presumably applied before the linear layers)

    self.lin1 = Lin(40, 20) 
    self.lin2 = Lin(20, 2)
Example No. 21
    def __init__(self,
                 input_size,
                 embedding_size,
                 n_classes,
                 dropout=True,
                 k=5,
                 aggr='max',
                 pool_op='max'):
        super(DECSeq, self).__init__()
        # self.bn0 = BN(input_size)
        # self.bn1 = BN(64)
        # self.bn2 = BN(128)
        self.conv1 = EdgeConv(MLP([2 * 3, 64, 64, 64], batch_norm=True), aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128], batch_norm=True), k,
                                     aggr)
        self.lin1 = MLP([128 + 64, 1024])
        # only 'max' pooling is handled; any other pool_op leaves self.pool unset
        if pool_op == 'max':
            self.pool = global_max_pool

        if dropout:
            self.mlp = Seq(MLP([1024, 512], batch_norm=True), Dropout(0.5),
                           MLP([512, 256], batch_norm=True), Dropout(0.5),
                           Lin(256, n_classes))
        else:
            self.mlp = Seq(MLP([1024, 512]), MLP([512, 256]),
                           Lin(256, n_classes))
Example No. 22
    def __init__(self, num_classes):
        super(Net, self).__init__()

        self.conv1 = XConv(0, 48, dim=3, kernel_size=8, hidden_channels=32)
        self.conv2 = XConv(48,
                           96,
                           dim=3,
                           kernel_size=12,
                           hidden_channels=64,
                           dilation=2)
        self.conv3 = XConv(96,
                           192,
                           dim=3,
                           kernel_size=16,
                           hidden_channels=128,
                           dilation=2)
        self.conv4 = XConv(192,
                           384,
                           dim=3,
                           kernel_size=16,
                           hidden_channels=256,
                           dilation=2)

        self.lin1 = Lin(384, 256)
        self.lin2 = Lin(256, 128)
        self.lin3 = Lin(128, num_classes)
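
This constructor mirrors PyTorch Geometric's PointCNN classification example; a sketch of the matching forward pass along those lines (the fps ratios and dropout are assumptions from that example, with F as torch.nn.functional):

    def forward(self, pos, batch):
        x = F.relu(self.conv1(None, pos, batch))

        idx = fps(pos, batch, ratio=0.375)
        x, pos, batch = x[idx], pos[idx], batch[idx]
        x = F.relu(self.conv2(x, pos, batch))

        idx = fps(pos, batch, ratio=0.334)
        x, pos, batch = x[idx], pos[idx], batch[idx]
        x = F.relu(self.conv3(x, pos, batch))
        x = F.relu(self.conv4(x, pos, batch))

        x = global_mean_pool(x, batch)
        x = F.relu(self.lin1(x))
        x = F.relu(self.lin2(x))
        x = F.dropout(x, p=0.5, training=self.training)
        return F.log_softmax(self.lin3(x), dim=-1)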
Example No. 23
    def __init__(self):
        super(GlobalModel_ONE, self).__init__()
        hidden = HIDDEN_GRAPH_ONE
        in_channels = ENCODING_NODE_1 + ENCODING_EDGE_1

        self.global_mlp = Seq(Lin(in_channels, hidden), LeakyReLU(), LayerNorm(hidden),
                              Lin(hidden, hidden), LeakyReLU(), LayerNorm(hidden),
                              Lin(hidden, NO_GRAPH_FEATURES_ONE)).apply(init_weights)
Example No. 24
 def __init__(self, node_dims, edge_dims, u_dims, hidden_size=32):
     super().__init__()
     mlp_1_input_size = node_dims + edge_dims
     self.node_mlp_1 = Seq(Lin(mlp_1_input_size, hidden_size), ReLU(),
                           Lin(hidden_size, hidden_size))
     mlp_2_input_size = node_dims + hidden_size
     self.node_mlp_2 = Seq(Lin(mlp_2_input_size, hidden_size), ReLU(),
                           Lin(hidden_size, node_dims))
Example No. 25
def test_static_gin_conv():
    x = torch.randn(3, 4, 16)
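    # a "static" batch: the same 4-node graph is evaluated for 3 feature matrices at once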
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])

    nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32))
    conv = GINConv(nn, train_eps=True)
    out = conv(x, edge_index)
    assert out.size() == (3, 4, 32)
Example No. 26
 def __init__(self):
     super(NodeDecoder, self).__init__()
     self.node_mlp_1 = Seq(Lin(2 + 32, 32), ReLU(), Lin(32, 32))
     self.node_mlp_2 = Seq(Lin(2 + 32 + 32, 32), ReLU(), Lin(32, 4))
Example No. 27
    def __init__(self):
        super(GlobalModel_TWO, self).__init__()
        hidden = HIDDEN_GRAPH_TWO
        in_channels = NO_EDGE_FEATURES_TWO + NO_NODE_FEATURES_TWO

        self.global_mlp = Seq(Lin(in_channels, hidden), LeakyReLU(), LayerNorm(hidden),
                              Lin(hidden, hidden), LeakyReLU(), LayerNorm(hidden),
                              Lin(hidden, NO_GRAPH_FEATURES_TWO)).apply(init_weights)
Example No. 28
 def __init__(self):
     super(NodeModel, self).__init__()
     self.node_mlp_1 = Seq(
         Lin(num_node_features + num_edge_features, num_node_features),
         ReLU(), Lin(num_node_features, num_node_features))
     self.node_mlp_2 = Seq(
         Lin(2 * num_node_features, num_node_features), ReLU(),
         Lin(num_node_features, num_node_features))
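
Only the constructor appears here; the layer sizes match the node model from PyTorch Geometric's MetaLayer documentation, whose forward (a sketch, assuming torch_scatter's scatter_mean) is:

 def forward(self, x, edge_index, edge_attr, u, batch):
     row, col = edge_index
     out = torch.cat([x[row], edge_attr], dim=1)
     out = self.node_mlp_1(out)
     out = scatter_mean(out, col, dim=0, dim_size=x.size(0))
     out = torch.cat([x, out], dim=1)
     return self.node_mlp_2(out)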
Example No. 29
 def __init__(self):
     super(NodeEncoder, self).__init__()
     self.node_mlp_1 = Seq(Lin(4 + 32, 32), ReLU(), Lin(32, 32))
     self.node_mlp_2 = Seq(Lin(4 + 32, 32), ReLU(), Lin(32, 2))
Example No. 30
 def __init__(self, ratio, r):
     super(Net, self).__init__()
     self.r = r
     self.fc1 = MLP([3, 64])
     self.mu_conv = MuConv(MLP([64, 64]), MLP([64, 20]))
     self.sig_conv = SigmaConv(MLP([64, 64]), MLP([64, 20]))
     self.fc3 = Lin(20, 1024)
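     # 3072 = 1024 * 3: presumably decoded back into a 1024-point cloud
     # (an assumption; the forward pass is not shown)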
     self.fc4 = Lin(1024, 3072)