def make_mlp(in_channels, mlp_channels, batch_norm=True):
    assert len(mlp_channels) >= 1
    layers = []

    for c in mlp_channels:
        layers += [Lin(in_channels, c)]
        if batch_norm:
            layers += [BatchNorm1d(c)]
        layers += [ReLU()]

        in_channels = c

    return Seq(*layers)
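A minimal usage sketch (not part of the source), spelling out the aliases these snippets rely on, i.e. Seq and Lin are torch.nn.Sequential and torch.nn.Linear:

    import torch
    from torch.nn import Sequential as Seq, Linear as Lin, ReLU, BatchNorm1d

    mlp = make_mlp(16, [32, 64])    # Lin(16, 32)+BN+ReLU, then Lin(32, 64)+BN+ReLU
    out = mlp(torch.randn(8, 16))   # -> torch.Size([8, 64])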
Example #2
    def __init__(self,
                 input_size,
                 n_classes=2,
                 embedding_size=128,
                 hidden_size=256,
                 dropout=True):
        super(BiLSTM, self).__init__()
        self.emb_size = embedding_size
        self.h_size = hidden_size
        self.mlp = MLP([input_size, embedding_size])
        self.lstm = nn.LSTM(embedding_size,
                            hidden_size,
                            bidirectional=True,
                            batch_first=True)

        if dropout:
            # Widths must compose: the first MLP ends at 128 and the second at
            # 40, so the classifier head takes 40 inputs (the original snippet
            # mixed 256/128 widths, which cannot be chained).
            self.lin = Seq(MLP([hidden_size * 2, 128]), Dropout(0.5),
                           MLP([128, 40]), Dropout(0.5),
                           nn.Linear(40, n_classes))
        else:
            self.lin = Seq(MLP([hidden_size * 2, 128]), MLP([128, 40]),
                           nn.Linear(40, n_classes))
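A hedged sketch of a forward pass consistent with this constructor (the source's forward is not shown; input assumed to be (batch, seq_len, input_size), and hidden_size * 2 reflects the concatenated forward/backward directions of the bidirectional LSTM):

    def forward(self, x):
        B, T, _ = x.shape
        x = self.mlp(x.reshape(B * T, -1)).reshape(B, T, self.emb_size)
        out, _ = self.lstm(x)        # (B, T, 2 * h_size), batch_first=True
        return self.lin(out[:, -1])  # classify from the last time step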
Example #3
    def __init__(self, option, model_type, dataset, modules):
        # Extract parameters from the dataset
        # Assemble encoder / decoder
        UnwrappedUnetBasedModel.__init__(self, option, model_type, dataset, modules)

        # Build final MLP
        last_mlp_opt = option.mlp_cls

        self.out_channels = option.out_channels
        in_feat = last_mlp_opt.nn[0]
        self.FC_layer = Seq()
        for i in range(1, len(last_mlp_opt.nn)):
            self.FC_layer.add_module(
                str(i),
                Seq(
                    *[
                        Lin(in_feat, last_mlp_opt.nn[i], bias=False),
                        FastBatchNorm1d(last_mlp_opt.nn[i], momentum=last_mlp_opt.bn_momentum),
                        LeakyReLU(0.2),
                    ]
                ),
            )
            in_feat = last_mlp_opt.nn[i]

        if last_mlp_opt.dropout:
            self.FC_layer.add_module("Dropout", Dropout(p=last_mlp_opt.dropout))

        self.FC_layer.add_module("Last", Lin(in_feat, self.out_channels, bias=False))
        self.mode = option.loss_mode
        self.normalize_feature = option.normalize_feature
        self.loss_names = ["loss_reg", "loss"]

        self.lambda_reg = self.get_from_opt(option, ["loss_weights", "lambda_reg"])
        if self.lambda_reg:
            self.loss_names += ["loss_regul"]

        self.lambda_internal_losses = self.get_from_opt(option, ["loss_weights", "lambda_internal_losses"])

        self.visual_names = ["data_visual"]
Example #4
    def __init__(self, dim_local, dim_global, dim_hidden, dim_pre_aggr):
        super(GlobalModel, self).__init__()
        self.dim_local = dim_local
        self.dim_global = dim_global
        self.dim_hidden = dim_hidden
        self.dim_concat = self.dim_local + self.dim_global
        self.dim_pre_aggr = dim_pre_aggr

        # MLP prior to aggregating node encodings
        self.mlp_pre_aggr = Seq(Lin(self.dim_concat, self.dim_hidden),
                                ReLU(),
                                Lin(self.dim_hidden, self.dim_hidden),
                                ReLU(),
                                Lin(self.dim_hidden, self.dim_pre_aggr),
                                LayerNorm(self.dim_pre_aggr))
        # MLP after aggregating node encodings
        self.mlp_post_aggr = Seq(Lin(self.dim_pre_aggr+self.dim_global, self.dim_hidden),
                                 ReLU(),
                                 Lin(self.dim_hidden, self.dim_hidden),
                                 ReLU(),
                                 Lin(self.dim_hidden, self.dim_global),
                                 LayerNorm(self.dim_global))
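A hedged sketch of the forward pass these two MLPs imply (the source's forward is not shown). It assumes the MetaLayer global-model calling convention and torch_scatter.scatter_mean for per-graph pooling; u[batch] broadcasts each graph's global vector to its nodes:

    def forward(self, x, edge_index, edge_attr, u, batch):
        out = torch.cat([x, u[batch]], dim=1)   # [N, dim_local + dim_global]
        out = self.mlp_pre_aggr(out)            # [N, dim_pre_aggr]
        out = scatter_mean(out, batch, dim=0)   # [B, dim_pre_aggr]
        out = torch.cat([out, u], dim=1)        # [B, dim_pre_aggr + dim_global]
        return self.mlp_post_aggr(out)          # [B, dim_global]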
Example #5
    def __init__(self, in_channels, out_channels, k=32, aggr='max'):
        super(DynamicEdge, self).__init__()

        self.conv1 = DynamicEdgeConv(MLP([2 * in_channels, in_channels * 2]),
                                     k, aggr)
        self.conv2 = DynamicEdgeConv(
            MLP([2 * in_channels * 2, in_channels * 2]), k, aggr)

        self.lin1 = MLP([in_channels * 2, in_channels * 4])

        self.mlp = Seq(MLP([in_channels * 4, in_channels * 2]), Dropout(0.5),
                       MLP([in_channels * 2, in_channels]), Dropout(0.5),
                       Lin(in_channels, out_channels))
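A hedged forward sketch matching the channel widths above (the source's forward is not shown; DynamicEdgeConv recomputes a k-NN graph from its input features on every call):

    def forward(self, x, batch=None):
        x = self.conv1(x, batch)   # -> 2 * in_channels
        x = self.conv2(x, batch)   # -> 2 * in_channels
        x = self.lin1(x)           # -> 4 * in_channels
        return self.mlp(x)         # -> out_channels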
Example #6
def test_gine_conv():
    x1 = torch.randn(4, 16)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.randn(row.size(0), 16)
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

    nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32))
    conv = GINEConv(nn, train_eps=True)
    assert conv.__repr__() == (
        'GINEConv(nn=Sequential(\n'
        '  (0): Linear(in_features=16, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)
    assert conv(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()
    assert conv(x1, adj.t()).tolist() == out.tolist()

    t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, edge_index, value).tolist() == out.tolist()
    assert jit(x1, edge_index, value, size=(4, 4)).tolist() == out.tolist()

    t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit(x1, adj.t()).tolist() == out.tolist()

    adj = adj.sparse_resize((4, 2))
    out1 = conv((x1, x2), edge_index, value)
    out2 = conv((x1, None), edge_index, value, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert conv((x1, x2), edge_index, value, (4, 2)).tolist() == out1.tolist()
    assert conv((x1, x2), adj.t()).tolist() == out1.tolist()
    assert conv((x1, None), adj.t()).tolist() == out2.tolist()

    t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), edge_index, value).tolist() == out1.tolist()
    assert jit((x1, x2), edge_index, value,
               size=(4, 2)).tolist() == out1.tolist()
    assert jit((x1, None), edge_index, value,
               size=(4, 2)).tolist() == out2.tolist()

    t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
    jit = torch.jit.script(conv.jittable(t))
    assert jit((x1, x2), adj.t()).tolist() == out1.tolist()
    assert jit((x1, None), adj.t()).tolist() == out2.tolist()
Example #7
    def __init__(self, BATCH_SIZE, NO_MP_ONE, NO_MP_TWO):
        super(GNN_FULL_CLASS, self).__init__()
        self.meta1 = MetaLayer(EdgeModel_ONE(), NodeModel_ONE(),
                               GlobalModel_ONE())
        self.meta2 = MetaLayer(EdgeModel_ONE(), NodeModel_ONE(),
                               GlobalModel_ONE())
        self.meta3 = MetaLayer(EdgeModel_TWO(), NodeModel_TWO(),
                               GlobalModel_TWO())

        self.encoding_edge_1 = Seq(Lin(NO_EDGE_FEATURES_ONE, ENCODING_EDGE_1),
                                   LeakyReLU(), LayerNorm(ENCODING_EDGE_1),
                                   Lin(ENCODING_EDGE_1,
                                       ENCODING_EDGE_1)).apply(init_weights)

        self.encoding_node_1 = Seq(Lin(NO_NODE_FEATURES_ONE, ENCODING_NODE_1),
                                   LeakyReLU(), LayerNorm(ENCODING_NODE_1),
                                   Lin(ENCODING_NODE_1,
                                       ENCODING_NODE_1)).apply(init_weights)

        self.encoding_edge_2 = Seq(
            Lin(2, ENCODING_EDGE_2), LeakyReLU(),
            Lin(ENCODING_EDGE_2, NO_EDGE_FEATURES_TWO)).apply(init_weights)

        self.encoding_node_2 = Seq(
            Lin(NO_GRAPH_FEATURES_ONE, NO_GRAPH_FEATURES_ONE), LeakyReLU(),
            LayerNorm(NO_GRAPH_FEATURES_ONE),
            Lin(NO_GRAPH_FEATURES_ONE,
                NO_GRAPH_FEATURES_ONE)).apply(init_weights)

        self.mlp_last = Seq(Lin(NO_GRAPH_FEATURES_TWO, NO_GRAPH_FEATURES_TWO),
                            LeakyReLU(), LayerNorm(NO_GRAPH_FEATURES_TWO),
                            Lin(NO_GRAPH_FEATURES_TWO, NO_GRAPH_FEATURES_TWO),
                            LeakyReLU(), LayerNorm(NO_GRAPH_FEATURES_TWO),
                            Lin(NO_GRAPH_FEATURES_TWO, 15)).apply(init_weights)

        self.batch_size = BATCH_SIZE
        self.no_mp_one = NO_MP_ONE
        self.no_mp_two = NO_MP_TWO
Example #8
    def __init__(self, k):
        super(PCI_Net, self).__init__()

        self.k = k

        conv1_out = 64
        nn = Seq(
            Linear(12, conv1_out, bn=True),
            Linear(conv1_out, conv1_out, bn=True),
            Linear(conv1_out, conv1_out, bn=True),
        )

        #nn = Seq(Conv2d(12, conv1_out), ReLU(), Conv2d(conv1_out, conv1_out), ReLU(), Conv2d(conv1_out, conv1_out), ReLU())
        self.conv1 = DualEdgeConv(nn, aggr='max')

        conv2_out = 256
        nn = Seq(Linear(2 * 2 * conv1_out, 128, bn=True),
                 Linear(128, 128, bn=True), Linear(128, conv2_out, bn=True))
        # nn = Seq(Conv2d(2*2*conv1_out, 128), ReLU(), Conv2d(128, 128), ReLU(), Conv2d(128, conv2_out), ReLU())
        self.conv2 = EdgeConv(nn, aggr='max')

        conv3_out = 1024
        #self.conv3 = Conv2d(2*2*conv1_out+conv2_out, conv3_out, bn=True)
        self.conv3 = Linear(2 * 2 * conv1_out + conv2_out, conv3_out, bn=True)

        # Convs at end
        self.conv4 = Conv2d(conv3_out + 2 * 2 * conv1_out + conv2_out,
                            256,
                            bn=True)
        self.conv5 = Conv2d(256, 256, bn=True)
        self.conv6 = Conv2d(256, 128, bn=False, activation_fn=None)
        self.conv7 = Conv2d(128, 6, bn=False, activation_fn=None)
        # self.conv4 = Linear(conv3_out+2*2*conv1_out+conv2_out, 256, bn=True)
        # self.conv5 = Linear(256, 256, bn=True)
        # self.conv6 = Linear(256, 128, bn=False, activation_fn=None)
        # self.conv7 = Linear(128, 6, bn=False, activation_fn=None)

        self._weights_init()
Example #9
    def __init__(self,
                 n_node_features,
                 n_global_features,
                 n_hiddens,
                 n_targets,
                 use_batch_norm=False):
        super(GlobalModel, self).__init__()
        self.global_mlp = Seq(
            Lin(n_global_features + n_node_features, n_hiddens),
            LeakyReLU(),
            Lin(n_hiddens, n_hiddens),
            LeakyReLU(),
            Lin(n_hiddens, n_targets),
        )
Example #10
    def __init__(self, channels, act='relu', norm=None, bias=False, drop=0):
        super(MLP1dLayer, self).__init__()
        m = []
        for i in range(1, len(channels)):
            m.append(Lin(channels[i - 1], channels[i], bias))
            if norm:
                m.append(norm_layer1d(channels[i], norm))  # normalize this layer's width, not the last one's
            if act:
                m.append(act_layer(act))
            if drop > 0:
                m.append(nn.Dropout(drop))

        self.body = Seq(*m)
        self.reset_parameters()
Example #11
    def __init__(self, out_channels, k=30, aggr='max'):
        super(Net, self).__init__()

        self.tconv1 = DynamicEdgeConv(MLP([2 * 3, 64, 128]), k, aggr)
        self.tmlp1 = MLP([128, 1024])
        self.tmlp2 = MLP([1024, 512, 256, 9])

        self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64, 64]), k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 64, 64, 64]), k, aggr)
        self.conv3 = DynamicEdgeConv(MLP([2 * 64, 64, 64, 64]), k, aggr)
        self.lin1 = MLP([64, 1024])

        self.mlp = Seq(MLP([1152, 256, 256]), Dropout(0.5), Lin(256, 128),
                       Dropout(0.5), Lin(128, out_channels))
Example #12
def test_global_attention():
    channels, batch_size = (32, 10)
    gate_nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, 1))
    nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, channels))

    glob = GlobalAttention(gate_nn, nn)
    assert glob.__repr__() == (
        'GlobalAttention(gate_nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=1, bias=True)\n'
        '), nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')

    x = torch.randn((batch_size**2, channels))
    batch = torch.arange(batch_size, dtype=torch.long)
    batch = batch.view(-1, 1).repeat(1, batch_size).view(-1)

    assert glob(x, batch).size() == (batch_size, channels)
    assert glob(x, batch, batch_size + 1).size() == (batch_size + 1, channels)
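For reference, GlobalAttention pools each graph b as a gated sum over its nodes,

    r_b = sum_{n in graph b} softmax(gate_nn(x_n)) * nn(x_n),

where the softmax runs over the nodes of each graph, so the pooled output keeps channels features regardless of graph size.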
Example #13
def test_dynamic_edge_conv_conv():
    in_channels, out_channels = (16, 32)
    num_nodes = 20
    x = torch.randn((num_nodes, in_channels))

    nn = Seq(Lin(2 * in_channels, 32), ReLU(), Lin(32, out_channels))
    conv = DynamicEdgeConv(nn, k=6)
    assert conv.__repr__() == (
        'DynamicEdgeConv(nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '), k=6)')
    assert conv(x).size() == (num_nodes, out_channels)
Example #14
def test_gine_conv_edge_dim():
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    edge_attr = torch.randn(edge_index.size(1), 8)

    nn = Seq(Lin(16, 32), ReLU(), Lin(32, 32))
    conv = GINEConv(nn, train_eps=True, edge_dim=8)
    out = conv(x, edge_index, edge_attr)
    assert out.size() == (4, 32)

    nn = Lin(16, 32)
    conv = GINEConv(nn, train_eps=True, edge_dim=8)
    out = conv(x, edge_index, edge_attr)
    assert out.size() == (4, 32)
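Note: with edge_dim=8, GINEConv inserts an internal linear projection that maps the 8-dimensional edge features to the node feature size before adding them to the neighbor features, which is why edge_attr no longer has to match the 16 input channels here.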
Example #15
    def __init__(self, out_channels, k=10, aggr='max'):
        super(Net, self).__init__()
        self.transform_net = STN3d()
        self.k = k
        self.conv0 = DiffGCNBlock(3, 64, 20, 1)
        self.conv1 = DiffGCNBlock(64, 64, 5, 2, pool=True)
        self.conv2 = DiffGCNBlock(64, 64, 5, 2, pool=True)
        self.conv3 = DiffGCNBlock(64, 128, 5, 2, pool=True)

        self.lin1 = MLP([64 * 3 + 128, 2048])

        self.mlp = Seq(MLP([2048 + 64 * 3 + 128 + 16, 512]), Dropout(0.5),
                       MLP([512, 256]), Dropout(0.5), MLP([256, 128]),
                       Dropout(0.5), Lin(128, out_channels))
Example #16
    def __init__(self, dim_local, dim_global, dim_hidden):
        super(NodeModel, self).__init__()
        self.dim_local = dim_local
        self.dim_global = dim_global
        self.dim_hidden = dim_hidden
        self.dim_concat = self.dim_local + self.dim_global
        self.mlp = Seq(Lin(self.dim_concat, self.dim_hidden),
                       LayerNorm(self.dim_hidden),
                       ReLU(),
                       Lin(self.dim_hidden, self.dim_hidden),
                       LayerNorm(self.dim_hidden),
                       ReLU(),
                       Lin(self.dim_hidden, self.dim_local),
                       LayerNorm(self.dim_local))
Example #17
    def __init__(self, d1=3, d2=50, d3=15):
        super(META5, self).__init__()

        self.edge_mlp = Seq(Lin(d1 * 2, d2), ReLU(), Lin(d2, d3))
        self.node_mlp = Seq(Lin(d1, d2), ReLU(), Lin(d2, d3))
        self.global_mlp = Seq(Lin(2, d2), ReLU(), Lin(d2, d3))

        def edge_model(source, target, edge_attr, u):
            # source, target: [E, F_x], where E is the number of edges.
            # edge_attr: [E, F_e]
            # u: [B, F_u], where B is the number of graphs.
            out = torch.cat([source, target], dim=1)
            #print("edge_model")
            #print(out.size())
            return self.edge_mlp(out)

        def node_model(x, edge_index, edge_attr, u):
            # x: [N, F_x], where N is the number of nodes.
            # edge_index: [2, E] with max entry N - 1.
            # edge_attr: [E, F_e]
            # u: [B, F_u]
            row, col = edge_index
            out = x[col]  # torch.cat over a single tensor was a no-op
            out = self.node_mlp(out)
            return scatter_mean(out, row, dim=0, dim_size=x.size(0))

        def global_model(x, edge_index, edge_attr, u, batch):
            # x: [N, F_x], where N is the number of nodes.
            # edge_index: [2, E] with max entry N - 1.
            # edge_attr: [E, F_e]
            # u: [B, F_u]
            # batch: [N] with max entry B - 1.
            out = torch.cat([u, scatter_mean(x, batch, dim=0)], dim=1)

            return self.global_mlp(out)

        self.op = MetaLayer(edge_model, node_model, global_model)
Example #18
    def __init__(self, psi_1, psi_2, num_steps, k=-1, detach=False):
        super(DGMC, self).__init__()

        self.psi_1 = psi_1
        self.psi_2 = psi_2
        self.num_steps = num_steps
        self.k = k
        self.detach = detach
        self.backend = 'auto'

        self.mlp = Seq(
            Lin(psi_2.out_channels, psi_2.out_channels),
            ReLU(),
            Lin(psi_2.out_channels, 1),
        )
Example #19
    def __init__(self, input_size, embedding_size, n_classes, aggr='max', k=5, pool_op='max', same_size=False):
        super(DEC, self).__init__()
        self.k = k
        self.conv1 = DynamicEdgeConv(MLP([2 * 3, 64, 64, 64]), self.k, aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), self.k, aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool

        # self.mlp = Seq(
        #     MLP([1024, 512]), Dropout(0.5), MLP([512, 256]), Dropout(0.5),
        #     Lin(256, n_classes))
        self.mlp = Seq(
            MLP([1024, 512]),MLP([512, 256]),
            Lin(256, n_classes))
Example #20
def test_point_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))
    pos = torch.rand((num_nodes, 3))

    local_nn = Seq(Lin(in_channels + 3, 32), ReLU(), Lin(32, out_channels))
    global_nn = Seq(Lin(out_channels, out_channels))
    conv = PointConv(local_nn, global_nn)
    assert conv.__repr__() == (
        'PointConv(local_nn=Sequential(\n'
        '  (0): Linear(in_features=19, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '), global_nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    out = conv(x, pos, edge_index)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable(x=x, pos=pos, edge_index=edge_index)
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, pos, edge_index).tolist() == out.tolist()
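The in_features=19 in the repr above is in_channels + 3: PointConv applies local_nn to [x_j, pos_j - pos_i] for every edge, max-aggregates the results at each center node, and finally applies global_nn to the aggregate.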
Example #21
    def __init__(self,
                 input_size,
                 embedding_size,
                 n_classes,
                 dropout=False,
                 k=5,
                 aggr='max',
                 pool_op='max'):
        super(DECSeq, self).__init__()
        self.conv1 = EdgeConv(
            MLP([2 * input_size, 64, 64, 64], batch_norm=True), aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128], batch_norm=True), k,
                                     aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool

        if dropout:
            self.mlp = Seq(MLP([1024, 512], batch_norm=True), Dropout(0.5),
                           MLP([512, 256], batch_norm=True), Dropout(0.5),
                           Lin(256, n_classes))
        else:
            self.mlp = Seq(MLP([1024, 512]), MLP([512, 256]),
                           Lin(256, n_classes))
Example #22
    def __init__(self, node_in, edge_in, leak):
        """
        Basic model for making edge predictions
        
        parameters:
            node_in - number of node features coming in
            edge_in - number of edge features coming in
            leak - leakiness of leakyrelus
        """
        super(EdgeModel, self).__init__()

        self.edge_pred_mlp = Seq(Lin(2 * node_in + edge_in, 64),
                                 LeakyReLU(leak), Lin(64, 32), LeakyReLU(leak),
                                 Lin(32, 16), LeakyReLU(leak), Lin(16, 8),
                                 LeakyReLU(leak), Lin(8, 2))
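A hedged sketch of the forward pass this MLP implies (not shown in the source): each edge is scored from its two endpoint feature vectors plus its own features, hence the 2 * node_in + edge_in input width:

    def forward(self, src, dest, edge_attr, u=None, batch=None):
        return self.edge_pred_mlp(torch.cat([src, dest, edge_attr], dim=1))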
Example #23
def test_edge_conv_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    nn = Seq(Lin(2 * in_channels, 32), ReLU(), Lin(32, out_channels))
    conv = EdgeConv(nn)
    assert conv.__repr__() == (
        'EdgeConv(nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
Example #24
    def __init__(self, config):
        Module.__init__(self)
        self.mlp1_inc = config['n_inc'] + config['e_outc']
        self.mlp1_hs1 = config['node_model_mlp1_hidden_sizes'][0]
        self.mlp1_hs2 = config['node_model_mlp1_hidden_sizes'][1]
        self.mlp2_hs1 = config['node_model_mlp2_hidden_sizes'][0]

        self.dim_out = config['n_outc']
        self.g_inc = config['g_inc']
        self.node_mlp_1 = Seq(Linear(self.mlp1_inc, self.mlp1_hs1),
                              LayerNorm(self.mlp1_hs1), ReLU(),
                              Linear(self.mlp1_hs1, self.mlp1_hs2))

        self.mlp2_inc_uncond = config['n_inc'] + self.mlp1_hs2 + config['u_inc']
        self.mlp2_inc_cond = self.mlp2_inc_uncond + self.mlp1_hs2
Example #25
def test_dense_gin_conv_with_broadcasting():
    batch_size, num_nodes, channels = 8, 3, 16
    nn = Seq(Lin(channels, channels), ReLU(), Lin(channels, channels))
    conv = DenseGINConv(nn)

    x = torch.randn(batch_size, num_nodes, channels)
    adj = torch.Tensor([
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ])

    assert conv(x, adj).size() == (batch_size, num_nodes, channels)
    mask = torch.tensor([1, 1, 1], dtype=torch.bool)
    assert conv(x, adj, mask).size() == (batch_size, num_nodes, channels)
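Note that adj is (3, 3) and mask is (3,) with no batch dimension; DenseGINConv broadcasts both across the leading batch dimension of x, so one shared graph serves all 8 examples in the batch.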
Example #26
    def __init__(self, out_channels=40, k=20, BiLinear=BiLinear, pool='max'):
        super().__init__()

        if pool == 'mean':
            self.pool = 'mean'
            self.ema_max = False
        elif pool == 'max':
            self.pool = 'max'
            self.ema_max = False
        elif pool == 'ema-max':
            self.pool = 'max'
            self.ema_max = True
        self.conv1 = DynamicEdgeConv(
            Seq(Lin(2 * 3, 64),
                BiMLP([64, 64, 64], activation=ReLU, BiLinear=BiLinear)), k,
            self.pool)
        self.conv2 = DynamicEdgeConv(
            BiMLP([2 * 64, 128], activation=ReLU, BiLinear=BiLinear), k,
            self.pool)
        self.lin1 = BiMLP([128 + 64, 1024], activation=ReLU, BiLinear=BiLinear)

        self.mlp = Seq(BiMLP([1024, 512], activation=ReLU, BiLinear=BiLinear),
                       BiMLP([512, 256], activation=ReLU, BiLinear=BiLinear),
                       Lin(256, out_channels))
Example #27
    def __init__(self, k, emb_dims, out_channels=13):
        super().__init__()

        # Global GLEncoder
        self.global_encoder = GlobalFeat(k=k, emb_dims=emb_dims)

        # Classification Head
        self.cls_mlp = Seq(
            self.mlp([emb_dims, 512]),
            nn.Dropout(0.5),
            self.mlp([512, 256]),
            nn.Dropout(0.5),
            Lin(256, out_channels))

        self._initialize_weights()
Example #28
    def __init__(self,
                 input_size,
                 embedding_size,
                 n_classes,
                 dropout=True,
                 k=5,
                 aggr='max',
                 pool_op='max',
                 k_global=25):
        super(DECSeqGlob, self).__init__()
        self.k_global = k_global
        self.conv1 = EdgeConv(MLP([2 * 3, 64, 64, 64]), aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128]), k, aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool
        if dropout:
            self.mlp = Seq(MLP([1024, 512]), Dropout(0.5), MLP([512, 256]),
                           Dropout(0.5), MLP([256, 32]))
        else:
            self.mlp = Seq(MLP([1024, 512]), MLP([512, 256]), MLP([256, 32]))
        self.lin = Lin(256, n_classes)
        # self.conv_glob = EdgeConv(MLP([2 * 32, 32]), aggr)
        self.conv_glob = GATConv(32, 32, heads=8, dropout=0.5, concat=True)
Example #29
File: models.py Project: jdh4/gn
    def __init__(self, n_f, msg_dim, ndim, hidden=300, aggr='add'):
        super(GN, self).__init__(aggr=aggr)  # "Add" aggregation.
        self.msg_fnc = Seq(
            Lin(2 * n_f, hidden),
            ReLU(),
            Lin(hidden, hidden),
            ReLU(),
            Lin(hidden, hidden),
            ReLU(),
            # (Can turn on or off this layer:)
            #             Lin(hidden, hidden),
            #             ReLU(),
            Lin(hidden, msg_dim))

        self.node_fnc = Seq(
            Lin(msg_dim + n_f, hidden),
            ReLU(),
            Lin(hidden, hidden),
            ReLU(),
            Lin(hidden, hidden),
            ReLU(),
            #             Lin(hidden, hidden),
            #             ReLU(),
            Lin(hidden, ndim))
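A hedged sketch of the MessagePassing hooks this constructor implies (method names follow the torch_geometric.nn.MessagePassing API; the source's own methods are not shown):

    def message(self, x_i, x_j):
        # concatenated endpoint features -> message, hence Lin(2 * n_f, hidden)
        return self.msg_fnc(torch.cat([x_i, x_j], dim=1))       # [E, msg_dim]

    def update(self, aggr_out, x=None):
        # aggregated messages plus the node's own features -> new node state
        return self.node_fnc(torch.cat([x, aggr_out], dim=1))   # [N, ndim]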
Example #30
    def __init__(self, cfg):
        super(EdgeMetaModel, self).__init__()

        if 'modules' in cfg:
            self.model_config = cfg['modules']['attention_gnn']
        else:
            self.model_config = cfg

        self.leak = self.model_config.get('leak', 0.1)

        self.node_in = self.model_config.get('node_feats', 16)
        self.edge_in = self.model_config.get('edge_feats', 10)

        self.aggr = self.model_config.get('aggr', 'add')

        self.bn_node = BatchNorm1d(self.node_in)
        self.bn_edge = BatchNorm1d(self.edge_in)

        self.num_mp = self.model_config.get('num_mp', 3)

        self.nn = torch.nn.ModuleList()
        self.en = torch.nn.ModuleList()
        self.layer = torch.nn.ModuleList()
        einput = self.edge_in
        eoutput = max(self.edge_in, 32)
        ninput = self.node_in
        noutput = max(2 * self.node_in, 32)
        for i in range(self.num_mp):
            self.en.append(
                MetaLayer(BilinEdgeModel(ninput, einput, eoutput, self.leak)))
            self.nn.append(
                Seq(Lin(eoutput, ninput), LeakyReLU(self.leak),
                    Lin(ninput, ninput * noutput), LeakyReLU(self.leak)))
            self.layer.append(
                NNConv(ninput, noutput, self.nn[i], aggr=self.aggr))
            ninput = noutput
            einput = eoutput

        # final prediction layer
        pred_cfg = self.model_config.get('pred_model', 'basic')
        if pred_cfg == 'basic':
            self.edge_predictor = MetaLayer(
                EdgePredModel(noutput, eoutput, self.leak))
        elif pred_cfg == 'bilin':
            self.edge_predictor = MetaLayer(
                BilinEdgePredModel(noutput, eoutput, self.leak))
        else:
            raise Exception('unrecognized prediction model: ' + pred_cfg)