Example #1
    def __init__(self, input_dim=4, big_dim=32, hidden_dim=2, aggr='mean'):
        super(EdgeNetDeeper4, self).__init__()

        encoder_nn_1 = nn.Sequential(nn.Linear(2*(input_dim), big_dim),
                                   nn.ReLU(),
                                   nn.Linear(big_dim, big_dim),
                                   nn.ReLU(),
                                   nn.Linear(big_dim, big_dim),
                                   nn.ReLU(),
        )
        encoder_nn_2 = nn.Sequential(nn.Linear(2*(big_dim), big_dim // 2),
                                   nn.ReLU(),
                                   nn.Linear(big_dim // 2, big_dim // 2),
                                   nn.ReLU(),
                                   nn.Linear(big_dim // 2, hidden_dim),
                                   nn.ReLU(),
        )
        decoder_nn_1 = nn.Sequential(nn.Linear(2*(hidden_dim), big_dim),
                                   nn.ReLU(),
                                   nn.Linear(big_dim, big_dim),
                                   nn.ReLU(),
                                   nn.Linear(big_dim, input_dim)
        )

        self.batchnorm = nn.BatchNorm1d(input_dim)

        self.encoder_1 = EdgeConv(nn=encoder_nn_1, aggr=aggr)
        self.encoder_2 = EdgeConv(nn=encoder_nn_2, aggr=aggr)
        self.decoder_1 = EdgeConv(nn=decoder_nn_1, aggr=aggr)
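The excerpt stops before the forward pass; below is a minimal sketch of how these three blocks would typically chain (the Data-style argument and the stacking order are assumptions, not part of the source):

    # Hypothetical forward for EdgeNetDeeper4; not in the excerpt above.
    def forward(self, data):
        x = self.batchnorm(data.x)                   # normalize raw features
        x = self.encoder_1(x, data.edge_index)       # input_dim -> big_dim
        x = self.encoder_2(x, data.edge_index)       # big_dim   -> hidden_dim
        return self.decoder_1(x, data.edge_index)    # hidden_dim -> input_dim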
Example #2
    def __init__(self, input_dim=4, big_dim=32, hidden_dim=2, aggr='mean'):
        super(EdgeNetEmbed, self).__init__()
        self.embed_nn = nn.Sequential(nn.Linear(input_dim, big_dim),
                                      nn.ReLU(),
                                      nn.Linear(big_dim, big_dim),
                                      nn.ReLU(),
                                      nn.Linear(big_dim, big_dim),
                                      nn.ReLU()
        )                                
        encoder_nn = nn.Sequential(nn.Linear(2*(big_dim), big_dim),
                                   nn.ReLU(),
                                   nn.Linear(big_dim, big_dim),
                                   nn.ReLU(),
                                   nn.Linear(big_dim, hidden_dim),
                                   nn.ReLU(),
        )        
        decoder_nn = nn.Sequential(nn.Linear(2*(hidden_dim), big_dim),
                                   nn.ReLU(),
                                   nn.Linear(big_dim, big_dim),
                                   nn.ReLU(),
                                   nn.Linear(big_dim, big_dim),
                                   nn.ReLU()
        )
        self.deembed_nn = nn.Sequential(nn.Linear(big_dim, big_dim),
                                        nn.ReLU(),
                                        nn.Linear(big_dim, big_dim),
                                        nn.ReLU(),
                                        nn.Linear(big_dim, input_dim)
        )                                
        
        self.batchnorm = nn.BatchNorm1d(input_dim)

        self.encoder = EdgeConv(nn=encoder_nn, aggr=aggr)
        self.decoder = EdgeConv(nn=decoder_nn, aggr=aggr)
Example #3
    def __init__(self,
                 input_dim=4,
                 big_dim=32,
                 hidden_dim=2,
                 aggr='mean',
                 emd_modname='EmdNNRel.best.pth'):
        super(EdgeNetEMD, self).__init__()
        encoder_nn = nn.Sequential(
            nn.Linear(2 * (input_dim), big_dim),
            nn.ReLU(),
            nn.Linear(big_dim, big_dim),
            nn.ReLU(),
            nn.Linear(big_dim, hidden_dim),
            nn.ReLU(),
        )

        decoder_nn = nn.Sequential(nn.Linear(2 * (hidden_dim), big_dim),
                                   nn.ReLU(), nn.Linear(big_dim, big_dim),
                                   nn.ReLU(), nn.Linear(big_dim, input_dim))

        self.batchnorm = nn.BatchNorm1d(input_dim)

        self.encoder = EdgeConv(nn=encoder_nn, aggr=aggr)
        self.decoder = EdgeConv(nn=decoder_nn, aggr=aggr)

        emd_model = load_emd_model(
            emd_modname, device='cuda' if torch.cuda.is_available() else 'cpu')
        self.emd_model = emd_model.requires_grad_(False)
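Since the loaded EMD network is frozen with requires_grad_(False), it acts as a fixed metric rather than a trained component. A small sketch (not from the source) showing the practical consequence when building an optimizer:

# Sketch: the frozen EMD scorer contributes no trainable parameters.
model = EdgeNetEMD()
assert all(not p.requires_grad for p in model.emd_model.parameters())
trainable = (p for p in model.parameters() if p.requires_grad)
optimizer = torch.optim.Adam(trainable, lr=1e-3)  # encoder/decoder only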
Example #4
    def __init__(self, out_channels, aggr='sum'):
        super(Net, self).__init__()

        self.conv1 = EdgeConv(MLP([2 * 4, 64, 64]), aggr)
        self.conv2 = EdgeConv(MLP([2 * 64, 64, 64]), aggr)
        self.conv3 = EdgeConv(MLP([2 * 64, 64, 64]), aggr)
        self.lin1 = MLP([3 * 64, 1024])

        self.mlp = Seq(MLP([1024, 256]), Dropout(0.5), MLP([256, 128]),
                       Dropout(0.5), Lin(128, out_channels))
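The widths imply the usual DGCNN pattern: the three EdgeConv outputs (64 features each) are concatenated into lin1's 3 * 64 input, pooled per graph, and classified. A sketch under that assumption (global_max_pool from torch_geometric.nn; the real forward is not shown in the excerpt):

    # Hypothetical forward consistent with the layer widths above.
    def forward(self, x, edge_index, batch):
        x1 = self.conv1(x, edge_index)                    # 4  -> 64
        x2 = self.conv2(x1, edge_index)                   # 64 -> 64
        x3 = self.conv3(x2, edge_index)                   # 64 -> 64
        out = self.lin1(torch.cat([x1, x2, x3], dim=1))   # 3*64 -> 1024
        out = global_max_pool(out, batch)                 # one row per graph
        return self.mlp(out)                              # 1024 -> out_channels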
Example #5
    def __init__(self, input_size, output_size=1, x_col=0, y_col=1, z_col=2, mode='custom', k=4, c=3, device='cuda'):
        ####
        # INPUTS:
        # input_size    : INTEGER - dimension of input tensor
        # output_size   : INTEGER - dimension of output tensor
        # x_col         : INTEGER - column index in input tensor for x-coordinate of DOM position. DEFAULT - 0
        # y_col         : INTEGER - column index in input tensor for y-coordinate of DOM position. DEFAULT - 1
        # z_col         : INTEGER - column index in input tensor for z-coordinate of DOM position. DEFAULT - 2
        # k             : INTEGER - number of neighbours. DEFAULT - 4
        # device        : STRING  - the device ID on which the model is run. DEFAULT - 'cuda'
        # c             : INTEGER - the dimension factor. DEFAULT - 3
        # mode          : STRING  - specifies which version of dynedge to run. ['energy', 'angle', 'pid']
        #                  mode = energy : Regresses energy_log10. Use in conjunction with the 'logcosh' loss function.
        #                  mode = angle  : Regresses either zenith or azimuth. Use in conjunction with 'vonMisesSineCosineLoss'.
        #                  mode = pid    : Use in conjunction with torch.nn.CrossEntropyLoss.
        ####

        super(dynedge, self).__init__()
        self.k = k
        self.mode = mode
        self.device = device
        self.pos_idx = [x_col, y_col, z_col]

        if mode == 'angle':
            output_size = 3            # VonMisesSineCosineLoss requires a three-dimensional output
        if mode == 'energy':
            output_size = 1            # logcosh requires a one-dimensional output
        if mode == 'pid':
            output_size = 2            # CrossEntropyLoss requires a two-dimensional output

        # output_size must be set before it is baked into the layer widths
        l1, l2, l3, l4, l5, l6, l7 = input_size, c*16*2, c*32*2, c*42*2, c*32*2, c*16*2, output_size
        
        self.nn_conv1 = torch.nn.Sequential(torch.nn.Linear(l1*2, l2), torch.nn.LeakyReLU(),
                                            torch.nn.Linear(l2, l3), torch.nn.LeakyReLU()).to(device)
        self.conv_add = EdgeConv(self.nn_conv1, aggr='add')

        self.nn_conv2 = torch.nn.Sequential(torch.nn.Linear(l3*2, l4), torch.nn.LeakyReLU(),
                                            torch.nn.Linear(l4, l3), torch.nn.LeakyReLU()).to(device)
        self.conv_add2 = EdgeConv(self.nn_conv2, aggr='add')

        self.nn_conv3 = torch.nn.Sequential(torch.nn.Linear(l3*2, l4), torch.nn.LeakyReLU(),
                                            torch.nn.Linear(l4, l3), torch.nn.LeakyReLU()).to(device)
        self.conv_add3 = EdgeConv(self.nn_conv3, aggr='add')

        self.nn_conv4 = torch.nn.Sequential(torch.nn.Linear(l3*2, l4), torch.nn.LeakyReLU(),
                                            torch.nn.Linear(l4, l3), torch.nn.LeakyReLU()).to(device)
        self.conv_add4 = EdgeConv(self.nn_conv4, aggr='add')

        self.nn1 = torch.nn.Linear(l3*4 + l1, l4)
        self.nn2 = torch.nn.Linear(l4, l5)
        self.nn3 = torch.nn.Linear(4*l5, l6)
        self.nn4 = torch.nn.Linear(l6, l7)
        self.relu = torch.nn.LeakyReLU()
        self.tanh = torch.nn.Tanh()
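pos_idx collects the coordinate columns so the k-NN graph can be built from DOM positions alone. A sketch of how the forward would typically start (knn_graph from torch_geometric.nn; everything below is an assumption, since the forward is not in the excerpt):

    # Hypothetical start of dynedge.forward: k-NN graph over positions only.
    def forward(self, data):
        x, batch = data.x, data.batch
        edge_index = knn_graph(x[:, self.pos_idx], k=self.k, batch=batch)
        a1 = self.conv_add(x, edge_index)     # l1 -> l3
        a2 = self.conv_add2(a1, edge_index)   # l3 -> l3
        a3 = self.conv_add3(a2, edge_index)   # l3 -> l3
        a4 = self.conv_add4(a3, edge_index)   # l3 -> l3
        # nn1 expects the skip concatenation: l3*4 + l1 input features
        h = self.relu(self.nn1(torch.cat([x, a1, a2, a3, a4], dim=1)))
        return self.relu(self.nn2(h))         # per-graph pooling into nn3/nn4 omitted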
Example #6
    def __init__(self, input_size, embedding_size, n_classes, aggr='max', k=5, pool_op='max', same_size=False):
        super(ECnet, self).__init__()
        self.conv1 = EdgeConv(MLP([2 * 3, 64, 64, 64]), aggr)
        self.conv2 = EdgeConv(MLP([2 * 64, 128]), aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool

        self.mlp = Seq(
            MLP([1024, 512]), Dropout(0.5), MLP([512, 256]), Dropout(0.5),
            Lin(256, n_classes))
Example #7
    def __init__(self, cfg):
        super(NodeEConvModel, self).__init__()

        if 'modules' in cfg:
            self.model_config = cfg['modules']['node_econv']
        else:
            self.model_config = cfg

        self.node_in = self.model_config.get('node_feats', 16)
        self.edge_in = self.model_config.get('edge_feats', 10)

        # first layer increases number of features from 4 to 16
        self.econv_mlp1 = Seq(Lin(2 * self.node_in, 32), LeakyReLU(0.1),
                              Lin(32, 16), LeakyReLU(0.1))
        self.econv1 = EdgeConv(self.econv_mlp1, aggr='max')

        # second layer increases number of features from 16 to 32
        self.econv_mlp2 = Seq(Lin(32, 64), LeakyReLU(0.1), Lin(64, 32),
                              LeakyReLU(0.1))
        self.econv2 = EdgeConv(self.econv_mlp2, aggr='max')

        # third layer increases number of features from 32 to 64
        self.econv_mlp3 = Seq(Lin(64, 128), LeakyReLU(0.1), Lin(128, 64),
                              LeakyReLU(0.1))
        self.econv3 = EdgeConv(self.econv_mlp3, aggr='max')

        # final prediction layer
        class EdgeModel(torch.nn.Module):
            def __init__(self):
                super(EdgeModel, self).__init__()

                self.edge_mlp = Seq(Lin(128, 64), LeakyReLU(0.12), Lin(64, 16))

            def forward(self, src, dest, edge_attr, u, batch):
                return self.edge_mlp(torch.cat([src, dest], dim=1))

        class NodeModel(torch.nn.Module):
            def __init__(self):
                super(NodeModel, self).__init__()

                self.node_mlp_1 = Seq(Lin(80, 64), LeakyReLU(0.12),
                                      Lin(64, 32))
                self.node_mlp_2 = Seq(Lin(32, 16), LeakyReLU(0.12), Lin(16, 2))
                #self.node_mlp = Seq(Lin(64, 32), LeakyReLU(0.12), Lin(32, 16), LeakyReLU(0.12), Lin(32, 2))

            def forward(self, x, edge_index, edge_attr, u, batch):
                row, col = edge_index
                out = torch.cat([x[col], edge_attr], dim=1)
                out = self.node_mlp_1(out)
                out = scatter_mean(out, row, dim=0, dim_size=x.size(0))
                return self.node_mlp_2(out)

        self.predictor = MetaLayer(EdgeModel(), NodeModel())
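MetaLayer runs the EdgeModel over every edge and then the NodeModel over every node. A sketch of the call (signature per torch_geometric.nn.MetaLayer; the surrounding forward is not shown here):

    # Hypothetical use: EdgeModel maps cat(src, dest) = 64 + 64 -> 16 per edge,
    # then NodeModel aggregates the fresh 16-dim edge features per node.
    x, edge_attr, u = self.predictor(x, edge_index, edge_attr, u=None, batch=batch)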
Example #8
    def __init__(self, cfg):
        super(EdgeConvModel, self).__init__()

        if 'modules' in cfg:
            self.model_config = cfg['modules']['attention_gnn']
        else:
            self.model_config = cfg

        self.aggr = self.model_config.get('aggr', 'max')
        self.leak = self.model_config.get('leak', 0.1)

        self.node_in = self.model_config.get('node_feats', 16)
        self.edge_in = self.model_config.get('edge_feats', 10)

        # perform batch normalization
        self.bn_node = BatchNorm1d(self.node_in)
        self.bn_edge = BatchNorm1d(self.edge_in)

        # go from 16 to 24 node features
        ninput = self.node_in
        noutput = 24
        self.nn0 = Seq(Lin(2 * ninput, 2 * noutput), LeakyReLU(self.leak),
                       Lin(2 * noutput, noutput), LeakyReLU(self.leak),
                       Lin(noutput, noutput))
        self.layer0 = EdgeConv(self.nn0, aggr=self.aggr)

        # go from 24 to 32 node features
        ninput = 24
        noutput = 32
        self.nn1 = Seq(Lin(2 * ninput, 2 * noutput), LeakyReLU(self.leak),
                       Lin(2 * noutput, noutput), LeakyReLU(self.leak),
                       Lin(noutput, noutput))
        self.layer1 = EdgeConv(self.nn1, aggr=self.aggr)

        # go from 32 to 64 node features
        ninput = 32
        noutput = 64
        self.nn2 = Seq(Lin(2 * ninput, 2 * noutput), LeakyReLU(self.leak),
                       Lin(2 * noutput, noutput), LeakyReLU(self.leak),
                       Lin(noutput, noutput))
        self.layer2 = EdgeConv(self.nn2, aggr=self.aggr)

        # final prediction layer
        pred_cfg = self.model_config.get('pred_model', 'basic')
        if pred_cfg == 'basic':
            self.edge_predictor = MetaLayer(
                EdgeModel(noutput, self.edge_in, self.leak))
        elif pred_cfg == 'bilin':
            self.edge_predictor = MetaLayer(
                BilinEdgeModel(noutput, self.edge_in, self.leak))
        else:
            raise Exception('unrecognized prediction model: ' + pred_cfg)
Example #9
    def __init__(self,
                 input_dim=3,
                 hidden_dim=8,
                 output_dim=4,
                 n_iters=1,
                 aggr='add',
                 norm=torch.tensor(
                     [1. / 500., 1. / 500., 1. / 54., 1 / 25., 1. / 1000.])):
        super(EdgeNetWithCategoriesJittable, self).__init__()

        self.datanorm = nn.Parameter(norm)

        start_width = 2 * (hidden_dim + input_dim)
        middle_width = (3 * hidden_dim + 2 * input_dim) // 2

        self.n_iters = n_iters

        self.inputnet = nn.Sequential(
            nn.Linear(input_dim, 2 * hidden_dim),
            nn.Tanh(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.Tanh(),
            nn.Linear(2 * hidden_dim, hidden_dim),
            nn.Tanh(),
        )

        self.edgenetwork = nn.Sequential(
            nn.Linear(2 * n_iters * hidden_dim, 2 * hidden_dim),
            nn.ELU(),
            nn.Linear(2 * hidden_dim, 2 * hidden_dim),
            nn.ELU(),
            nn.Linear(2 * hidden_dim, output_dim),
            nn.LogSoftmax(dim=-1),
        )

        convnn = nn.Sequential(
            nn.Linear(start_width, middle_width),
            nn.ELU(),
            #nn.Dropout(p=0.5, inplace=False),
            nn.Linear(middle_width, hidden_dim),
            nn.ELU())
        self.firstnodenetwork = EdgeConv(nn=convnn, aggr=aggr).jittable()
        self.nodenetwork = nn.ModuleList()
        for i in range(n_iters - 1):
            convnn = nn.Sequential(
                nn.Linear(start_width, middle_width),
                nn.ELU(),
                #nn.Dropout(p=0.5, inplace=False),
                nn.Linear(middle_width, hidden_dim),
                nn.ELU())
            self.nodenetwork.append(EdgeConv(nn=convnn, aggr=aggr).jittable())
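The .jittable() calls return EdgeConv variants whose message passing TorchScript can compile (available in the older PyTorch Geometric releases this code targets; newer releases deprecate the call). A sketch of the export this enables:

# Sketch: scripting the model, which plain (non-jittable) EdgeConv would block.
model = EdgeNetWithCategoriesJittable(n_iters=2)
scripted = torch.jit.script(model)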
Example #10
    def __init__(self,
                 num_hits,
                 node_feat_size,
                 num_classes=5,
                 device=torch.device('cpu')):
        super(ParticleNet, self).__init__()
        self.num_hits = num_hits
        self.node_feat_size = node_feat_size
        self.num_classes = num_classes
        self.device = device

        self.k = 16
        self.num_edge_convs = 3
        self.kernel_sizes = [64, 128, 256]
        self.fc_size = 256
        self.dropout = 0.1

        self.edge_nets = nn.ModuleList()
        self.edge_convs = nn.ModuleList()

        self.kernel_sizes.insert(0, self.node_feat_size)
        self.output_sizes = np.cumsum(self.kernel_sizes)

        self.edge_nets.append(
            ParticleNetEdgeNet(self.node_feat_size, self.kernel_sizes[1]))
        self.edge_convs.append(EdgeConv(self.edge_nets[-1], aggr='mean'))

        for i in range(1, self.num_edge_convs):
            self.edge_nets.append(
                ParticleNetEdgeNet(self.output_sizes[i], self.kernel_sizes[
                    i + 1]))  # adding kernel sizes because of skip connections
            self.edge_convs.append(EdgeConv(self.edge_nets[-1], aggr='mean'))

        self.fc1 = nn.Sequential(nn.Linear(self.output_sizes[-1],
                                           self.fc_size))

        self.dropout_layer = nn.Dropout(p=self.dropout)

        self.fc2 = nn.Linear(self.fc_size, self.num_classes)

        # logging.info("edge nets: ")
        # logging.info(self.edge_nets)

        logging.info("edge_convs: ")
        logging.info(self.edge_convs)

        logging.info("fc1: ")
        logging.info(self.fc1)

        logging.info("fc2: ")
        logging.info(self.fc2)
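output_sizes is the cumulative sum of kernel_sizes, which matches a forward where each EdgeConv input is the concatenation of all earlier representations (the skip connections mentioned in the comment). A sketch under that assumption (knn_graph and global_mean_pool from torch_geometric.nn; the real forward is not in the excerpt):

    # Hypothetical forward consistent with the cumulative skip connections.
    def forward(self, x, batch):
        for edge_conv in self.edge_convs:
            edge_index = knn_graph(x, k=self.k, batch=batch)
            x = torch.cat([x, edge_conv(x, edge_index)], dim=1)  # widen by skip
        x = global_mean_pool(x, batch)          # [B, output_sizes[-1]]
        x = self.dropout_layer(self.fc1(x))
        return self.fc2(x)                      # [B, num_classes]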
Example #11
    def __init__(
        self,
        input_coord_dim=2,
        input_features_dim=5,
        output_dim=2,
        aggr='mean',
    ):
        super(ParticleNet, self).__init__()
        self.input_features_dim = input_features_dim
        self.input_coord_dim = input_coord_dim
        self.output_dim = output_dim
        self.k = 4
        self.aggr = aggr

        # convnn1 = nn.Sequential(
        #     nn.Linear(self.input_features_dim, 32),
        #     nn.ELU(),
        #     nn.Linear(32, 64),
        #     nn.ELU(),
        #     )
        convnn1 = nn.Sequential(
            nn.Linear(64, 32),
            nn.ELU(),
            nn.Linear(32, 5),
            nn.ELU(),
        )
        self.edgeconv1 = EdgeConv(nn=convnn1, aggr=aggr)

        convnn2 = nn.Sequential(
            nn.Linear(64, (64 + 128) // 2),
            nn.ELU(),
            nn.Linear((64 + 128) // 2, 128),
            nn.ELU(),
        )
        self.edgeconv2 = EdgeConv(nn=convnn2, aggr=aggr)

        convnn3 = nn.Sequential(
            nn.Linear(128, (128 + 256) // 2),
            nn.ELU(),
            nn.Linear((128 + 256) // 2, 256),
            nn.ELU(),
        )
        self.edgeconv3 = EdgeConv(nn=convnn3, aggr=aggr)

        self.ec_output_dim = 5 + 64 + 128 + 256  # Include all the shortcuts
        self.output = nn.Sequential(
            nn.Linear(self.ec_output_dim, self.ec_output_dim), nn.ELU(),
            nn.Linear(self.ec_output_dim, self.ec_output_dim // 2), nn.ELU(),
            nn.Linear(self.ec_output_dim // 2, self.output_dim))
Example #12
    def __init__(self, num_classes):
        super(Net, self).__init__()

        nn = Seq(Lin(6, 64), ReLU(), Lin(64, 64), ReLU(), Lin(64, 64), ReLU())
        self.conv1 = EdgeConv(nn, aggr='max')

        nn = Seq(Lin(128, 128), ReLU(), Lin(128, 128), ReLU(), Lin(128, 256),
                 ReLU())
        self.conv2 = EdgeConv(nn, aggr='max')

        self.lin0 = Lin(256, 512)

        self.lin1 = Lin(512, 256)
        self.lin2 = Lin(256, 256)
        self.lin3 = Lin(256, num_classes)
Example #13
def test_edge_conv_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))

    nn = Seq(Lin(2 * in_channels, 32), ReLU(), Lin(32, out_channels))
    conv = EdgeConv(nn)
    assert conv.__repr__() == (
        'EdgeConv(nn=Sequential(\n'
        '  (0): Linear(in_features=32, out_features=32, bias=True)\n'
        '  (1): ReLU()\n'
        '  (2): Linear(in_features=32, out_features=32, bias=True)\n'
        '))')
    assert conv(x, edge_index).size() == (num_nodes, out_channels)
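For reference, EdgeConv computes nn([x_i, x_j - x_i]) for each source j feeding target i and aggregates with max by default, so the test above can be extended with a by-hand check of node 0 (whose incoming sources are nodes 1, 2 and 3):

# Manual check of the EdgeConv message: max_j nn([x_i, x_j - x_i]).
out = conv(x, edge_index)
msgs = torch.stack([nn(torch.cat([x[0], x[j] - x[0]])) for j in (1, 2, 3)])
assert torch.allclose(out[0], msgs.max(dim=0).values, atol=1e-6)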
Example #14
    def __init__(self,
                 input_size,
                 embedding_size,
                 n_classes,
                 dropout=True,
                 k=5,
                 aggr='max',
                 pool_op='max'):
        super(DECSeq, self).__init__()
        # self.bn0 = BN(input_size)
        # self.bn1 = BN(64)
        # self.bn2 = BN(128)
        self.conv1 = EdgeConv(MLP([2 * 3, 64, 64, 64], batch_norm=True), aggr)
        self.conv2 = DynamicEdgeConv(MLP([2 * 64, 128], batch_norm=True), k,
                                     aggr)
        self.lin1 = MLP([128 + 64, 1024])
        if pool_op == 'max':
            self.pool = global_max_pool

        if dropout:
            self.mlp = Seq(MLP([1024, 512], batch_norm=True), Dropout(0.5),
                           MLP([512, 256], batch_norm=True), Dropout(0.5),
                           Lin(256, n_classes))
        else:
            self.mlp = Seq(MLP([1024, 512]), MLP([512, 256]),
                           Lin(256, n_classes))
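conv1 consumes the given edge_index while DynamicEdgeConv rebuilds its own k-NN graph in feature space, so a forward consistent with lin1's 128 + 64 input would look like this (a sketch; the actual forward is not part of the excerpt):

    # Hypothetical forward: static EdgeConv, then dynamic k-NN EdgeConv.
    def forward(self, x, edge_index, batch):
        x1 = self.conv1(x, edge_index)                  # 3  -> 64
        x2 = self.conv2(x1, batch)                      # 64 -> 128, own k-NN graph
        out = self.lin1(torch.cat([x1, x2], dim=1))     # 128 + 64 -> 1024
        out = self.pool(out, batch)
        return self.mlp(out)                            # 1024 -> n_classes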
Example #15
    def __init__(self, node_size, input_feature, num_classes):
        super(NetGAT, self).__init__()
        self.node_per_graph = node_size

        hidden_size = 256
        gat_head = 8
        head_size = hidden_size // gat_head
        self.input_feature = input_feature

        # self.linprev = MLP([input_feature, 64, 64, 64])
        self.linprev = EdgeConv(MLP([input_feature * 2, 64, 64, 64]),
                                aggr='max')

        self.conv1 = GATConv(64, head_size, gat_head)
        self.bn1 = torch.nn.BatchNorm1d(hidden_size)
        self.lin1 = torch.nn.Linear(64, hidden_size)

        self.conv2 = GATConv(hidden_size, head_size, gat_head)
        self.bn2 = torch.nn.BatchNorm1d(hidden_size)
        self.lin2 = torch.nn.Linear(hidden_size, hidden_size)

        self.conv3 = GATConv(hidden_size, head_size, gat_head)
        self.bn3 = torch.nn.BatchNorm1d(hidden_size)
        self.lin3 = torch.nn.Linear(hidden_size, hidden_size)

        self.conv4 = GATConv(hidden_size, head_size, gat_head)
        self.bn4 = torch.nn.BatchNorm1d(hidden_size)
        self.lin4 = torch.nn.Linear(hidden_size, hidden_size)

        self.mlp = Seq(Lin(2048, 512), Dropout(0.4), Lin(512, 256),
                       Dropout(0.4), Lin(256, num_classes))
Example #16
    def __init__(self,
                 input_dim,
                 hidden_dim,
                 edge_dim=1,
                 output_dim=10,
                 num_layers_emulsion=3,
                 num_layers_edge_conv=3,
                 bias_init=0., **kwargs):
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        previous_dim = input_dim
        self._layers = nn.ModuleList()
        for i in range(num_layers_emulsion):
            self._layers.append(
                nn.Sequential(nn.Linear(previous_dim, self.hidden_dim), nn.ReLU())
            )
            self._layers.append(
                EmulsionConv(self.hidden_dim, self.hidden_dim, edge_dim=edge_dim)
            )
            previous_dim = self.hidden_dim

        for i in range(num_layers_edge_conv):
            if num_layers_emulsion == 0 and i == 0:
                self._layers.append(
                    nn.Sequential(nn.Linear(previous_dim, self.hidden_dim), nn.ReLU())
                )
            self._layers.append(
                EdgeConv(Sequential(nn.Linear(2 * self.hidden_dim, self.hidden_dim), nn.ReLU()), 'max')
            )

        self.output = nn.Linear(self.hidden_dim, output_dim)
        init_bias_model(self, b=0.)
Example #17
    def make_edge_conv_layers_(self):
        layers = []
        dims = self.edge_conv_dims
        for i in range(len(dims) - 1):
            mlp_dims = [dims[i] * 2, dims[i + 1]]
            layers.append(EdgeConv(nn=MLP(mlp_dims), aggr=self.conv_aggr))
        return nn.Sequential(*layers)
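Note that nn.Sequential here only serves as a container: its forward cannot thread the extra edge_index argument through EdgeConv layers, so the blocks have to be applied in an explicit loop, e.g. (a sketch; the attribute name holding the result is hypothetical):

    # Sketch: applying the stacked EdgeConv blocks built above.
    def apply_edge_convs(self, x, edge_index):
        for conv in self.edge_convs:   # hypothetical attribute name
            x = conv(x, edge_index)
        return x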
Example #18
    def __init__(self, input_dim=5, hidden_dim=64, output_dim=1, k=16, aggr='add',
                 norm=torch.tensor([1./500., 1./500., 1./54., 1/25., 1./1000.])):
        super(DynamicReductionNetworkJittable, self).__init__()

        self.datanorm = nn.Parameter(norm)
        
        self.k = k
        start_width = 2 * hidden_dim
        middle_width = 3 * hidden_dim // 2

        
        self.inputnet = nn.Sequential(
            nn.Linear(input_dim, hidden_dim*2),
            nn.ELU(),
            nn.Linear(hidden_dim*2, hidden_dim*2),
            nn.ELU(),
            nn.Linear(hidden_dim*2, hidden_dim),
            nn.ELU(),
        )

        convnn1 = nn.Sequential(nn.Linear(start_width, middle_width),
                                nn.ELU(),
                                nn.Linear(middle_width, hidden_dim),
                                nn.ELU())
        convnn2 = nn.Sequential(nn.Linear(start_width, middle_width),
                                nn.ELU(),
                                nn.Linear(middle_width, hidden_dim),
                                nn.ELU())
        convnn3 = nn.Sequential(nn.Linear(start_width, middle_width),
                                nn.ELU(),
                                nn.Linear(middle_width, hidden_dim),
                                nn.ELU())

        self.edgeconv1 = EdgeConv(nn=convnn1, aggr=aggr).jittable()
        self.edgeconv2 = EdgeConv(nn=convnn2, aggr=aggr).jittable()
        self.edgeconv3 = EdgeConv(nn=convnn3, aggr=aggr).jittable()

        self.output = nn.Sequential(nn.Linear(hidden_dim, hidden_dim),
                                    nn.ELU(),
                                    nn.Linear(hidden_dim, hidden_dim//2),
                                    nn.ELU(),
                                    nn.Linear(hidden_dim//2, output_dim))
Example #19
    def __init__(self, cfg):
        super(NodeEConvModel, self).__init__()

        # first layer increases number of features from 4 to 16
        #self.econv_mlp1 = Seq(Lin(32,32), LeakyReLU(0.1), Lin(32,16), LeakyReLU(0.1))
        self.econv_mlp1 = Seq(Lin(8, 32), LeakyReLU(0.1), Lin(32, 16),
                              LeakyReLU(0.1))
        self.econv1 = EdgeConv(self.econv_mlp1, aggr='max')

        # second layer increases number of features from 16 to 32
        self.econv_mlp2 = Seq(Lin(32, 64), LeakyReLU(0.1), Lin(64, 32),
                              LeakyReLU(0.1))
        self.econv2 = EdgeConv(self.econv_mlp2, aggr='max')

        # third layer increases number of features from 32 to 64
        self.econv_mlp3 = Seq(Lin(64, 128), LeakyReLU(0.1), Lin(128, 64),
                              LeakyReLU(0.1))
        self.econv3 = EdgeConv(self.econv_mlp3, aggr='max')

        # final prediction layer
        self.edge_mlp = Seq(Lin(128, 64), LeakyReLU(0.12), Lin(64, 16))
        self.node_mlp_1 = Seq(Lin(80, 64), LeakyReLU(0.12), Lin(64, 32))
        self.node_mlp_2 = Seq(Lin(32, 16), LeakyReLU(0.12), Lin(16, 2))

        #self.node_mlp = Seq(Lin(64, 32), LeakyReLU(0.12), Lin(32, 16), LeakyReLU(0.12), Lin(32, 2))

        def edge_model(src, target, edge_attr, u, batch):
            # source, target: [E, F_x], where E is the number of edges.
            # edge_attr: [E, F_e]
            # u: [B, F_u], where B is the number of graphs.
            # batch: [E] with max entry B - 1.
            out = torch.cat([src, target], 1)
            return self.edge_mlp(out)

        def node_model(x, edge_index, edge_attr, u, batch):
            # x: [N, F_x], where N is the number of nodes.
            # edge_index: [2, E] with max entry N - 1.
            # edge_attr: [E, F_e]
            # u: [B, F_u]
            # batch: [N] with max entry B - 1.
            row, col = edge_index
            out = torch.cat([x[col], edge_attr], dim=1)
            out = self.node_mlp_1(out)
            out = scatter_mean(out, row, dim=0, dim_size=x.size(0))
            return self.node_mlp_2(out)

        self.predictor = MetaLayer(edge_model, node_model, None)
Example #20
    def build_block(self, in_channels, out_channels, hiddens):
        mlp = nn.Sequential(
            nn.Linear(2 * in_channels, hiddens),
            nn.ReLU(),
            nn.Linear(hiddens, out_channels),
        )
        conv = EdgeConv(nn=mlp, aggr=self.aggr)
        return conv
Example #21
    def __init__(self, num_classes):
        super(Net, self).__init__()

        n1 = Seq(Lin(6, 64), nn.BatchNorm1d(64), ReLU(), Lin(64, 64),
                 nn.BatchNorm1d(64), ReLU(), Lin(64, 64), nn.BatchNorm1d(64),
                 ReLU())
        self.conv1 = EdgeConv(n1, aggr='max')

        n2 = Seq(Lin(128, 128), nn.BatchNorm1d(128), ReLU(), Lin(128, 128),
                 nn.BatchNorm1d(128), ReLU(), Lin(128, 256),
                 nn.BatchNorm1d(256), ReLU())
        self.conv2 = EdgeConv(n2, aggr='max')

        self.lin0 = Lin(256, 512)

        self.lin1 = Lin(832, 256)
        self.lin2 = Lin(256, 256)
Example #22
    def __init__(self):
        super(Net, self).__init__()
        #self.conv1 = GINConv(dataset.num_node_features, dataset.num_classes)
        #self.conv2 = SAGEConv(16, dataset.num_classes)
        nn1 = Sequential(Linear(2 * (dataset.num_node_features), 32),
                         ReLU(), Linear(32, dataset.num_classes))
        #nn1 = Sequential(Linear(dataset.num_node_features, dataset.num_classes))
        self.conv1 = EdgeConv(nn1)
Example #23
    def make_edge_conv_layers_(self):
        """Define structure of the EdgeConv Blocks
        edge_conv_dims: [[convi_mlp_dims]], e.g., [[3, 64], [64, 128]]
        """
        layers = []
        for dims in self.edge_conv_dims:
            mlp_dims = [dims[0] * 2] + dims[1::]
            layers.append(EdgeConv(nn=MLP(mlp_dims), aggr=self.conv_aggr))
        return nn.Sequential(*layers)
Example #24
    def __init__(self,
                 n_features,
                 n_labels,
                 classification=False,
                 width=64,
                 conv_depth=7,
                 point_depth=1,
                 lin_depth=7,
                 aggr='max'):
        super(EnsembleNet, self).__init__()
        self.classification = classification
        self.n_features = n_features
        self.n_labels = n_labels
        self.lin_depth = lin_depth
        self.conv_depth = conv_depth
        self.width = width
        self.point_depth = point_depth
        self.aggr = aggr
        n_intermediate = self.width

        self.conv1 = TAGConv(self.n_features, n_intermediate, 2)
        self.convfkt = torch.nn.ModuleList([
            TAGConv(n_intermediate, n_intermediate, 2)
            for i in range(self.conv_depth - 1)
        ])

        self.point1 = EdgeConv(
            LNN([2 * n_features, n_intermediate, n_intermediate]), self.aggr)
        self.pointfkt = torch.nn.ModuleList([
            EdgeConv(LNN([2 * n_intermediate, n_intermediate]), self.aggr)
            for i in range(self.point_depth - 1)
        ])

        n_intermediate2 = 2 * self.conv_depth * n_intermediate + 2 * self.point_depth * n_intermediate
        self.dim2 = n_intermediate2
        self.batchnorm1 = BatchNorm1d(n_intermediate2)
        self.linearfkt = torch.nn.ModuleList([
            torch.nn.Linear(n_intermediate2, n_intermediate2)
            for i in range(self.lin_depth)
        ])
        self.drop = torch.nn.ModuleList(
            [torch.nn.Dropout(.3) for i in range(self.lin_depth)])
        self.out = torch.nn.Linear(n_intermediate2, self.n_labels)
        self.out2 = torch.nn.Linear(self.n_labels, self.n_labels)
Example #25
    def __init__(self,
                 input_dim=4,
                 hidden_dim=64,
                 output_dim=2,
                 k=16,
                 aggr='add',
                 norm=torch.tensor(
                     [1. / 1000., 1. / 10., 1. / 3.15, 1 / 3000.])):
        super(DynamicReductionNetwork, self).__init__()

        self.datanorm = nn.Parameter(norm)

        self.k = k
        start_width = 2 * hidden_dim
        middle_width = 3 * hidden_dim // 2

        self.inputnet = nn.Sequential(nn.BatchNorm1d(input_dim),
                                      nn.Linear(input_dim, hidden_dim // 2),
                                      nn.ELU(),
                                      nn.Linear(hidden_dim // 2, hidden_dim),
                                      nn.ELU(),
                                      nn.Linear(hidden_dim, hidden_dim),
                                      nn.ELU())

        convnn1 = nn.Sequential(
            nn.Linear(start_width, middle_width),
            nn.ELU(),
            nn.Linear(middle_width, hidden_dim),
            nn.ELU(),
        )
        convnn2 = nn.Sequential(
            nn.Linear(start_width, middle_width),
            nn.ELU(),
            nn.Linear(middle_width, hidden_dim),
            nn.ELU(),
        )
        self.edgeconv1 = EdgeConv(nn=convnn1, aggr=aggr)
        self.edgeconv2 = EdgeConv(nn=convnn2, aggr=aggr)

        self.output = nn.Sequential(nn.Linear(hidden_dim, hidden_dim),
                                    nn.ELU(),
                                    nn.Linear(hidden_dim, hidden_dim // 2),
                                    nn.ELU(),
                                    nn.Linear(hidden_dim // 2, output_dim))
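DynamicReductionNetwork forwards conventionally alternate k-NN graph building, EdgeConv, and graph coarsening; a rough sketch in that pattern (knn_graph, graclus, max_pool and global_max_pool from torch_geometric; the real forward is not part of this excerpt):

    # Hypothetical forward in the usual dynamic-reduction pattern.
    def forward(self, data):
        data.x = self.datanorm * data.x
        data.x = self.inputnet(data.x)
        data.edge_index = knn_graph(data.x, self.k, data.batch, loop=False)
        data.x = self.edgeconv1(data.x, data.edge_index)
        cluster = graclus(data.edge_index, num_nodes=data.x.size(0))
        data = max_pool(cluster, data)        # coarsen nodes and edges
        data.edge_index = knn_graph(data.x, self.k, data.batch, loop=False)
        data.x = self.edgeconv2(data.x, data.edge_index)
        x = global_max_pool(data.x, data.batch)
        return self.output(x)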
Example #26
    def make_edge_conv_layers_(self):
        """Define structure of the EdgeConv Blocks
        edge_conv_dims: [[convi_mlp_dims]], e.g., [[3, 64], [64, 128]]
        """
        layers = []
        dims = self.edge_conv_dims
        for i in range(len(dims) - 1):
            mlp_dims = [dims[i] * 2, dims[i + 1]]
            layers.append(EdgeConv(nn=MLP(mlp_dims), aggr=self.conv_aggr))
        return nn.Sequential(*layers)
Example #27
    def __init__(self):
        super(Net, self).__init__()

        self.eg1 = EdgeConv(
            Seq(Lin(dataset.num_features * 2, 32), ReLU(), Lin(32, 16), ReLU()), 'max')

        self.eg2 = EdgeConv(
            Seq(Lin(32, 32), ReLU(), Lin(32, 16), ReLU()), 'max')

        self.eg3 = EdgeConv(
            Seq(Lin(32, 32), ReLU(), Lin(32, 32), ReLU()), 'max')

        # self.eg4 = EdgeConv(
        #     Seq(Lin(32, 32), ReLU(), Lin(32, 16), ReLU()), 'max')
        #
        # self.eg5 = EdgeConv(Seq(Lin(32, 32), ReLU(), Lin(32, 32), ReLU()), 'max')

        self.lin1 = Lin(32, 16)
        self.lin2 = Lin(16, dataset.num_classes)
Example #28
    def __init__(self, global_aggr='max', output_dim=3):
        super(BaseGNN, self).__init__()

        self.output_dim = output_dim

        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1, bias=True),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  # [8, 8]
            nn.Conv2d(16, 32, 3, padding=1, bias=True),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  # [4, 4]
            nn.Conv2d(32, 64, 3, padding=1, bias=False),
            nn.GroupNorm(4, 64),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  # [2, 2]
            nn.Conv2d(64, 128, 3, padding=1, bias=False),
            nn.GroupNorm(8, 128),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),  # [1, 1]
        )

        local_nn = nn.Sequential(
            nn.Linear((128 + 4) * 2, 128, bias=False),
            nn.GroupNorm(8, 128),
            nn.ReLU(),
            nn.Linear(128, 128, bias=False),
            nn.GroupNorm(8, 128),
            nn.ReLU(),
            nn.Linear(128, 128, bias=False),
            nn.GroupNorm(8, 128),
            nn.ReLU(),
        )
        self.gnn = EdgeConv(local_nn, aggr='max')

        self.encoder2 = nn.Sequential(
            nn.Linear(128, 128, bias=False),
            nn.GroupNorm(8, 128),
            nn.ReLU(),
            nn.Linear(128, 128, bias=False),
            nn.GroupNorm(8, 128),
            nn.ReLU(),
        )

        self.global_aggr = global_aggr

        self.fc = nn.Sequential(
            nn.Linear(128, 128, bias=True),
            nn.ReLU(),
            nn.Linear(128, 128, bias=True),
            nn.ReLU(),
            nn.Linear(128, output_dim),
        )

        self.reset_parameters()
Example #29
    def __init__(self, in_features, out_features, k):
        super(DirectionalEdgeConv, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.k = k

        self.mlp = Seq(Linear(3+2*in_features, out_features*2),
                       ReLU(),
                       Linear(out_features*2, out_features))

        self.econv = EdgeConv(self.mlp, aggr='max')
Example #30
    def build_block(self, in_channels, out_channels, hiddens, ratio=1.0):
        mlp = nn.Sequential(
            nn.Linear(2 * in_channels, hiddens),
            nn.ReLU(),
            nn.Linear(hiddens, out_channels),
        )
        conv = EdgeConv(nn=mlp, aggr=self.aggr)
        if self.ratio < 1.0:
            pool = SAGPooling(out_channels, ratio=ratio)
        else:
            pool = None
        return conv, pool
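A sketch of how one (conv, pool) pair from build_block might be consumed; SAGPooling returns the pooled tensors plus bookkeeping (perm and score), per torch_geometric.nn.SAGPooling:

# Hypothetical use of a single block returned by build_block.
x = conv(x, edge_index)
if pool is not None:
    x, edge_index, _, batch, _, _ = pool(x, edge_index, batch=batch)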