Example #1
 def __init__(self, kernel_size):
     super(MoNet, self).__init__()
     self.conv1 = GMMConv(1, 32, dim=2, kernel_size=kernel_size)
     self.conv2 = GMMConv(32, 64, dim=2, kernel_size=kernel_size)
     self.conv3 = GMMConv(64, 64, dim=2, kernel_size=kernel_size)
     self.fc1 = torch.nn.Linear(64, 128)
     self.fc2 = torch.nn.Linear(128, 10)
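The constructor above only defines the layers. Below is a minimal, self-contained sketch of a forward pass that could accompany it; the ELU activations, the global_mean_pool readout and the batch argument are illustrative assumptions, not part of the original example.

import torch
import torch.nn.functional as F
from torch_geometric.nn import GMMConv, global_mean_pool

class MoNet(torch.nn.Module):
    def __init__(self, kernel_size=25):
        super().__init__()
        self.conv1 = GMMConv(1, 32, dim=2, kernel_size=kernel_size)
        self.conv2 = GMMConv(32, 64, dim=2, kernel_size=kernel_size)
        self.conv3 = GMMConv(64, 64, dim=2, kernel_size=kernel_size)
        self.fc1 = torch.nn.Linear(64, 128)
        self.fc2 = torch.nn.Linear(128, 10)

    def forward(self, x, edge_index, edge_attr, batch):
        # edge_attr: [num_edges, 2] pseudo-coordinates, matching dim=2 above
        x = F.elu(self.conv1(x, edge_index, edge_attr))
        x = F.elu(self.conv2(x, edge_index, edge_attr))
        x = F.elu(self.conv3(x, edge_index, edge_attr))
        x = global_mean_pool(x, batch)  # graph-level readout
        x = F.elu(self.fc1(x))
        return self.fc2(x)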
Example #2
    def __init__(self, net_params):
        super().__init__()
        self.name = 'MoNet'
        in_dim_node = net_params['in_dim']
        hidden_dim = net_params['hidden_dim']
        out_dim = net_params['out_dim']
        kernel = net_params['kernel']  # for MoNet
        dim = net_params['pseudo_dim_MoNet']  # for MoNet
        n_classes = net_params['n_classes']
        self.dropout = net_params['dropout']
        self.n_layers = net_params['L']
        self.readout = net_params['readout']
        self.batch_norm = net_params['batch_norm']
        self.residual = net_params['residual']
        self.device = net_params['device']
        self.n_classes = n_classes
        self.dim = dim
        # aggr_type = "sum"  # default for MoNet
        aggr_type = "mean"

        self.embedding_h = nn.Linear(in_dim_node,
                                     hidden_dim)  # node feat is an integer
        # self.embedding_e = nn.Linear(1, dim)  # edge feat is a float
        self.layers = nn.ModuleList()
        self.pseudo_proj = nn.ModuleList()
        self.batchnorm_h = nn.ModuleList()
        # Hidden layer
        for _ in range(self.n_layers - 1):
            self.layers.append(
                GMMConv(hidden_dim,
                        hidden_dim,
                        dim,
                        kernel,
                        separate_gaussians=False,
                        aggr=aggr_type,
                        root_weight=True,
                        bias=True))
            if self.batch_norm:
                self.batchnorm_h.append(nn.BatchNorm1d(hidden_dim))
            self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim),
                                                  nn.Tanh()))
        # Output layer
        self.layers.append(
            GMMConv(hidden_dim,
                    out_dim,
                    dim,
                    kernel,
                    separate_gaussians=False,
                    aggr=aggr_type,
                    root_weight=True,
                    bias=True))
        if self.batch_norm:
            self.batchnorm_h.append(nn.BatchNorm1d(out_dim))
        self.pseudo_proj.append(nn.Sequential(nn.Linear(2, dim), nn.Tanh()))

        # self.MLP_layer = MLPReadout(out_dim, n_classes)
        self.MLP_layer = nn.Linear(out_dim, n_classes, bias=True)
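The 2-dimensional pseudo-coordinates that pseudo_proj projects to dim are commonly degree-based, following the MoNet paper's parametrization u(i, j) = (1/sqrt(deg(i)), 1/sqrt(deg(j))). A small sketch of building them (an assumption, not part of this snippet):

import torch
from torch_geometric.utils import degree

def degree_pseudo(edge_index, num_nodes):
    # one row of [1/sqrt(deg(src)), 1/sqrt(deg(dst))] per edge
    deg = degree(edge_index[0], num_nodes=num_nodes).clamp(min=1)
    src, dst = edge_index
    return torch.stack([deg[src].pow(-0.5), deg[dst].pow(-0.5)], dim=-1)  # [E, 2]

Each hidden layer would then receive pseudo_proj[i](pseudo) as its edge attribute.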
Example #3
def test_gmm_conv(separate_gaussians):
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    row, col = edge_index
    value = torch.rand(row.size(0), 3)
    adj = SparseTensor(row=row, col=col, value=value, sparse_sizes=(4, 4))

    conv = GMMConv(8,
                   32,
                   dim=3,
                   kernel_size=25,
                   separate_gaussians=separate_gaussians)
    assert conv.__repr__() == 'GMMConv(8, 32, dim=3)'
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)
    assert torch.allclose(conv(x1, edge_index, value, size=(4, 4)), out)
    assert torch.allclose(conv(x1, adj.t()), out)

    if is_full_test():
        t = '(Tensor, Tensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit(x1, edge_index, value), out)
        assert torch.allclose(jit(x1, edge_index, value, size=(4, 4)), out)

        t = '(Tensor, SparseTensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit(x1, adj.t()), out)

    adj = adj.sparse_resize((4, 2))
    conv = GMMConv((8, 16),
                   32,
                   dim=3,
                   kernel_size=5,
                   separate_gaussians=separate_gaussians)
    assert conv.__repr__() == 'GMMConv((8, 16), 32, dim=3)'
    out1 = conv((x1, x2), edge_index, value)
    out2 = conv((x1, None), edge_index, value, (4, 2))
    assert out1.size() == (2, 32)
    assert out2.size() == (2, 32)
    assert torch.allclose(conv((x1, x2), edge_index, value, (4, 2)), out1)
    assert torch.allclose(conv((x1, x2), adj.t()), out1)
    assert torch.allclose(conv((x1, None), adj.t()), out2)

    if is_full_test():
        t = '(OptPairTensor, Tensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit((x1, x2), edge_index, value), out1)
        assert torch.allclose(jit((x1, x2), edge_index, value, size=(4, 2)),
                              out1)
        assert torch.allclose(jit((x1, None), edge_index, value, size=(4, 2)),
                              out2)

        t = '(OptPairTensor, SparseTensor, OptTensor, Size) -> Tensor'
        jit = torch.jit.script(conv.jittable(t))
        assert torch.allclose(jit((x1, x2), adj.t()), out1)
        assert torch.allclose(jit((x1, None), adj.t()), out2)
Example #4
 def __init__(self, num_features):
     super(MoNet, self).__init__()
     self.conv1 = GMMConv(in_channels=num_features, out_channels=32, dim=2)
     self.batchnorm_1 = torch.nn.BatchNorm1d(32)
     self.conv2 = GMMConv(in_channels=32, out_channels=64, dim=2)
     self.batchnorm_2 = torch.nn.BatchNorm1d(64)
     self.conv3 = GMMConv(in_channels=64, out_channels=64, dim=2)
     self.batchnorm_3 = torch.nn.BatchNorm1d(64)
     self.fc1 = torch.nn.Linear(64, 80)
     self.fc2 = torch.nn.Linear(80, 10)
Example #5
 def __init__(self, kernel_size, dropout=0.5, wgan=False, device='cpu'):
     super(MoNet, self).__init__()
     self.conv1 = GMMConv(1, 32, dim=2, kernel_size=kernel_size)
     self.conv2 = GMMConv(32, 64, dim=2, kernel_size=kernel_size)
     self.conv3 = GMMConv(64, 64, dim=2, kernel_size=kernel_size)
     self.fc1 = torch.nn.Linear(64, 128)
     self.fc2 = torch.nn.Linear(128, 1)
     self.dropout = dropout
     self.device = device
     self.wgan = wgan
Example #6
def test_gmm_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))
    pseudo = torch.rand((edge_index.size(1), 3))

    conv = GMMConv(in_channels, out_channels, dim=3, kernel_size=25)
    assert conv.__repr__() == 'GMMConv(16, 32)'
    assert conv(x, edge_index, pseudo).size() == (num_nodes, out_channels)
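This test feeds random pseudo-coordinates; in a geometric setting dim=3 usually corresponds to relative node positions. A minimal sketch (an assumption, not part of the test) using the Cartesian transform is shown below. Note also that the repr checked here lacks the dim=... suffix seen in Example #3, which points to a different PyTorch Geometric version.

import torch
from torch_geometric.data import Data
from torch_geometric.transforms import Cartesian

data = Data(x=torch.randn(4, 16),
            pos=torch.rand(4, 3),
            edge_index=torch.tensor([[0, 0, 0, 1], [1, 2, 3, 0]]))
data = Cartesian(cat=False)(data)  # writes normalized relative positions to data.edge_attr
# data.edge_attr has shape [num_edges, 3] and can be passed to GMMConv as the pseudo argument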
Example #7
    def __init__(self):
        super(Net, self).__init__()

        self.conv1 = GMMConv(1, 64, dim=2)
        self.bn1 = torch.nn.BatchNorm1d(64)

        self.conv2 = GMMConv(64, 128, dim=2)
        self.bn2 = torch.nn.BatchNorm1d(128)

        self.conv3 = GMMConv(128, 256, dim=2)
        self.bn3 = torch.nn.BatchNorm1d(256)

        self.conv4 = GMMConv(256, 512, dim=2)
        self.bn4 = torch.nn.BatchNorm1d(512)

        self.fc1 = torch.nn.Linear(32 * 512, 1024)
        self.fc2 = torch.nn.Linear(1024, 10)
Example #8
def test_gmm_conv():
    in_channels, out_channels = (16, 32)
    edge_index = torch.tensor([[0, 0, 0, 1, 2, 3], [1, 2, 3, 0, 0, 0]])
    num_nodes = edge_index.max().item() + 1
    x = torch.randn((num_nodes, in_channels))
    pseudo = torch.rand((edge_index.size(1), 3))

    conv = GMMConv(in_channels, out_channels, dim=3, kernel_size=25)
    assert conv.__repr__() == 'GMMConv(16, 32)'
    out = conv(x, edge_index, pseudo)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable()
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, edge_index, pseudo).tolist() == out.tolist()

    conv = GMMConv(in_channels,
                   out_channels,
                   dim=3,
                   kernel_size=25,
                   separate_gaussians=True)
    out = conv(x, edge_index, pseudo)
    assert out.size() == (num_nodes, out_channels)

    jit_conv = conv.jittable()
    jit_conv = torch.jit.script(jit_conv)
    assert jit_conv(x, edge_index, pseudo).tolist() == out.tolist()
Example #9
 def __init__(self, num_features, kernel=3, dim=3):
     super(MoNet, self).__init__()
     self.conv1 = GMMConv(in_channels=num_features,
                          out_channels=8,
                          dim=dim,
                          kernel_size=kernel)
     self.conv2 = GMMConv(in_channels=8,
                          out_channels=16,
                          dim=dim,
                          kernel_size=kernel)
     self.conv3 = GMMConv(in_channels=16,
                          out_channels=8,
                          dim=dim,
                          kernel_size=kernel)
     self.conv4 = GMMConv(in_channels=8,
                          out_channels=4,
                          dim=dim,
                          kernel_size=kernel)
Example #10
    def __init__(self, in_channel, out_channel):
        super(ResidualBlock, self).__init__()
        self.left_conv1 = GMMConv(in_channel,
                                  out_channel,
                                  dim=3,
                                  kernel_size=5)
        self.left_bn1 = torch.nn.BatchNorm1d(out_channel)
        self.left_conv2 = GMMConv(out_channel,
                                  out_channel,
                                  dim=3,
                                  kernel_size=5)
        self.left_bn2 = torch.nn.BatchNorm1d(out_channel)

        self.shortcut_conv = GMMConv(in_channel,
                                     out_channel,
                                     dim=3,
                                     kernel_size=1)
        self.shortcut_bn = torch.nn.BatchNorm1d(out_channel)
Example #11
 def __init__(self, num_layers=2, hidden=32, features_num=32, num_class=2):
     super(GMM, self).__init__()
     self.convs = torch.nn.ModuleList()
     for i in range(num_layers):
         self.convs.append(GMMConv(hidden, hidden, 1, 1))
     self.out = Linear(hidden, num_class)
     self.first_lin = Linear(features_num, hidden)
     self.fuse_weight = torch.nn.Parameter(torch.FloatTensor(num_layers), requires_grad=True)
     self.fuse_weight.data.fill_(float(1) / (num_layers + 1))
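The fuse_weight parameter, initialized to 1/(num_layers + 1), suggests a learned weighted fusion of the per-layer outputs. One possible forward pass consistent with that reading is sketched below; it is an assumption, not taken from the original code (the convolutions use dim=1, so edge_attr has shape [num_edges, 1]).

    def forward(self, x, edge_index, edge_attr):
        x = torch.relu(self.first_lin(x))
        out = 0
        for i, conv in enumerate(self.convs):
            x = torch.relu(conv(x, edge_index, edge_attr))
            out = out + self.fuse_weight[i] * x  # learned per-layer fusion weight
        return self.out(out)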
Example #12
 def get_layer(self, i, o):
     sprint("Created GMMConv %d => %d" % (i, o))
     return GMMConv(
         in_channels=i,
         out_channels=o,
         dim=i,
         kernel_size=self.kernel_size,
         separate_gaussians=self.separate_gaussians,
     )
Example #13
    def layers(self):
        # TODO adapt to per-layer configurability
        self.conv_in = GMMConv(in_channels=self.config.feature_dimensionality,
                               out_channels=self.config.hidden_units,
                               dim=self.config.pseudo_dimensionality,
                               bias=self.config.use_bias)

        self.hidden_layers = torch.nn.ModuleList()
        for i in range(self.config.hidden_layers):
            layer = GMMConv(in_channels=self.config.hidden_units,
                            out_channels=self.config.hidden_units,
                            dim=self.config.pseudo_dimensionality,
                            bias=self.config.use_bias)
            self.hidden_layers.append(layer)

        self.conv_out = GMMConv(in_channels=self.config.hidden_units,
                                out_channels=self.model_type.out_channels,
                                dim=self.config.pseudo_dimensionality,
                                bias=self.config.use_bias)
Example #14
 def makeConv(self, nin, nout, conv_args):
     # FeaStConv
     if(conv_args['name'] == 'FeaSt'):
         return FeaStConv(nin, nout, conv_args["num_heads"])
     
     # SplineConv
     if(conv_args['name'] == 'Spline'):
         return SplineConv(nin, nout, 
             self.edge_dim,
             conv_args["kernel_size"],
             is_open_spline=conv_args["open_spline"],
             degree=conv_args["degree"]
         )
     
     # GMMConv
     if(conv_args['name'] == "GMM"):
         return GMMConv(nin, nout,
             self.edge_dim,
             conv_args["kernel_size"]
         )
     
     # NNConv
     if(conv_args["name"] == "NN"):
         h = nn.Sequential(
                 nn.Linear(self.edge_dim, nin*nout),
                 nn.ReLU()
                 #nn.Linear(int(nin*nout/2), nin*nout)
         )
         return NNConv(nin, nout, h)
     
     # PPFConv
     if(conv_args["name"] == "PPF"):
         cin = nin+4
         hl = nn.Sequential(
             nn.Linear(cin, conv_args['nhidden']),
             nn.ReLU()
         )
         hg = nn.Linear(conv_args['nhidden'], nout)
         #hl = nn.Sequential(
                 #nn.Linear(cin, int(conv_args['nhidden']/2)),
                 #nn.ReLU(),
                 #nn.Linear(int(conv_args['nhidden']/2), conv_args['nhidden'])
         #)
         #hg = nn.Sequential(
                 #nn.Linear(conv_args['nhidden'], nout),
                 #nn.ReLU(),
                 #nn.Linear(nout, nout)
         #)
         return PPFConv(hl, hg)
     
     # CGConv
     if(conv_args["name"] == "CG"):
         return CGConv(nin, self.edge_dim)
Example #15
 def __init__(self, args):
     super(GaussianGenerator, self).__init__()
     self.args = args
     self.conv1 = GMMConv(self.args.channels[0],
                          5 * self.args.channels[1],
                          dim=2,
                          kernel_size=args.kernel_size)
     self.conv2 = GMMConv(self.args.channels[1],
                          3 * self.args.channels[2],
                          dim=2,
                          kernel_size=args.kernel_size)
     self.conv3 = GMMConv(self.args.channels[2],
                          self.args.channels[3],
                          dim=2,
                          kernel_size=args.kernel_size)
     self.pos1fc1 = torch.nn.Linear(2 + self.args.channels[1], 128)
     self.pos1fc2 = torch.nn.Linear(128, 64)
     self.pos1fc3 = torch.nn.Linear(64, 2)
     self.pos2fc1 = torch.nn.Linear(2 + self.args.channels[2], 128)
     self.pos2fc2 = torch.nn.Linear(128, 64)
     self.pos2fc3 = torch.nn.Linear(64, 2)
Example #16
    def __init__(self,
                 input_dim=1024,
                 net_type='gcn',
                 channel_dims=[256, 256, 512],
                 fc_dim=512,
                 num_classes=256,
                 cheb_order=2):
        super(GraphCNN, self).__init__()

        # Define graph convolutional layers
        gcn_dims = [input_dim] + channel_dims
        self.net_type = net_type
        if net_type == 'gcn':
            gcn_layers = [
                GCNConv(gcn_dims[i - 1], gcn_dims[i], bias=True)
                for i in range(1, len(gcn_dims))
            ]
        elif net_type == 'chebcn':
            gcn_layers = [
                ChebConv(gcn_dims[i - 1],
                         gcn_dims[i],
                         K=cheb_order,
                         normalization='sym',
                         bias=True) for i in range(1, len(gcn_dims))
            ]
        elif net_type == 'gmmcn':
            gcn_layers = [
                GMMConv(gcn_dims[i - 1],
                        gcn_dims[i],
                        dim=1,
                        kernel_size=1,
                        separate_gaussians=False,
                        bias=True) for i in range(1, len(gcn_dims))
            ]
        elif net_type == 'gincn':
            gcn_layers = [
                GINConv(MLP(gcn_dims[i - 1], gcn_dims[i], gcn_dims[i]),
                        eps=0,
                        train_eps=False) for i in range(1, len(gcn_dims))
            ]
        self.gcn = nn.ModuleList(gcn_layers)

        # Define dropout
        self.drop = nn.Dropout(p=0.3)

        # Define fully-connected layers
        self.fc_dim = fc_dim
        if self.fc_dim > 0:
            self.fc = nn.Linear(channel_dims[-1], fc_dim)
            self.fc_out = nn.Linear(fc_dim, num_classes)
        else:
            self.fc_out = nn.Linear(channel_dims[-1], num_classes)
Example #17
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = GMMConv(1, 64, dim=3, kernel_size=5)
        self.bn1 = torch.nn.BatchNorm1d(64)

        self.block1 = ResidualBlock(64, 128)
        self.block2 = ResidualBlock(128, 256)
        self.block3 = ResidualBlock(256, 512)

        self.fc1 = torch.nn.Linear(8 * 512, 1024)
        self.bn = torch.nn.BatchNorm1d(1024)
        self.drop_out = torch.nn.Dropout()
        self.fc2 = torch.nn.Linear(1024, 20)
Example #18
 def __init__(self, config):
     super(MoNet, self).__init__()
     monet_hidden_layer_sizes = [config.num_node_features] + list(
         config.monet_params.hidden_layer_sizes)
     linear_layer_sizes = (
         [monet_hidden_layer_sizes[-1]] +
         list(config.linear_layer_params.intermediate_layer_sizes) +
         [config.num_classes])
     self.monet_layers = ModuleList([
         GMMConv(
             in_channels=in_channels,
             out_channels=out_channels,
             dim=2,
             kernel_size=config.kernel_size,
         ) for in_channels, out_channels in zip(
             monet_hidden_layer_sizes[:-1], monet_hidden_layer_sizes[1:])
     ])
     self.linear_layers = ModuleList([
         Linear(in_features=in_features, out_features=out_features)
         for in_features, out_features in zip(linear_layer_sizes[:-1],
                                              linear_layer_sizes[1:])
     ])
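A minimal sketch of how the two ModuleList stacks above could be traversed in a forward pass; the ReLU activations and the global_mean_pool readout (imported from torch_geometric.nn) are assumptions, not taken from the original model.

    def forward(self, x, edge_index, pseudo, batch):
        # pseudo: [num_edges, 2] pseudo-coordinates, matching dim=2 above
        for conv in self.monet_layers:
            x = torch.relu(conv(x, edge_index, pseudo))
        x = global_mean_pool(x, batch)  # graph-level readout
        for lin in self.linear_layers[:-1]:
            x = torch.relu(lin(x))
        return self.linear_layers[-1](x)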
Example #19
def test_lazy_gmm_conv(separate_gaussians):
    x1 = torch.randn(4, 8)
    x2 = torch.randn(2, 16)
    edge_index = torch.tensor([[0, 1, 2, 3], [0, 0, 1, 1]])
    value = torch.rand(edge_index.size(1), 3)

    conv = GMMConv(-1,
                   32,
                   dim=3,
                   kernel_size=25,
                   separate_gaussians=separate_gaussians)
    assert conv.__repr__() == 'GMMConv(-1, 32, dim=3)'
    out = conv(x1, edge_index, value)
    assert out.size() == (4, 32)

    conv = GMMConv((-1, -1),
                   32,
                   dim=3,
                   kernel_size=25,
                   separate_gaussians=separate_gaussians)
    assert conv.__repr__() == 'GMMConv((-1, -1), 32, dim=3)'
    out = conv((x1, x2), edge_index, value)
    assert out.size() == (2, 32)
Example #20
 def __init__(self, in_channels, dim, out_size):
     super(GaussGNN, self).__init__()
     self.conv1 = GMMConv(in_channels, in_channels, dim, kernel_size=25)
     self.conv2 = GMMConv(in_channels, in_channels, dim, kernel_size=25)
     self.conv3 = GMMConv(in_channels, in_channels, dim, kernel_size=25)
     self.linear = nn.Linear(in_channels, out_size)
Example #21
    def init_layers(self, batch):


        self.layer_lst = [] ##[in_channel, out_channel, in_pn, out_pn, max_neighbor_num, neighbor_num_lst,neighbor_id_lstlst,conv_layer, residual_layer]
        
        self.layer_num = len(self.channel_lst)
        
        in_point_num = self.point_num
        in_channel = 3
        
        for l in range(self.layer_num):
            out_channel = self.channel_lst[l]
            weight_num = self.weight_num_lst[l]
            residual_rate = self.residual_rate_lst[l]
            

            connection_info  = np.load(self.connection_layer_fn_lst[l])
            print ("##Layer",self.connection_layer_fn_lst[l])
            out_point_num = connection_info.shape[0]

            neighbor_num_lst = torch.FloatTensor(connection_info[:,0].astype(float)).cuda() #out_point_num*1
            neighbor_id_dist_lstlst = connection_info[:, 1:] #out_point_num*(max_neighbor_num*2)
            neighbor_id_lstlst = neighbor_id_dist_lstlst.reshape((out_point_num, -1,2))[:,:,0] #out_point_num*max_neighbor_num
            #neighbor_id_lstlst = torch.LongTensor(neighbor_id_lstlst)
            avg_neighbor_num = neighbor_num_lst.mean().item()
            max_neighbor_num = neighbor_id_lstlst.shape[1]

            pc_mask  = torch.ones(in_point_num+1).cuda()
            pc_mask[in_point_num]=0
            neighbor_mask_lst = pc_mask[ torch.LongTensor(neighbor_id_lstlst)].contiguous() #out_pn*max_neighbor_num neighbor is 1 otherwise 0
            
            

            if((residual_rate<0) or (residual_rate>1)):
                print ("Invalid residual rate", residual_rate)
            ####parameters for conv###############
            conv_layer = ""
            if(residual_rate<1):
                edge_index_lst = self.get_edge_index_lst_from_neighbor_id_lstlst(neighbor_id_lstlst, neighbor_num_lst)
                print ("edge_index_lst", edge_index_lst.shape)
                if(self.conv_method=="Cheb"):
                    conv_layer = edge_index_lst, ChebConv(in_channel, out_channel, K=6, normalization='sym', bias=True)
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()/10
                        self.register_parameter("GMM"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="GAT"):
                    #conv_layer = edge_index_lst,GATConv(in_channel, out_channel, heads=1)
                    if((l!=((self.layer_num/2)-2)) and (l!=(self.layer_num-1))): #not middle or last layer
                        conv_layer = edge_index_lst, GATConv(in_channel, out_channel, heads=8, concat=True)
                        out_channel=out_channel*8
                    else:
                        conv_layer = edge_index_lst, GATConv(in_channel, out_channel, heads=8, concat=False) 
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                        self.register_parameter("GAT"+str(l)+"_"+str(ii),p)
                        ii+=1
                    #conv_layer = edge
                elif(self.conv_method=="GMM"):
                    pseudo_coordinates = self.get_GMM_pseudo_coordinates( edge_index_lst, neighbor_num_lst) #edge_num*2
                    #print ("pseudo_coordinates", pseudo_coordinates.shape)
                    conv_layer = edge_index_lst,pseudo_coordinates, GMMConv(in_channel, out_channel, dim=2 , kernel_size=25)
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                        self.register_parameter("GMM"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="FeaST"):
                    
                    conv_layer = edge_index_lst,FeaStConv(in_channel, out_channel, heads=32)
                    ii=0
                    for p in conv_layer[-1].parameters():
                        p.data=p.data.cuda()
                        self.register_parameter("FeaST"+str(l)+"_"+str(ii),p)
                        ii+=1
                elif(self.conv_method=="vw"):
                    weights = torch.randn(out_point_num, max_neighbor_num, out_channel,in_channel)/(avg_neighbor_num*weight_num)
                    
                    weights = nn.Parameter(weights.cuda())
                    
                    self.register_parameter("weights"+str(l),weights)
                    
                    bias = nn.Parameter(torch.zeros(out_channel).cuda())
                    if(self.perpoint_bias == 1):
                        bias= nn.Parameter(torch.zeros(out_point_num, out_channel).cuda())
                    self.register_parameter("bias"+str(l),bias)
                    
                    conv_layer = (weights, bias)
                else:
                    print ("ERROR! Unknown convolution layer!!!")
                

            zeros_batch_outpn_outchannel = torch.zeros((batch, out_point_num, out_channel)).cuda()
            ####parameters for residual###############
            residual_layer = ""

            if(residual_rate>0):
                p_neighbors = ""
                weight_res = ""

                if((out_point_num != in_point_num) and (self.use_vanilla_pool == 0)):
                    p_neighbors = nn.Parameter((torch.randn(out_point_num, max_neighbor_num)/(avg_neighbor_num)).cuda())
                    self.register_parameter("p_neighbors"+str(l),p_neighbors)
                
                if(out_channel != in_channel):
                    weight_res = torch.randn(out_channel,in_channel)
                    #self.normalize_weights(weight_res)
                    weight_res = weight_res/out_channel
                    weight_res = nn.Parameter(weight_res.cuda())
                    self.register_parameter("weight_res"+str(l),weight_res)
                
                residual_layer = (weight_res, p_neighbors)
            
            ##### put everything together
            
            layer = (in_channel, out_channel, in_point_num, out_point_num, weight_num, max_neighbor_num, neighbor_num_lst,neighbor_id_lstlst, conv_layer, residual_layer, residual_rate, neighbor_mask_lst, zeros_batch_outpn_outchannel)
            
            self.layer_lst +=[layer]
            
            print ("in_channel", in_channel,"out_channel",out_channel, "in_point_num", in_point_num, "out_point_num", out_point_num, "weight_num", weight_num,
                   "max_neighbor_num", max_neighbor_num, "avg_neighbor_num", avg_neighbor_num)
            
            in_point_num=out_point_num
            in_channel = out_channel
Example #22
    def __init__(
            self,
            nIn,
            nOut=None,
            conv_args={},
            pool_args={},
            crf_args={},
            nhidden=32,
            nhidden_lin=16,
            level_hidden_size_down=None,
            level_hidden_size_up=None,
            depth=2,
            num_top_convs=3,
            num_pool_convs=1,
            num_unpool_convs=1,
            act='relu',
            use_skips=True,
            #sum_skips=False,
            dropout=0.5,
            edge_dim=9,
            name='net_conv_pool',
            use_lin=True,
            use_crf=False,
            scale_edge_features=None):
        super(NetConvPool, self).__init__()
        self.depth = depth
        self.dropout = dropout
        self.use_skips = use_skips
        #self.sum_skips = sum_skips
        self.num_top_convs = num_top_convs
        self.num_pool_convs = num_pool_convs
        self.num_unpool_convs = num_unpool_convs
        self.edge_dim = edge_dim
        self.name = name
        self.lin = use_lin
        self.crf = use_crf

        # get activation function
        if (act == 'relu'):
            self.act = F.relu
        elif (act == 'elu'):
            self.act = F.elu
        elif (act == 'selu'):
            self.act = F.selu
        else:
            self.act = act  # assume we are passed a function

        # determine convolution and pooling types
        if depth > 0:
            self.pool_type = pool_args["name"]

        # build transforms
        if self.pool_type == "MeshPool":
            transforms = [GeometricEdgeFeatures()]
        else:
            transforms = [GenerateMeshNormals(), PointPairFeatures()]

        if scale_edge_features:
            transforms.append(ScaleEdgeFeatures(method=scale_edge_features))
        self.transforms = Compose(transforms)

        # containers to hold the layers
        self.top_convs = torch.nn.ModuleList()
        self.down_pools = torch.nn.ModuleList()
        self.down_convs = torch.nn.ModuleList()
        self.up_convs = torch.nn.ModuleList()

        # set hidden size of every level
        if level_hidden_size_down is None:
            level_hidden_size_down = [nhidden] * (depth + 1)

        if level_hidden_size_up is None:
            level_hidden_size_up = level_hidden_size_down[(depth - 1)::-1]
        level_hidden_size_up = [level_hidden_size_down[-1]] + level_hidden_size_up

        assert len(level_hidden_size_down) == depth + 1
        assert len(level_hidden_size_up) == depth + 1

        # down FC layer (dimensionality reduction)
        self.lin1 = nn.Linear(nIn, nhidden_lin)

        # top convolutions
        nin = nhidden_lin
        nout = level_hidden_size_down[0]
        for i in range(num_top_convs):
            self.top_convs.append(
                GMMConv(nin, nout, edge_dim, conv_args["kernel_size"]))
            nin = nout

        # down layers
        nin = nout
        for i in range(depth):
            nout = level_hidden_size_down[i + 1]
            # pooling
            self.down_pools.append(self.makePool(nin, **pool_args))

            # convolution
            for j in range(num_pool_convs):
                self.down_convs.append(
                    GMMConv(nin, nout, edge_dim, conv_args["kernel_size"]))
                nin = nout

        # up layers
        for i in range(depth):
            j = depth - i - 1
            nin = (level_hidden_size_up[i] + level_hidden_size_down[j]
                   if use_skips else level_hidden_size_up[i])
            nout = level_hidden_size_up[i + 1]

            # convolution
            for k in range(num_unpool_convs):
                self.up_convs.append(
                    GMMConv(nin, nout, edge_dim, conv_args["kernel_size"]))
                nin = nout

        if use_crf:
            # continuous CRF layer
            self.crf1 = ContinuousCRF(**crf_args)

        if use_lin:
            # final FC layers
            self.lin2 = nn.Linear(nout, nout)
            self.lin3 = nn.Linear(nout, nout)
            self.lin4 = nn.Linear(nout, nOut)