Example #1
    def __init__(self, num_parts: int = 50):
        super().__init__()
        self.num_parts = num_parts
        self.drop = nn.Dropout2d(p=0.2)
        self.fc1 = pt_utils.Conv2d(1456, 256, bn=True)
        self.fc2 = pt_utils.Conv2d(256, 256, bn=True)
        self.fc3 = pt_utils.Conv2d(256, 128, bn=True)
        self.fc4 = pt_utils.Conv2d(128,
                                   self.num_parts,
                                   bn=False,
                                   activation=None)
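
All of these examples call pt_utils.Conv2d, whose source is not shown on this page. A minimal stand-in, inferred purely from the keyword arguments used across the examples (kernel_size, bn, bias, activation, act_before_bn), might look like the sketch below; the real pt_utils implementation may differ in details.

import torch.nn as nn

class Conv2d(nn.Module):
    """Minimal stand-in for pt_utils.Conv2d, inferred from its call sites:
    Conv2d -> optional BatchNorm2d -> optional activation, with the order of
    the last two flipped by act_before_bn. This is an assumption about the
    real API, not its actual source."""

    def __init__(self, in_size, out_size, kernel_size=(1, 1), bn=False,
                 bias=True, activation=nn.ReLU(inplace=True),
                 act_before_bn=False):
        super().__init__()
        self.conv = nn.Conv2d(in_size, out_size, kernel_size, bias=bias)
        self.bn = nn.BatchNorm2d(out_size) if bn else None
        self.activation = activation
        self.act_before_bn = act_before_bn

    def forward(self, x):
        x = self.conv(x)
        if self.act_before_bn and self.activation is not None:
            x = self.activation(x)
        if self.bn is not None:
            x = self.bn(x)
        if not self.act_before_bn and self.activation is not None:
            x = self.activation(x)
        return x
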
Example #2
    def __init__(self, num_parts: int = 50):
        super().__init__()
        self.drop = nn.Dropout2d(p=0.4)
        self.num_parts = num_parts

        self.fc1 = pt_utils.Conv2d(128 + 1024 + 64 * 9 + 1024, 256,
                                   bn=True)  # has relu

        self.fc2 = pt_utils.Conv2d(256, 256, bn=True)

        self.fc3 = pt_utils.Conv2d(256, num_parts, bn=False, activation=None)
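
The forward pass of this head is not included in the example. A self-contained sketch of how such a head could be completed and exercised is given below; the class name PartSegHead, the B x C x N x 1 tensor layout, and the dropout placement are assumptions, and plain nn.Conv2d + BatchNorm2d stands in for pt_utils.Conv2d.

import torch
import torch.nn as nn

class PartSegHead(nn.Module):
    """Hypothetical completion of Example #2: per-point part logits.
    Input width 2752 = 128 + 1024 + 64 * 9 + 1024, as noted next to fc1."""

    def __init__(self, num_parts: int = 50, in_channels: int = 2752):
        super().__init__()
        self.drop = nn.Dropout2d(p=0.4)

        def block(i, o):  # conv -> bn -> relu, mimicking Conv2d(..., bn=True)
            return nn.Sequential(nn.Conv2d(i, o, 1), nn.BatchNorm2d(o),
                                 nn.ReLU(inplace=True))

        self.fc1 = block(in_channels, 256)
        self.fc2 = block(256, 256)
        self.fc3 = nn.Conv2d(256, num_parts, 1)  # no bn, no activation

    def forward(self, x):  # x: B x C x N x 1 point-wise features
        return self.fc3(self.drop(self.fc2(self.fc1(x))))

logits = PartSegHead()(torch.randn(2, 2752, 1024, 1))  # -> 2 x 50 x 1024 x 1
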
Example #3
    def __init__(self, d_out):  # d_in = d_out//2
        super(Building_block, self).__init__()
        self.mlp1 = pt_utils.Conv2d(10,
                                    d_out // 2,
                                    kernel_size=(1, 1),
                                    bn=True)
        self.att_pooling_1 = Att_pooling(d_out, d_out // 2)

        self.mlp2 = pt_utils.Conv2d(d_out // 2,
                                    d_out // 2,
                                    kernel_size=(1, 1),
                                    bn=True)
        self.att_pooling_2 = Att_pooling(d_out, d_out)
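
The 10 input channels of mlp1 match RandLA-Net's relative point position encoding, which concatenates, per neighbour, the centre coordinates, the neighbour coordinates, their offset, and the Euclidean distance (3 + 3 + 3 + 1 = 10). A small shape-only illustration; the tensor names are hypothetical:

import torch

center = torch.randn(2, 3, 1024, 16)   # B x 3 x N x K (query point, tiled)
neigh = torch.randn(2, 3, 1024, 16)    # B x 3 x N x K (k-NN coordinates)
offset = neigh - center
dist = offset.norm(dim=1, keepdim=True)                # B x 1 x N x K
enc = torch.cat([center, neigh, offset, dist], dim=1)  # B x 10 x N x K
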
Example #4
    def __init__(self, in_channel, out_channel, taylor_channel, K_knn):
        super().__init__()
        self.K_knn = K_knn
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.taylor_channel = taylor_channel

        self.conv1 = pt_utils.Conv2d(19,
                                     self.taylor_channel,
                                     bn=False,
                                     activation=None)

        self.conv2 = pt_utils.Conv2d(in_channel * taylor_channel,
                                     out_channel,
                                     kernel_size=[1, K_knn],
                                     bn=True)
Example #5
    def __init__(self, d_in, d_out):
        super(Dilated_res_block, self).__init__()

        self.mlp1 = pt_utils.Conv2d(d_in,
                                    d_out // 2,
                                    kernel_size=(1, 1),
                                    bn=True)
        self.lfa = Building_block(d_out)
        self.mlp2 = pt_utils.Conv2d(d_out,
                                    d_out * 2,
                                    kernel_size=(1, 1),
                                    bn=True,
                                    activation=None)
        self.shortcut = pt_utils.Conv2d(d_in,
                                        d_out * 2,
                                        kernel_size=(1, 1),
                                        bn=True,
                                        activation=None)
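
The forward of this block (not shown here) follows the RandLA-Net dilated residual pattern: squeeze with mlp1, aggregate local features, expand with mlp2, and add a linear shortcut before a shared leaky ReLU. A hedged method sketch, where xyz and neigh_idx (precomputed k-NN indices) are assumed inputs:

import torch.nn.functional as F

# Hypothetical forward for Dilated_res_block, following the RandLA-Net pattern.
def forward(self, feature, xyz, neigh_idx):
    f_pc = self.mlp1(feature)              # d_in      -> d_out // 2
    f_pc = self.lfa(xyz, f_pc, neigh_idx)  # d_out//2  -> d_out (Building_block)
    f_pc = self.mlp2(f_pc)                 # d_out     -> 2 * d_out, no activation
    shortcut = self.shortcut(feature)      # d_in      -> 2 * d_out, no activation
    return F.leaky_relu(f_pc + shortcut, negative_slope=0.2)
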
Example #6
    def __init__(self, K_channel=3, K_knn=20):
        '''
        input : B x 3 x N
        '''
        super().__init__()
        self.eps = 1e-3
        self.K_channel = K_channel
        self.K_knn = K_knn

        self.conv1 = _baseEdgeConv(3, 64, k=K_knn, pool=False) # B x 64 x N x k
        self.conv2 = pt_utils.Conv2d(64, 128, bn=True)
        self.conv3 = pt_utils.Conv2d(128, 1024, bn=True)

        self.fc1 = pt_utils.FC(1024, 512, bn=True)
        self.fc2 = pt_utils.FC(512, 256, bn=True)
        self.fc3 = pt_utils.FC(256, self.K_channel**2, bn=False, activation=None)
        self.fc3.fc.weight.data.fill_(0)
        self.fc3.fc.bias.data.copy_(torch.eye(self.K_channel).view(-1).float())
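
The last two lines implement the usual T-Net trick: with the weights zeroed, the layer's output equals its bias, so the predicted K_channel x K_channel transform starts out as the identity matrix and training only has to learn a deviation from it. A small standalone check:

import torch
import torch.nn as nn

fc = nn.Linear(256, 9)                      # stands in for fc3 with K_channel=3
fc.weight.data.fill_(0)                     # W = 0 => output == bias
fc.bias.data.copy_(torch.eye(3).view(-1).float())

out = fc(torch.randn(4, 256)).view(4, 3, 3)
assert torch.allclose(out, torch.eye(3).expand(4, 3, 3))
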
Example #7
    def __init__(self, num_classes=16, k: int = 30):
        super().__init__()
        self.eps = 1e-3
        self.k = k
        self.transform = transform_net(K_channel=3, K_knn=k)
        self.num_classes = num_classes

        # EdgeConv 1
        self.edgeconv1_1 = _baseEdgeConv(3, 64, k=k, pool=False)
        self.edgeconv1_2 = pt_utils.Conv2d(64, 64, bn=True)
        # max_pool and avg_pool then send to conv1_3
        self.edgeconv1_3 = pt_utils.Conv2d(128, 64, bn=True)
        # output1

        # EdgeConv2
        self.edgeconv2_1 = _baseEdgeConv(64, 64, k=k, pool=False)
        # max_pool and avg_pool then send to conv2_2
        self.edgeconv2_2 = pt_utils.Conv2d(128, 64, bn=True)
        # output2

        self.edgeconv3_1 = _baseEdgeConv(64, 64, k=k, pool=False)
        # max_pool and avg_pool then send to conv3_2
        self.edgeconv3_2 = pt_utils.Conv2d(128, 64, bn=True)
        # output3

        # [output1, output2, output3] -> mlp(1024)
        self.mlp = pt_utils.Conv2d(64 + 64 + 64, 1024, bn=True)

        # one_hot_label conv
        self.one_hot_expand = pt_utils.Conv2d(self.num_classes, 128, bn=True)
Example #8
    def __init__(self, config):
        super(Network, self).__init__()
        self.config = config
        self.class_weights = DP.get_class_weights('SemanticKITTI')

        self.fc0 = pt_utils.Conv1d(3, 8, kernel_size=1, bn=True)

        self.dilated_res_blocks = nn.ModuleList()
        d_in = 8
        for i in range(self.config.num_layers):
            d_out = self.config.d_out[i]
            self.dilated_res_blocks.append(Dilated_res_block(d_in, d_out))
            d_in = 2 * d_out

        d_out = d_in
        self.decoder_0 = pt_utils.Conv2d(d_in,
                                         d_out,
                                         kernel_size=(1, 1),
                                         bn=True)

        self.decoder_blocks = nn.ModuleList()
        for j in range(self.config.num_layers):
            if j < 3:
                d_in = d_out + 2 * self.config.d_out[-j - 2]
                d_out = 2 * self.config.d_out[-j - 2]
            else:
                d_in = 4 * self.config.d_out[-4]
                d_out = 2 * self.config.d_out[-4]
            self.decoder_blocks.append(
                pt_utils.Conv2d(d_in, d_out, kernel_size=(1, 1), bn=True))

        self.fc1 = pt_utils.Conv2d(d_out, 64, kernel_size=(1, 1), bn=True)
        self.fc2 = pt_utils.Conv2d(64, 32, kernel_size=(1, 1), bn=True)
        self.dropout = nn.Dropout(0.5)
        self.fc3 = pt_utils.Conv2d(32,
                                   self.config.num_classes,
                                   kernel_size=(1, 1),
                                   bn=False,
                                   activation=None)
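
The encoder/decoder channel arithmetic above is easier to follow with concrete numbers. Assuming the standard RandLA-Net SemanticKITTI config (num_layers = 4, d_out = [16, 64, 128, 256]); the real values come from `config`, so these numbers are illustrative only:

d_out_cfg = [16, 64, 128, 256]

d_in, enc = 8, []                      # 8 channels out of fc0
for d_out in d_out_cfg:
    enc.append((d_in, 2 * d_out))      # each Dilated_res_block emits 2 * d_out
    d_in = 2 * d_out
print(enc)   # [(8, 32), (32, 128), (128, 256), (256, 512)]

d_out, dec = d_in, []                  # decoder_0: 512 -> 512
for j in range(4):
    if j < 3:
        d_in, d_out = d_out + 2 * d_out_cfg[-j - 2], 2 * d_out_cfg[-j - 2]
    else:
        d_in, d_out = 4 * d_out_cfg[-4], 2 * d_out_cfg[-4]
    dec.append((d_in, d_out))
print(dec)   # [(768, 256), (384, 128), (160, 32), (64, 32)]
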
Example #9
    def __init__(self):
        super().__init__()

        inchannel = 3
        self.ssg1 = _BasePointnetSSGModule(512, 0.2, 32, [inchannel, 64, 64, 128])

        inchannel = 128 + 3
        self.ssg2 = _BasePointnetSSGModule(128, 0.4, 64, [inchannel, 128, 128, 256])

        inchannel = 256 + 3
        self.SA = pt_utils.SharedMLP([inchannel, 256, 512], bn=True)

        self.last_layer = pt_utils.Conv2d(512, 1024, bn=True)
Example #10
    def __init__(self, k: int = 20, last_bn=True):
        super().__init__()
        self.eps = 1e-3
        self.k = k
        self.last_bn = last_bn
        self.transform = transform_net(K_channel=3, K_knn=self.k)

        # EdgeConv 1
        self.edgeconv1_1 = _baseEdgeConv(3, 64, k=k)
        self.edgeconv1_2 = _baseEdgeConv(64, 64, k=k)
        self.edgeconv1_3 = _baseEdgeConv(64, 64, k=k)

        # EdgeConv2
        self.edgeconv2_1 = _baseEdgeConv(64, 128, k=k)

        # mlp1
        self.mlp = pt_utils.Conv2d(
            320,
            1024,
            bn=last_bn,
            activation=nn.ReLU(inplace=True) if last_bn else None)
Example #11
    def __init__(self):
        super().__init__()
        inchannel = 3
        self.msg1 = _BasePointnetMSGModule(
            npoint=512,
            radius=[0.1, 0.2, 0.4],
            nsamples=[16, 32, 128],
            mlps=[[inchannel, 32, 32, 64], [inchannel, 64, 64, 128], [inchannel, 64, 96, 128]]
        )

        inchannel = 64 + 128 + 128 + 3
        self.msg2 = _BasePointnetMSGModule(
            npoint=128,
            radius=[0.2, 0.4, 0.8],
            nsamples=[32, 64, 128],
            mlps=[[inchannel, 64, 64, 128], [inchannel, 128, 128, 256], [inchannel, 128, 128, 256]]
        )

        inchannel = 128 + 256 + 256 + 3

        self.SA = pt_utils.SharedMLP([inchannel, 256, 512], bn=True)
        self.last_layer = pt_utils.Conv2d(512, 1024, bn=True)
Example #12
    def __init__(self, d_in, d_out):
        super(Att_pooling, self).__init__()
        self.fc = nn.Conv2d(d_in, d_in, (1, 1), bias=False)
        self.mlp = pt_utils.Conv2d(d_in, d_out, kernel_size=(1, 1), bn=True)
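
The forward (not shown) is RandLA-Net's attentive pooling: per-neighbour scores from the bias-free fc, a softmax over the neighbour axis, a weighted sum, then the shared MLP. A hedged sketch, with feature_set of shape B x d_in x N x K:

import torch.nn.functional as F

# Hypothetical forward for Att_pooling, following the RandLA-Net pattern.
def forward(self, feature_set):                             # B x d_in x N x K
    att_scores = F.softmax(self.fc(feature_set), dim=3)     # over K neighbours
    f_agg = (feature_set * att_scores).sum(dim=3, keepdim=True)  # B x d_in x N x 1
    return self.mlp(f_agg)                                  # B x d_out x N x 1
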
Example #13
    def __init__(self, in_channel: int, out_channel: int, k: int = 20, pool=True):
        super().__init__()
        self.eps = 1e-3
        self.k = k
        self.pool = pool
        self.net = pt_utils.Conv2d(in_channel * 2, out_channel, bn=True)
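
The in_channel * 2 input width implies DGCNN-style edge features: each point is paired with its k neighbours as concat(x_i, x_j - x_i), convolved, then (optionally) max-pooled over the neighbours. A hedged forward sketch, where knn_idx (precomputed k-NN indices) is an assumed input:

import torch

# Hypothetical forward for _baseEdgeConv in the DGCNN style.
def forward(self, x, knn_idx):             # x: B x C x N, knn_idx: B x N x k
    B, C, N = x.shape
    idx = knn_idx.reshape(B, 1, N * self.k).expand(B, C, N * self.k)
    neighbors = x.gather(2, idx).view(B, C, N, self.k)
    center = x.unsqueeze(-1).expand_as(neighbors)
    edge = torch.cat([center, neighbors - center], dim=1)  # B x 2C x N x k
    out = self.net(edge)                                   # B x C_out x N x k
    return out.max(dim=-1)[0] if self.pool else out        # pooled: B x C_out x N
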
Example #14
    def __init__(self, K, D, P, C_in, C_out, C_delta, depth_multiplier, sampling='random', with_global=False):
        super().__init__()

        '''
        In the first layer C_in is set to 0, so nn_fts_input is only the delta feature.
        pts (points)   : original points   -> B x 3 x N
        fts (features) : original features -> B x C x N
        qrs (queries)  : query points      -> B x 3 x P
        nn_pts_local   : B x 3 x P x K
        nn_fts_input   : B x (C_in + C_delta) x P x K

        returns : sampled points and their features
        '''
        self.with_global = with_global
        self.sampling = sampling
        if sampling == 'random':
            self.sample = random_indices(P)
        elif sampling == 'fps':
            self.sample = FarthestPointSample(P)
        else:
            raise ValueError('unsupported sampling mode: ' + str(sampling))
        self.K = K
        self.D = D  
        self.P = P
        self.depth_multiplier = depth_multiplier

        # the input to mlp_delta is nn_pts_local (B x 3 x P x K)
        self.mlp_delta = pt_utils.SharedMLP(
            [3, C_delta, C_delta],
            bn=True, activation=nn.ELU(inplace=True),
            act_before_bn=True
        )  # B x C_delta x P x K


        # the input to X_transform is nn_pts_local(B x 3 x P x K)
        self.X_transform0 = pt_utils.Conv2d(
            3,
            K*K,
            kernel_size=(1, K),
            bn=True,
            bias=False,
            activation=nn.ELU(inplace=True),
            act_before_bn=True
            ) # B x K*K x P x 1
        
        self.X_transform1 = nn.Sequential(
            nn.Conv2d(K, K*K, kernel_size=(1, K), groups=K, bias=False),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(K*K)
        ) # B x K*K x P x 1
        nn.init.xavier_uniform_(self.X_transform1[0].weight)

        self.X_transform2 = nn.Sequential(
            nn.Conv2d(K, K*K, kernel_size=(1, K), groups=K, bias=False),
            nn.BatchNorm2d(K*K)
        ) # B x K*K x P x 1
        nn.init.xavier_uniform_(self.X_transform2[0].weight)

        # depth_multiplier = torch.ceil(float(C_out)/(C_in + C_delta))
       

        self.conv = nn.Sequential(
            nn.Conv2d(C_in + C_delta, (C_in + C_delta) * depth_multiplier,
                      kernel_size=(1, K), groups=(C_in + C_delta)),
            # nn.ELU(inplace=True),
            # nn.BatchNorm2d((C_in + C_delta) * depth_multiplier),
            nn.Conv2d((C_in + C_delta) * depth_multiplier, C_out,
                      kernel_size=1, bias=False),
            nn.ELU(inplace=True),
            nn.BatchNorm2d(C_out)
        )  # equivalent to tf.layers.separable_conv2d
        nn.init.xavier_uniform_(self.conv[0].weight)
        nn.init.xavier_uniform_(self.conv[1].weight)

        if self.with_global:
            self.conv_global = pt_utils.SharedMLP(
                [3, C_out // 4, C_out // 4],
                bn=True,
                activation=nn.ELU(inplace=True),
                act_before_bn=True
            )
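
The grouped conv + 1x1 conv pair in self.conv mirrors tf.layers.separable_conv2d: a depthwise stage (groups equal to the input channel count, widened by depth_multiplier) followed by a pointwise 1x1 projection. A standalone shape check with illustrative sizes (C_in + C_delta = 8, depth_multiplier = 4, K = 16, C_out = 64):

import torch
import torch.nn as nn

x = torch.randn(2, 8, 32, 16)                    # B x C x P x K
depthwise = nn.Conv2d(8, 8 * 4, kernel_size=(1, 16), groups=8)
pointwise = nn.Conv2d(8 * 4, 64, kernel_size=1, bias=False)
y = pointwise(depthwise(x))
print(y.shape)                                   # torch.Size([2, 64, 32, 1])
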