Example #1
    def execute(self, pcd, prev_s):
        """
        Args:
            pcd: b, n, 3
            prev_s: b, c, n
        """
        b, n, _ = pcd.shape
        pcd_bcn = pcd.transpose(0, 2, 1)
        l0_xyz = pcd
        l0_points = pcd_bcn
        if self.if_noise:
            # append Gaussian noise channels to the per-point features
            noise_points = init.gauss([b, 3, n], 'float', mean=0.0, std=self.noise_stdv)
            l0_points = concat([l0_points, noise_points], 1)
        l1_xyz, l1_points = self.sa_module_1(l0_xyz, l0_points)  # b, 512, 128 (bnc)
        l2_xyz, l2_points = self.sa_module_2(l1_xyz, l1_points)
        l3_xyz, l3_points = self.sa_module_3(l2_xyz, l2_points)

        l2_points = self.fp_module_3(l2_xyz, l3_xyz, l2_points, l3_points)
        l2_points, prev_s['l2'] = self.unit_3(l2_points, prev_s['l2'])

        l1_points = self.fp_module_2(l1_xyz, l2_xyz, l1_points, l2_points)
        l1_points, prev_s['l1'] = self.unit_2(l1_points, prev_s['l1'])

        l0_points = self.fp_module_1(l0_xyz, l1_xyz, concat([pcd_bcn, pcd_bcn], dim=1), l1_points)
        l0_points, prev_s['l0'] = self.unit_1(l0_points, prev_s['l0'])  # (B, 128, 2048)

        # sample fresh noise, predict a per-point offset, and scale the offset
        # down by a factor of 10 for each later refinement step
        noise = init.gauss([b, 32, n], 'float', mean=0.0, std=1.0)
        feat = concat([l0_points, noise], dim=1)
        delta_xyz = self.tanh(self.mlp_conv(feat)) * 1.0 / 10 ** (self.step - 1)
        point_cloud = (pcd_bcn + delta_xyz).transpose(0, 2, 1)  # back to (b, n, 3)
        return point_cloud, delta_xyz
Example #2
def test_cuda_knn():
    from jittor import init 
    jt.flags.use_cuda = 1
    inq_shape = [32, 128, 1024]
    input_q = init.gauss(inq_shape, dtype='float')
    inr_shape = [32, 128, 256]
    input_r = init.gauss(inr_shape, dtype='float')

    # print (input_x.shape)
    # x = input_x.permute(0, 2, 1)
    cuda_knn = KNN(k=200)

    import time
    for i in range (100):
        jt.sync_all(True)
        start_time = time.time()
        idx = knn_point(200, input_r.permute(0, 2, 1), input_q.permute(0, 2, 1)) 
        jt.sync_all(True)
        end_time = time.time()
        print ('python time', end_time - start_time) 
    print (idx.shape)
    print (idx[0,0,:])  

    for i in range (100):
        jt.sync_all(True)
        start_time = time.time()
        idx_cuda = cuda_knn(input_q, input_r)
        jt.sync_all(True)
        end_time = time.time()
        print ('cuda run time', end_time - start_time) 
    idx_cuda = idx_cuda.permute(0, 2, 1) 
    print (idx_cuda[0,0,:])
    print (idx_cuda.shape)
Example #3
def main():
    model = PointNet(n_classes=40)
    input_point = init.gauss([2, 1024, 3], 'float', mean=0.0)
    input_feature = init.gauss([2, 1024, 3], 'float', mean=0.0)
    print (input_point.shape)
    print (input_feature.shape)
    outputs = model(input_point, input_feature)
    print (outputs.shape)
Example #4
def main():
    model = PointNet2_partseg()
    input_point = init.gauss([2, 1024, 3], 'float', mean=0.0)
    input_feature = init.gauss([2, 1024, 3], 'float', mean=0.0)
    cls_label = init.gauss([2, 16], 'float', mean=0.0)

    print(input_point.shape)
    print(input_feature.shape)
    print(cls_label.shape)
    outputs = model(input_point, input_feature, cls_label)
    print(outputs.shape)
Example #5
    def test_normal(self):
        from jittor import init
        n = 10000
        r = 0.155
        a = init.gauss([n], "float32", 1, 3)
        data = a.data

        assert (np.abs((data < (1 - 3)).mean() - r) < 0.1)
        assert (np.abs((data < (1)).mean() - 0.5) < 0.1)
        assert (np.abs((data < (1 + 3)).mean() - (1 - r)) < 0.1)

        np_res = np.random.normal(1, 0.1, (100, 100))
        jt_res = jt.normal(1., 0.1, (100, 100))
        assert (np.abs(np_res.mean() - jt_res.data.mean()) < 0.1)
        assert (np.abs(np_res.std() - jt_res.data.std()) < 0.1)

        np_res = torch.normal(torch.arange(1., 10000.), 1)
        jt_res = jt.normal(jt.arange(1, 10000), 1)
        assert (np.abs(np_res.mean() - jt_res.data.mean()) < 0.1)
        assert (np.abs(np_res.std() - jt_res.data.std()) < 1)

        np_res = np.random.randn(100, 100)
        jt_res = jt.randn(100, 100)
        assert (np.abs(np_res.mean() - jt_res.data.mean()) < 0.1)
        assert (np.abs(np_res.std() - jt_res.data.std()) < 0.1)

        np_res = np.random.rand(100, 100)
        jt_res = jt.rand(100, 100)
        assert (np.abs(np_res.mean() - jt_res.data.mean()) < 0.1)
        assert (np.abs(np_res.std() - jt_res.data.std()) < 0.1)
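
The assertions above pin down the call signature used throughout these examples: init.gauss(shape, dtype, mean, std) returns a jt.Var of the given shape drawn from a normal distribution with that mean and standard deviation, and jt.normal / jt.randn / jt.rand are the torch-style counterparts checked against NumPy. A minimal standalone sketch (the shape and statistics here are chosen for illustration, not taken from the test):

import jittor as jt
from jittor import init

x = init.gauss([4, 3], 'float32', mean=0.0, std=2.0)  # 4x3 samples from N(0, 2**2)
y = jt.randn(4, 3)                                     # standard normal samples
print(x.shape, float(x.data.std()))                    # std should be roughly 2
print(y.shape, float(y.data.mean()))                   # mean should be roughly 0
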
Example #6
 def __init__(self,
              input_size,
              output_size,
              gain=2**0.5,
              use_wscale=False,
              lrmul=1,
              bias=True):
     super().__init__()
     he_std = gain * input_size**(-0.5)  # He init
     # Equalized learning rate and custom learning rate multiplier.
     if use_wscale:
         init_std = 1.0 / lrmul
         self.w_mul = he_std * lrmul
     else:
         init_std = he_std / lrmul
         self.w_mul = lrmul
     # self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std)
     # self.weight = jt.random([output_size, input_size], 'float', 'normal') * init_std
     self.weight = init.gauss([output_size, input_size],
                              'float32') * init_std
     if bias:
         # self.bias = torch.nn.Parameter(torch.zeros(output_size))
         # self.bias = jt.zeros(output_size)
         self.bias = init.constant([output_size], 'float32', 0.0)
         self.b_mul = lrmul
     else:
         self.bias = None
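
Only the constructor of this equalized-learning-rate linear layer (the StyleGAN trick) is shown: the weight is drawn with init.gauss at a fixed std, and the He scaling is instead folded into w_mul, which has to be applied on every forward call. The execute below is a hypothetical sketch of that runtime rescaling, not the project's actual implementation; the matmul-based formulation and the method name are assumptions.

 def execute(self, x):
     # rescale the stored weight on every call; this is what keeps the
     # effective learning rate of all layers comparable ("equalized lr")
     weight = self.weight * self.w_mul              # (output_size, input_size)
     out = jt.matmul(x, weight.transpose(1, 0))     # (..., input_size) -> (..., output_size)
     if self.bias is not None:
         out = out + self.bias * self.b_mul
     return out
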
Example #7
    def execute(self, point_cloud):
        """
        Args:
            point_cloud: Tensor, (B, 2048, 3)
        """
        b, npoint, _ = point_cloud.shape
        #pcd_bcn = point_cloud.permute(0, 2, 1)
        prev_s = {
            'l0': init.gauss((b, 128, npoint), 'float', mean=0.0, std=1.0),
            'l1': init.gauss((b, 128, 512), 'float', mean=0.0, std=1.0),
            'l2': init.gauss((b, 256, 128), 'float', mean=0.0, std=1.0)
        }
        pcd_out_1, delta1 = self.step_1(point_cloud, prev_s)
        pcd_out_2, delta2 = self.step_2(pcd_out_1, prev_s)
        pcd_out_3, delta3 = self.step_3(pcd_out_2, prev_s)

        return [pcd_out_1, pcd_out_2, pcd_out_3], [delta1, delta2, delta3]
Example #8
 def __init__(self,
              input_channels,
              output_channels,
              kernel_size,
              stride=1,
              gain=2**0.5,
              use_wscale=False,
              lrmul=1,
              bias=True,
              intermediate=None,
              upscale=False,
              downscale=False):
     super().__init__()
     if upscale:
         self.upscale = Upscale2d()
     else:
         self.upscale = None
     if downscale:
         self.downscale = Downscale2d()
     else:
         self.downscale = None
     he_std = gain * (input_channels * kernel_size**2)**(-0.5)  # He init
     self.kernel_size = kernel_size
     if use_wscale:
         init_std = 1.0 / lrmul
         self.w_mul = he_std * lrmul
     else:
         init_std = he_std / lrmul
         self.w_mul = lrmul
     # self.weight = torch.nn.Parameter(
     #    torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std)
     # self.weight = jt.random([output_channels, input_channels, kernel_size, kernel_size], 'float', 'normal') * init_std
     self.weight = init.gauss(
         [output_channels, input_channels, kernel_size, kernel_size],
         'float32') * init_std
     if bias:
         # self.bias = torch.nn.Parameter(torch.zeros(output_channels))
         # self.bias = jt.zeros(output_channels)
         self.bias = init.constant([output_channels], 'float32', 0.0)
         self.b_mul = lrmul
     else:
         self.bias = None
     self.intermediate = intermediate
Example #9
File: pct.py  Project: wddwzc/PCT
        self.q_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
        self.k_conv = nn.Conv1d(channels, channels // 4, 1, bias=False)
        self.q_conv.conv.weight = self.k_conv.conv.weight  # Q and K share weights
        self.v_conv = nn.Conv1d(channels, channels, 1)
        self.trans_conv = nn.Conv1d(channels, channels, 1)
        self.after_norm = nn.BatchNorm1d(channels)
        self.act = nn.ReLU()
        self.softmax = nn.Softmax(dim=-1)

    def execute(self, x):
        x_q = self.q_conv(x).permute(0, 2, 1)  # b, n, c//4
        x_k = self.k_conv(x)  # b, c//4, n
        x_v = self.v_conv(x)
        energy = nn.bmm(x_q, x_k) # b, n, n 
        attention = self.softmax(energy)
        attention = attention / (1e-9 + attention.sum(dim=1, keepdims=True))
        x_r = nn.bmm(x_v, attention) # b, c, n 
        x_r = self.act(self.after_norm(self.trans_conv(x - x_r)))
        x = x + x_r
        return x

if __name__ == '__main__':
    
    jt.flags.use_cuda=1
    input_points = init.gauss((16, 3, 1024), dtype='float32')  # B, D, N 


    network = Point_Transformer()
    out_logits = network(input_points)
    print (out_logits.shape)
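
For reference, the execute above is the offset-attention block from PCT, restated as formulas read directly off the code (epsilon = 1e-9 is the constant from the code, W_t stands for trans_conv, and Q = q_conv(x)^T, K = k_conv(x), V = v_conv(x)):

A = \mathrm{softmax}(QK), \qquad
\tilde{A}_{ij} = \frac{A_{ij}}{\varepsilon + \sum_i A_{ij}}, \qquad
y = x + \mathrm{ReLU}\big(\mathrm{BN}\big(W_t\,(x - V\tilde{A})\big)\big)

The softmax normalizes each row of the n-by-n attention map; the second division then normalizes each column, which is the "offset-attention" twist this block adds on top of standard dot-product attention.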