Code Example #1
 def compose(self, mode, item):
     ref_cloud = self.data[item, ...]
     R = generate_random_rotation_matrix()
     t = generate_random_tranlation_vector()
     if mode == 'clean':
         # Source is an exact rigid-transform copy of the reference.
         ref_cloud = random_select_points(ref_cloud, m=self.npts)
         src_cloud_points = transform(ref_cloud[:, :3], R, t)
         # Normals are direction vectors: rotate only, never translate.
         src_cloud_normal = transform(ref_cloud[:, 3:], R)
         src_cloud = np.concatenate([src_cloud_points, src_cloud_normal],
                                    axis=-1)
         return src_cloud, ref_cloud, R, t
     elif mode == 'partial':
         # Sample source and reference independently, then crop the source
         # so the two clouds only partially overlap.
         source_cloud = random_select_points(ref_cloud, m=self.npts)
         ref_cloud = random_select_points(ref_cloud, m=self.npts)
         src_cloud_points = transform(source_cloud[:, :3], R, t)
         src_cloud_normal = transform(source_cloud[:, 3:], R)
         src_cloud = np.concatenate([src_cloud_points, src_cloud_normal],
                                    axis=-1)
         src_cloud = random_crop(src_cloud, p_keep=0.7)
         return src_cloud, ref_cloud, R, t
     elif mode == 'noise':
         # Same independent sampling as 'partial' but without cropping; the
         # noise itself is presumably added downstream (e.g. by jittering).
         source_cloud = random_select_points(ref_cloud, m=self.npts)
         ref_cloud = random_select_points(ref_cloud, m=self.npts)
         src_cloud_points = transform(source_cloud[:, :3], R, t)
         src_cloud_normal = transform(source_cloud[:, 3:], R)
         src_cloud = np.concatenate([src_cloud_points, src_cloud_normal],
                                    axis=-1)
         return src_cloud, ref_cloud, R, t
     else:
         raise NotImplementedError(mode)
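If `compose` is a method of the `ModelNet40` dataset whose constructor appears in Example #4 (an assumption; the snippets are shown out of context), direct usage would look roughly like this; `'data/modelnet40'` is a placeholder path:

     # Hypothetical usage sketch.
     dataset = ModelNet40(root='data/modelnet40', npts=1024)
     src_cloud, ref_cloud, R, t = dataset.compose(mode='partial', item=0)
     # src_cloud partially overlaps ref_cloud and is related to it by (R, t).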
Code Example #2
 def __getitem__(self, item):
     file = self.files[item]
     ref_cloud = readpcd(file, rtype='npy')
     ref_cloud = random_select_points(ref_cloud, m=self.npts)
     ref_cloud = pc_normalize(ref_cloud)
     # Random rigid transform: rotation angles drawn from [-20, 20] degrees,
     # translation components from [-0.5, 0.5].
     R = generate_random_rotation_matrix(-20, 20)
     t = generate_random_tranlation_vector(-0.5, 0.5)
     src_cloud = transform(ref_cloud, R, t)
     if self.train:
         # Jitter both clouds as training-time augmentation.
         ref_cloud = jitter_point_cloud(ref_cloud)
         src_cloud = jitter_point_cloud(src_cloud)
     return ref_cloud, src_cloud, R, t
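Example #2 leans on small utilities that are not defined in these snippets. A minimal sketch of two of them, assuming the usual PointNet-style conventions (these bodies are illustrative, not the repository's actual code):

     import numpy as np

     def pc_normalize(pc):
         # Center the cloud at the origin and scale it into the unit sphere.
         pc = pc - np.mean(pc, axis=0)
         scale = np.max(np.sqrt(np.sum(pc ** 2, axis=1)))
         return pc / scale

     def jitter_point_cloud(pc, sigma=0.01, clip=0.05):
         # Add clipped, zero-mean Gaussian noise to every coordinate.
         noise = np.clip(sigma * np.random.randn(*pc.shape), -clip, clip)
         return pc + noise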
Code Example #3
 def __init__(self, root, npts, train=True):
     super(CustomData, self).__init__()
     dirname = 'train_data' if train else 'val_data'
     path = os.path.join(root, dirname)
     self.train = train
     self.files = [
         os.path.join(path, item) for item in sorted(os.listdir(path))
     ]
     self.npts = npts
     # Pre-generate one rigid transform per file so every epoch (and every
     # evaluation run) sees the same transform for a given item.
     n = len(self.files)
     self.Rs = [generate_random_rotation_matrix(-20, 20) for _ in range(n)]
     self.ts = [
         generate_random_tranlation_vector(-0.5, 0.5) for _ in range(n)
     ]
     self.caches = {}
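A `__getitem__` that pairs with this constructor would read a file, cache it, and apply the pre-generated transform for that index. The body below is a sketch, not the repository's actual method; `readpcd` and the other helpers are assumed to behave as in Example #2:

     def __getitem__(self, item):
         file = self.files[item]
         # Cache decoded clouds so repeated epochs skip disk I/O.
         if item not in self.caches:
             self.caches[item] = readpcd(file, rtype='npy')
         ref_cloud = random_select_points(self.caches[item], m=self.npts)
         ref_cloud = pc_normalize(ref_cloud)
         # Reuse the fixed per-item transform for reproducible evaluation.
         R, t = self.Rs[item], self.ts[item]
         src_cloud = transform(ref_cloud, R, t)
         return ref_cloud, src_cloud, R, t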
Code Example #4
 def __init__(self, root, npts, train=True):
     super(ModelNet40, self).__init__()
     self.npts = npts
     self.train = train
     # ModelNet40 ships as five training shards and two test shards (HDF5).
     if train:
         files = [
             os.path.join(root, 'ply_data_train{}.h5'.format(i))
             for i in range(5)
         ]
     else:
         files = [
             os.path.join(root, 'ply_data_test{}.h5'.format(i))
             for i in range(2)
         ]
     self.data, self.labels = self.decode_h5(files)
     # Fix one random rigid transform per item for repeatable evaluation.
     n = len(self.data)
     self.Rs = [generate_random_rotation_matrix() for _ in range(n)]
     self.ts = [generate_random_tranlation_vector() for _ in range(n)]
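`decode_h5` is not shown in these snippets. Assuming the standard ModelNet40 HDF5 shard layout (a `data` array of point clouds plus a `label` array per file), a minimal implementation with `h5py` might look like this:

     import h5py
     import numpy as np

     def decode_h5(self, files):
         data, labels = [], []
         for file in files:
             with h5py.File(file, 'r') as f:
                 data.append(f['data'][...])     # (B, N, 3) points, or 6 dims with normals
                 labels.append(f['label'][...])  # (B, 1) class ids
         return np.concatenate(data, axis=0), np.concatenate(labels, axis=0)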
Code Example #5
 def compute_spatial_filter(self, tSigma1, tSigma2, d=None):
     # Requires: import numpy as np
     #           from scipy.linalg import expm, fractional_matrix_power
     # tSigma1: (num_epochs, num_features, num_features)
     # tSigma2: (num_epochs, num_features, num_features)
     assert tSigma1.shape == tSigma2.shape
     assert tSigma1.shape[1] == tSigma1.shape[2]
     n_features = tSigma1.shape[1]
     tSigma1_orig = tSigma1
     tSigma2_orig = tSigma2

     if d is None:
         d = 3

     # Average covariance across trials before computing the whitening matrix.
     Sigma1 = np.mean(tSigma1, axis=0)
     Sigma2 = np.mean(tSigma2, axis=0)
     # Whitening matrix (Sigma1 + Sigma2)^(-1/2), regularized for stability.
     # TODO: P should be symmetric. Check.
     P = fractional_matrix_power(
         Sigma1 + Sigma2 + np.eye(Sigma1.shape[0]) * 1e-10, -0.5)
     # Random orthogonal initialization of the rotation being optimized.
     R = generate_random_rotation_matrix(n_features)

     if self.verbose:
         print(P)

     # Whiten the covariances, then rotate them by the current estimate of R.
     wSigma1 = left_right_multiply_covariance(tSigma1, P)
     wSigma2 = left_right_multiply_covariance(tSigma2, P)
     tSigma1 = left_right_multiply_covariance(wSigma1, R)
     tSigma2 = left_right_multiply_covariance(wSigma2, R)

     prev_obj_value = -np.inf
     for i in range(self.n_iter):
         obj_value = self.objective_func(R, wSigma1, wSigma2, d)
         gradient = self.gradient_func(R, P, tSigma1_orig, tSigma2_orig, d)
         t = self._compute_optimal_step_size(
             gradient, obj_value, wSigma1, wSigma2, d)

         # TODO: Figure out what the line search algorithm should be.
         if t != 0:
             # Step along the rotation manifold via the matrix exponential.
             U = expm(t * gradient)
             R = np.dot(U, R)

             tSigma1_orig = left_right_multiply_covariance(tSigma1_orig, U)
             tSigma2_orig = left_right_multiply_covariance(tSigma2_orig, U)

         # TODO: The abs should be unnecessary, because the objective
         # function should keep increasing each iteration.
         if np.abs(obj_value - prev_obj_value) < self.tolerance:
             print("Converged with objective value: {}".format(obj_value))
             break

         prev_obj_value = obj_value

         if self.verbose:
             print("Iteration {}/{}; Objective: {}".format(
                 i + 1, self.n_iter, prev_obj_value))

     # Take the first `d` rows of the demixing matrix R P as the subspace.
     # TODO: Not sure if it is correct to average across trials here.
     Sigma1 = np.mean(tSigma1, axis=0)
     V = np.dot(R, P)[:d, :].T  # shape: (n_features, d)

     if self.verbose:
         print(V)

     # Eigendecompose the class-1 covariance projected into the d-dim subspace.
     W, G = np.linalg.eigh(np.dot(np.dot(V.T, Sigma1), V))
     W, G = self._sort_eig_descending(W, G)
     assert G.shape == (d, d)

     V_star = np.dot(V, G)
     return V_star
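Every example above depends on `generate_random_rotation_matrix`. Examples #1 through #4 call it with no arguments or with angle bounds in degrees, while Example #5 passes a matrix dimension, which suggests a uniformly random n-by-n rotation. One standard way to sample such a matrix (QR decomposition of a Gaussian matrix with sign correction) is sketched below; this is an assumption about the helper, not its actual definition:

     import numpy as np

     def generate_random_rotation_matrix(n):
         # QR-decompose a Gaussian matrix; Q is orthogonal.
         A = np.random.randn(n, n)
         Q, R = np.linalg.qr(A)
         # Fix column signs so the draw is Haar-uniform over O(n).
         Q = Q * np.sign(np.diag(R))
         # Flip one column if necessary so det(Q) = +1 (a proper rotation).
         if np.linalg.det(Q) < 0:
             Q[:, 0] = -Q[:, 0]
         return Q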