Code Example #1
def jacobian_vect(odometry_vector, ndt_cloud, test_pc):
    """
    Function to return the Jacobian of the likelihood objective for the odometry calculation
    :param odometry_vector: The point at which the Jacobian is to be evaluated
    :param ndt_cloud: The NDT cloud with respect to which the odometry is required
    :param test_pc: The point cloud for which the optimization is being performed
    :return: jacobian_val: The Jacobian (gradient vector) of the objective w.r.t. the odometry vector
    """
    jacobian_val = np.zeros(6)
    transformed_pc = utils.transform_pc(odometry_vector, test_pc)
    points_in_voxels = ndt_cloud.bin_in_voxels(transformed_pc)
    for key, value in points_in_voxels.items():
        if key in ndt_cloud.stats:
            # Vectorized implementation
            mu = ndt_cloud.stats[key]['mu']
            sigma = ndt_cloud.stats[key]['sigma']
            sigma_inv = np.linalg.inv(sigma)
            q = value - mu
            if q.ndim < 2:
                q = np.atleast_2d(q)
            delq_delt = find_delqdelt_vect(odometry_vector, value)
            first_term = np.matmul(q, np.matmul(sigma_inv, delq_delt.T))
            exp_term = np.exp(-0.5 *
                              np.diag(np.matmul(q, np.matmul(sigma_inv, q.T))))
            g = np.sum(np.diagonal(first_term, axis1=1, axis2=2) * exp_term,
                       axis=1)
            jacobian_val += g
    global jacob_neval
    jacob_neval += 1
    return jacobian_val
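A quick way to validate this analytic gradient is to compare it with a central finite difference of the objective function from example #14. A minimal sketch, assuming jacobian_vect and objective are in scope together with an ndt_cloud/test_pc pair from the same pipeline:

import numpy as np

def check_jacobian(odometry_vector, ndt_cloud, test_pc, eps=1e-6):
    # Central finite difference of the scalar objective, one odometry axis at a time
    analytic = jacobian_vect(odometry_vector, ndt_cloud, test_pc)
    numeric = np.zeros(6)
    for i in range(6):
        step = np.zeros(6)
        step[i] = eps
        numeric[i] = (objective(odometry_vector + step, ndt_cloud, test_pc)
                      - objective(odometry_vector - step, ndt_cloud, test_pc)) / (2 * eps)
    return np.max(np.abs(analytic - numeric))  # should be small if the gradient is consistent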
Code Example #2
def interp_objective(odometry_vector, ndt_cloud, test_pc):
    """
    Combine interpolated likelihood calculation with point cloud transformation to obtain objective function
    :param odometry_vector: Candidate odometry vector
    :param ndt_cloud: The NDT cloud with respect to which the odometry is required
    :param test_pc: Input point cloud
    :return: objective_val: Maximization objective which is the likelihood of transformed point cloud for the given NDT
    """
    assert (ndt_cloud.cloud_type == 'interpolate')
    global obj_neval
    global jacob_neval
    global hess_neval
    transformed_pc = utils.transform_pc(odometry_vector, test_pc)
    obj_value = -1 * ndt_cloud.find_likelihood(transformed_pc)
    if hess_neval % 5 == 0:
        print('Objective iteration: {:4d}'.format(obj_neval),
              'Jacobian iteration: {:4d}'.format(jacob_neval),
              'Hessian iteration: {:4d}'.format(hess_neval),
              'Objective Value: {:10.4f}'.format(obj_value))
    """
    , ' Odometry:',
          ' x: {:2.5f}'.format(odometry_vector[0]), ' y: {:2.5f}'.format(odometry_vector[1]),
          ' z: {:2.5f}'.format(odometry_vector[2]), ' Phi: {:2.5f}'.format(odometry_vector[3]),
          ' Theta:{:2.5f}'.format(odometry_vector[4]), ' Psi: {:2.5f}'.format(odometry_vector[5])
    """
    obj_neval += 1
    return obj_value
Code Example #3
def search_initial(ndt_cloud, test_pc, limit=0.5, case_num=10):
    """
    Returns an initial x, y guess based on a global grid evaluation of the objective function
    :param ndt_cloud: Reference NDT cloud
    :param test_pc: Point cloud for which the initial guess is required
    :param limit: The search limit for the grid search (about the origin)
    :param case_num: Number of x and y coordinates (respectively) for which the grid search is performed
    :return: initial_odom: Initial guess of the minimizing point
    """
    t0 = time.time()
    print('Searching for initial point')
    x_search = np.linspace(-limit, limit, case_num)
    grid_objective = np.zeros([case_num, case_num])
    y_search = np.linspace(-limit, limit, case_num)
    for i in range(case_num):
        for j in range(case_num):
            odom = np.zeros(6)
            odom[0] = x_search[i]
            odom[1] = y_search[j]
            transformed_pc = utils.transform_pc(odom, test_pc)
            grid_objective[i, j] = ndt_cloud.find_likelihood(transformed_pc)
    x_ind, y_ind = np.unravel_index(np.argmax(grid_objective),
                                    [case_num, case_num])
    initial_odom = np.array([x_search[x_ind], y_search[y_ind], 0, 0, 0, 0])
    print('The search time is ', time.time() - t0)
    print('The initial odometry is ', initial_odom)
    return initial_odom
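A hedged usage sketch: the grid search costs case_num ** 2 likelihood evaluations, so a coarse grid is typically refined by the gradient-based solver afterwards (ndt_cloud and test_pc are stand-ins for objects built elsewhere in the pipeline):

from scipy.optimize import minimize

initial_odom = search_initial(ndt_cloud, test_pc[:, :3], limit=1.0, case_num=20)
res = minimize(objective, initial_odom, method='Newton-CG',
               jac=jacobian_vect, hess=hessian_vect,
               args=(ndt_cloud, test_pc[:, :3]))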
Code Example #4
 def get_ref_points(self, samplept, r, noise_level, model_resolution, flip_nr=False):
     
     i = self.tree.query_ball_point(samplept[0:3], r=r)
     distances, indices = self.tree.query(samplept[0:3], k=len(i))
     
     #query_time = time.time() - start_time
     #print "query time: {0:.2f}".format(query_time)
     local_points1 = self.data[indices]
     
     #if local_points1.shape[0] > 5000:
     #    local_points1, _ = Sampler.sample(local_points1, -1, 5000, sampling_algorithm=SampleAlgorithm.Uniform)
     
     
     if noise_level > 0:
         local_points = utils.add_noise_normal(local_points1, mr=model_resolution, std=noise_level)
     else:
         local_points = local_points1
     #print "local points shape: ", local_points.shape
     
     if len(i) <= 8:
         return []
     if self.use_point_as_mean:
         mu = samplept[0:3]
     else:
         #TODO: compute real mean
         mu = np.zeros(3)
         point_count = local_points.shape[0]
         mu[0] = np.sum(local_points[:, 0]) / point_count
         mu[1] = np.sum(local_points[:, 1]) / point_count
         mu[2] = np.sum(local_points[:, 2]) / point_count
     if self.use_normals_pc:
         print("meshlab normal")
         min_d = np.argmin(distances)
         #if distances[min_d] > 0:
         #    raise
         nr = self.normals[indices[min_d]] 
         #nr = samplept[3:6]
         #TODO: get_weighted_normal
     else:# calc normals
         cov_mat = utils.build_cov(local_points, mean=mu)
         w, v = LA.eigh(cov_mat)
         min_id = np.argmin(w)
         #print 'eigenvalues: ', w
         nr1 = np.transpose(v[:, min_id])
         nr = utils.normalize(nr1)
         if flip_nr:
             nr = [-x for x in nr]
         
     z_axis = np.array([0, 0, 1])
     origin = np.zeros(3)
     local_pose = utils.align_vectors(mu, nr, origin, z_axis)
     ref_points = utils.transform_pc(local_points, pose=local_pose)
     return ref_points, local_points
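The normal-estimation branch above is standard PCA: the local surface normal is the eigenvector of the neighbourhood covariance with the smallest eigenvalue. A self-contained sketch of the same idea in plain NumPy, independent of the utils helpers:

import numpy as np

def pca_normal(local_points, mu):
    # 3x3 covariance of the neighbourhood about the mean mu
    q = local_points - mu
    cov = q.T @ q / local_points.shape[0]
    # eigh returns eigenvalues in ascending order, so column 0 spans the normal direction
    w, v = np.linalg.eigh(cov)
    return v[:, 0] / np.linalg.norm(v[:, 0])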
Code Example #5
def odometry(ndt_cloud,
             test_pc,
             max_iter_pre=15,
             max_iter_post=10,
             integrity_filter=0.7):
    """
    Function to find the best transformation (in the form of a translation, Euler angle vector)
    :param ndt_cloud: Reference NDT point cloud.
    :param test_pc: Point cloud which is to be matched with the ndt_cloud.
    :param max_iter_pre: Maximum number of iterations before removing low consensus voxels.
    :param max_iter_post: Maximum number of iterations after removing low consensus voxels.
    :param integrity_filter: Value of voxel consensus metric below which voxels are removed.
    :return odom_vector: Odometry vector as a result of optimization
    """
    global obj_neval
    obj_neval = 0
    global jacob_neval
    jacob_neval = 0
    global hess_neval
    hess_neval = 0
    test_xyz = test_pc[:, :3]
    initial_odom = np.zeros(6)
    res1 = minimize(objective,
                    initial_odom,
                    method='Newton-CG',
                    jac=jacobian_vect,
                    hess=hessian_vect,
                    args=(ndt_cloud, test_xyz),
                    options={
                        'disp': True,
                        'maxiter': max_iter_pre
                    })
    temp_odom_vector = res1.x
    transformed_xyz = utils.transform_pc(temp_odom_vector, test_xyz)
    ndt_cloud.find_integrity(transformed_xyz)
    ndt_cloud.filter_voxels_integrity(integrity_limit=integrity_filter)
    if max_iter_post != 0:
        res2 = minimize(objective,
                        temp_odom_vector,
                        method='Newton-CG',
                        jac=jacobian_vect,
                        hess=hessian_vect,
                        args=(ndt_cloud, test_xyz),
                        options={
                            'disp': True,
                            'maxiter': max_iter_post
                        })
        odom_vector = res2.x
    else:
        odom_vector = np.copy(temp_odom_vector)
    # Return odometry in navigational frame of reference
    return odom_vector
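A minimal call sketch, assuming the reference scan has already been approximated as an NDT cloud elsewhere in the pipeline:

odom_vector = odometry(ndt_cloud, test_pc, max_iter_pre=15, max_iter_post=10)
aligned_xyz = utils.transform_pc(odom_vector, test_pc[:, :3])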
Code Example #6
def process_rotation(dir_temp, X, rot_i, global_i, ref_points, num_rotations, r, noise_i, relR_i, nr_i, augs, thetas, z_axis, patch_dim):
    aug_num = augs[noise_i, relR_i, rot_i, nr_i]
    theta = thetas[rot_i]
    rot2d = utils.angle_axis_to_rotation(theta, z_axis)
    rot_points = utils.transform_pc(ref_points, rot2d)
    use_tsdf = False
    #rz = np.max ([np.max(ref_points[:, 2]), -np.min(ref_points[:, 2])])
    rz = r
    xs1 = np.asarray(((rot_points[:, 0] + r) / (2 * r))*(patch_dim - 1), np.int16)
    ys1 = np.asarray(((rot_points[:, 1] + r) / (2 * r))*(patch_dim - 1), np.int16)
    zs1 = np.asarray(((rot_points[:, 2] + rz) / (2 * rz))*(patch_dim - 1), np.int16)
    # Keep only points whose voxel indices fall inside the grid on every axis
    inside = np.logical_and.reduce((xs1 >= 0, xs1 < patch_dim,
                                    ys1 >= 0, ys1 < patch_dim,
                                    zs1 >= 0, zs1 < patch_dim))
    xs = xs1[inside]
    ys = ys1[inside]
    zs = zs1[inside]
    X1 = np.zeros((patch_dim, patch_dim, patch_dim, 1), np.int8)
    X1[xs, ys, zs, 0] = 1
    #np.save(dir_temp + "sample_" + str(global_i) + "_" + str(aug_num), X1)
    
    """
    return None
    try:
        X1[xs, ys, zs, 0] = 1
        #np.save(dir_temp + "sample_" + str(global_i) + "_" + str(aug_num), X1)
        #print "file saved: ", "sample_" + str(global_i) + "_" + str(aug_num)
    except IndexError as inst:
        print ("index out of range")
        print inst
        for rot_pt in rot_points:               
            x = int(((rot_pt[0] + r) / (2 * r))*(patch_dim - 1))
            y = int(((rot_pt[1] + r) / (2 * r))*(patch_dim - 1))
            z = int(((rot_pt[2] + rz) / (2 * rz))*(patch_dim - 1))
            if (z >= 0) and (z < patch_dim) and (y >= 0) and (y < patch_dim) and (x >= 0) and (x < patch_dim):
                X1[x, y, z, 0] = 1
        #np.save(dir_temp + "sample_" + str(global_i) + "_" + str(aug_num), X1)
    except Exception as inst:
        print ("Unexpected exception: ", type(inst))
        print(inst.args)
        print(inst)
        #print ("Exception message: ", inst)
        raise
        """
        
    if use_tsdf:
        X_tsdf = np.zeros((patch_dim, patch_dim, patch_dim, 1), np.float64)
        X_tsdf[:, :, :, 0] = ndimage.distance_transform_edt(1 - X1[:, :, :, 0])
        X_tsdf[:, :, :, 0] /= np.max(X_tsdf[:, :, :, 0])
        np.save(dir_temp + "sample_" + str(global_i) + "_" + str(aug_num), X_tsdf)
    else:
        np.save(dir_temp + "sample_" + str(global_i) + "_" + str(aug_num), X1)    
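The occupancy grid is built by mapping each coordinate from [-r, r] (or [-rz, rz] for z) onto an integer cell in [0, patch_dim - 1]. A sketch of that mapping isolated as a reusable helper, with the two-sided bound check written out:

import numpy as np

def voxel_indices(points, r, rz, patch_dim):
    # Scale each axis from its physical range to [0, patch_dim - 1] and truncate
    offset = np.array([r, r, rz])
    scale = np.array([2 * r, 2 * r, 2 * rz])
    idx = ((points + offset) / scale * (patch_dim - 1)).astype(np.int16)
    inside = np.all((idx >= 0) & (idx < patch_dim), axis=1)
    return idx[inside]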
Code Example #7
 def read_ply(self, file_name):
     num_samples = self.num_samples // len(self.files_list)
     if self.file_index == len(self.files_list) - 1:
         num_samples = num_samples + (self.num_samples - (num_samples * len(self.files_list)))
     
     root, ext = os.path.splitext(file_name)
     if not os.path.isfile(root + ".npy"):
         ply = PlyData.read(file_name)
         vertex = ply['vertex']
         (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
         points = np.asarray(list(zip(x.ravel(), y.ravel(), z.ravel())))
         np.save(root + ".npy", points)
     else:
         points = np.load(root + ".npy")
     
     #load normals
     if os.path.isfile(root + "_normals" + ".ply"):
         if not os.path.isfile(root + "_normals" + ".npy"):
             ply1 = PlyData.read(root + "_normals" + ".ply")
             vertex = ply1['vertex']
             (nx, ny, nz) = (vertex[t] for t in ('nx', 'ny', 'nz'))
             self.normals = np.asarray(list(zip(nx.ravel(), ny.ravel(), nz.ravel())))
             np.save(root + "_normals" + ".npy", self.normals)
         else:
             self.normals = np.load(root + "_normals" + ".npy")
     
     if self.add_noise:
         self.data = utils.add_noise_normal(points, std=self.nois_std)
     else:
         self.data = np.asarray(points)
     
     self.pc_diameter = utils.get_pc_diameter(self.data)
     self.l = self.relL*self.pc_diameter
     
     rot = utils.angle_axis_to_rotation(self.rotation_angle, self.rotation_axis)
     self.data = utils.transform_pc(self.data, rot)
     
     #plotutils.show_pc(self.data)
     #mlab.show()
             
     #TODO: better sampling
     print "sampling file: ", file_name
     self.samples, self.sample_indices = Sampler.sample(self.data, -1, min_num_point=-1, file_name=file_name, sampling_algorithm=self.sampling_algorithm)
     #self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples, file_name=file_name, sampling_algorithm=self.sampling_algorithm)
     #self.samples = self.samples[0:num_samples]
     #self.sample_indices = self.sample_indices[0:num_samples]
     
     self.tree = spatial.KDTree(self.data)
     return self.data
Code Example #8
File: mapping.py Project: mfkiwl/consensus-ndt
def combine_pc_for_map(keyframe_pcs, mapping_odom, map_ndt):
    """
    Function to update the map using the mapping solution
    :param keyframe_pcs: PCs belonging to the current keyframe
    :param mapping_odom: Solution to the mapping objective function
    :param map_ndt: NDT approximation for the map up to the previous keyframe
    :return map_ndt: Updated parameter map_ndt with the given odometry function
    """
    mapping_odom = np.atleast_2d(mapping_odom)
    for idx, pc in enumerate(keyframe_pcs):
        # Transform points to the LiDAR's reference when the measurements were taken
        pc = pc[:, :3]  # Trim the point cloud in case it isn't already trimmed
        inv_odom = utils.invert_odom_transfer(mapping_odom[idx, :])
        trans_pc = utils.transform_pc(inv_odom, pc)
        map_ndt.update_cloud(trans_pc)
    return map_ndt
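Because of the np.atleast_2d call, the function accepts either a single 6-element odometry vector or a (K, 6) array with one row per keyframe point cloud. A hedged usage sketch (the variable names are illustrative):

import numpy as np

mapping_odom = np.vstack([odom_scan_1, odom_scan_2])  # one row per point cloud
map_ndt = combine_pc_for_map([pc_1, pc_2], mapping_odom, map_ndt)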
Code Example #9
def interp_jacobian(odometry_vector, ndt_cloud, test_pc):
    """    
    Compute Jacobian of the interpolated likelihood objective for the odometry calculation (non-vectorized)
    :param odometry_vector: The point at which the Jacobian is to be evaluated
    :param ndt_cloud: The NDT cloud with respect to which the odometry is required
    :param test_pc: The point cloud for which the optimization is being performed
    :return: jacob_val: The Jacobian matrix (of the objective w.r.t the odometry vector)
    """
    assert (ndt_cloud.cloud_type == 'interpolate')
    N = test_pc.shape[0]
    transformed_xyz = utils.transform_pc(odometry_vector, test_pc[:, :3])
    neighbours = ndt_cloud.find_neighbours(transformed_xyz)
    weights = ndt_cloud.find_interp_weights(transformed_xyz, neighbours)[:, :N]

    vect_nearby_init = np.array(np.hsplit(neighbours, 8))
    vert_stack = np.reshape(vect_nearby_init, [N * 8, 3])
    vert_idx = ndt_cloud.pairing_cent2int(vert_stack)
    vect_nearby_idx = np.reshape(vert_idx.T, [8, N]).T
    vect_mus = np.zeros([8, N, 3, 1])
    vect_inv_sigmas = 10000 * np.ones([8, N, 3, 3])

    delq_delt = find_delqdelt_vect(odometry_vector, transformed_xyz)
    # shape N, 3, 6

    for key in ndt_cloud.stats:
        indices = vect_nearby_idx == ndt_cloud.stats[key]['idx']
        mu = ndt_cloud.stats[key]['mu']
        inv_sigma = np.linalg.inv(ndt_cloud.stats[key]['sigma'])
        vect_mus[indices.T, :, 0] = mu
        vect_inv_sigmas[indices.T, :, :] = inv_sigma
    diff = np.zeros_like(vect_mus)
    diff[:, :, :,
         0] = -vect_mus[:, :, :,
                        0] + transformed_xyz[:N, :]  # shape 8, N, 3, 1
    diff_transpose = np.transpose(diff, [0, 1, 3, 2])  # shape 8, N, 1, 3
    lkds = np.exp(-0.5 * np.matmul(np.matmul(diff_transpose, vect_inv_sigmas),
                                   diff))[:, :, 0, 0]  # shape 8, N
    wgt_lkd = weights * lkds  # shape 8, N
    vect_wgt_lkd = np.repeat(wgt_lkd[:, :, np.newaxis, np.newaxis], 6, axis=3)
    # shape 8, N, 1, 6 (likelihood value repeated along last axis)
    vect_delq_delt = np.transpose(
        np.repeat(delq_delt[:, :, :, None], 8, axis=3),
        (3, 0, 1, 2))  # shape 8, N, 3, 6
    vect_jacob = vect_wgt_lkd * np.matmul(
        np.matmul(diff_transpose, vect_inv_sigmas), vect_delq_delt)
    jacob_val = np.sum(np.sum(vect_jacob[:, :, 0, :], axis=0), axis=0)
    return jacob_val
Code Example #10
def hessian_vect(odometry_vector, ndt_cloud, test_pc):
    """
    Vectorized implementation of the function to return an approximation of the Hessian of the likelihood objective
    for the odometry calculation
    :param odometry_vector: The point at which the Hessian is evaluated
    :param ndt_cloud: The NDT cloud with respect to which the odometry is required
    :param test_pc: The point cloud for which the optimization is being carried out
    :return: hessian_val: The Hessian matrix of the objective w.r.t. the odometry vector
    """
    hessian_val = np.zeros([6, 6])
    transformed_pc = utils.transform_pc(odometry_vector, test_pc)
    points_in_voxels = ndt_cloud.bin_in_voxels(transformed_pc)
    for key, value in points_in_voxels.items():
        if key in ndt_cloud.stats:
            if value.ndim == 1:
                value = np.atleast_2d(value)
            mu = ndt_cloud.stats[key]['mu']
            sigma = ndt_cloud.stats[key]['sigma']
            sigma_inv = np.linalg.inv(sigma)
            q = value - mu
            delq_delt = find_delqdelt_vect(odometry_vector, value)
            del2q_deltnm = find_del2q_deltnm_vect(odometry_vector, value)
            exp_term = np.diag(
                np.exp(-0.5 * np.matmul(q, np.matmul(sigma_inv, q.T))))
            temp1 = np.einsum('abi,jbc->aijc', delq_delt,
                              np.matmul(sigma_inv, delq_delt.T))
            term1 = np.sum(np.diagonal(temp1, axis1=0, axis2=3) * exp_term,
                           axis=2)
            term2 = np.sum(
                np.diagonal(np.matmul(q, np.matmul(sigma_inv, del2q_deltnm.T)),
                            axis1=2,
                            axis2=3) * exp_term,
                axis=2)
            temp3 = np.diagonal(
                -np.matmul(q, np.matmul(sigma_inv, delq_delt.T)),
                axis1=1,
                axis2=2) * exp_term
            temp4 = np.diagonal(np.matmul(q, np.matmul(sigma_inv,
                                                       delq_delt.T)),
                                axis1=1,
                                axis2=2)
            term3 = np.matmul(temp3, temp4.T)
            temp_hess = term1 + term2 + term3
            hessian_val += temp_hess
    global hess_neval
    hess_neval += 1
    return hessian_val
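Since the returned matrix approximates a Hessian, a cheap sanity check is symmetry; the analytic terms are built pairwise, so a large asymmetry usually indicates a broken derivative. A sketch, with ndt_cloud and test_pc as stand-ins:

H = hessian_vect(np.zeros(6), ndt_cloud, test_pc)
asym = np.max(np.abs(H - H.T))
print('max Hessian asymmetry:', asym)  # should be small, up to finite-difference noise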
Code Example #11
File: PlyReader.py Project: hassnov/Conv3D
    def read_ply(self, file_name, num_samples=1000, sample_class_start=0, add_noise =False,
                  noise_prob=0.3, noise_factor=0.02, noise_std=0.1, sampling_algorithm=SampleAlgorithm.Uniform,
                  rotation_axis=[0, 0, 1], rotation_angle=0):
         
        root, ext = os.path.splitext(file_name)
        if not os.path.isfile(root + ".npy"):
            ply = PlyData.read(file_name)
            vertex = ply['vertex']
            (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
            points = np.asarray(list(zip(x.ravel(), y.ravel(), z.ravel())))
            np.save(root + ".npy", points)
        else:
            points = np.load(root + ".npy")
        
        #load normals
        if os.path.isfile(root + "_normals" + ".ply"):
            if not os.path.isfile(root + "_normals" + ".npy"):
                ply1 = PlyData.read(root + "_normals" + ".ply")
                vertex = ply1['vertex']
                (nx, ny, nz) = (vertex[t] for t in ('nx', 'ny', 'nz'))
                self.normals = np.asarray(list(zip(nx.ravel(), ny.ravel(), nz.ravel())))
                np.save(root + "_normals" + ".npy", self.normals)
            else:
                self.normals = np.load(root + "_normals" + ".npy")
        
        if add_noise:
            print "adding noise to model.."
            mr = utils.model_resolution(np.array(points))
            #mr = 0.404
            print "model resolution: ", mr
            self.data = utils.add_noise_normal(np.array(points), mr, noise_std)
        else:
            self.data = np.asarray(points)
        rot = utils.angle_axis_to_rotation(rotation_angle, rotation_axis)
        self.data = utils.transform_pc(self.data, rot)
        #plotutils.show_pc(self.data)
        #mlab.show()
        #TODO: better sampling
        self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples-1, file_name=file_name, pose=rot, sampling_algorithm=sampling_algorithm)
        self.tree = spatial.KDTree(self.data) 
        self.sample_class_start = sample_class_start
        self.sample_class_current = sample_class_start
        self.num_samples = self.samples.shape[0]
        print "num samples: ", self.num_samples
        logging.basicConfig(filename='example.log',level=logging.DEBUG)
        return self.data
Code Example #12
def find_del2q_deltnm(odometry_vector, q):
    """
    Function to return the double partial derivative of a point w.r.t. the odometry vector parameters
    :param odometry_vector: Odometry vector at which the derivative is evaluated
    :param q: Initial point
    :return: del2q_deltnm: 3x6x6 array of second partial derivatives del2q/(delt_n delt_m)
    """
    del2q_deltnm = np.zeros([3, 6, 6])
    delta = 1.5e-08
    for i in range(6):
        odometry_new = np.zeros(6)
        odometry_new[i] = odometry_new[i] + delta
        q_new = np.transpose(utils.transform_pc(odometry_new, q))
        # Assuming that the incremental change allows us to directly add a rotation instead of an incremental change
        odometry_new += odometry_vector
        del2q_deltnm[:, :, i] = (find_delqdelt(odometry_new, q_new.T) -
                                 find_delqdelt(odometry_vector, q)) / delta
    return del2q_deltnm
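The step delta = 1.5e-08 is essentially the square root of machine epsilon for float64, the textbook step size that balances truncation error against round-off in a forward difference:

import numpy as np
print(np.sqrt(np.finfo(np.float64).eps))  # ~1.4901e-08, matching the delta used above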
Code Example #13
File: PlyReader.py Project: hassnov/Conv3D
    def patch_3d(self, point_in):
        X = np.zeros((1, self.patch_dim, self.patch_dim, self.patch_dim, 1), np.int32)
        Y = np.zeros((1), np.int32)
        r = self.l / np.sqrt(2)
        i = self.tree.query_ball_point(point_in[0:3], r=r)
        distances, indices = self.tree.query(point_in[0:3], k=len(i))
        local_points = self.data[indices]
        if len(i) <= 8:
            return None
        if self.use_point_as_mean:
            mu = point_in[0:3]
        else:
            #TODO: compute real mean
            mu = point_in[0:3]
        if self.use_normals_pc and point_in.shape[0] == 6:
            nr = point_in[3:6]
            #TODO: get_weighted_normal
        else:# calc normals
            cov_mat = utils.build_cov(local_points, mean=mu)
            w, v = LA.eigh(cov_mat)
            min_id = np.argmin(w)
            nr = np.transpose(v[:, min_id])
            nr = utils.normalize(nr)
            #nr = [-x for x in nr]
        # at this point the plane is defined by (mu, nr)
        # we transform the mean to be (0,0,0) and the normal to be (0,0,1)
        # to obtain canonical frame
        z_axis = np.array([0, 0, 1])
        origin = np.zeros(3)
        local_pose = utils.align_vectors(mu, nr, origin, z_axis)
        ref_points = utils.transform_pc(local_points, pose=local_pose)

        #theta = -np.pi
        #rot2d = utils.angle_axis_to_rotation(theta, z_axis)
        #rot_points = utils.transform_pc(ref_points, rot2d)
        for ref_pt in ref_points:
            x = int(((ref_pt[0] + self.l) / (2 * self.l))*(self.patch_dim - 1))
            y = int(((ref_pt[1] + self.l) / (2 * self.l))*(self.patch_dim - 1))
            z = int(((ref_pt[2] + self.l) / (2 * self.l))*(self.patch_dim - 1))
            if (z >= 0) and (z < self.patch_dim) and (y >= 0) and (y < self.patch_dim) and (x >= 0) and (x < self.patch_dim):
                X[0, x, y, z, 0] = 1
        Y[0] = -1
        return X, Y
Code Example #14
def objective(odometry_vector, ndt_cloud, test_pc):
    """
    A function that combines functions for transformation of a point cloud and the computation of likelihood (score)
    :param odometry_vector: Candidate odometry vector
    :param ndt_cloud: The NDT cloud with respect to which the odometry is required
    :param test_pc: Input point cloud
    :return: objective_val: Maximization objective which is the likelihood of transformed point cloud for the given NDT
    """
    global obj_neval
    global jacob_neval
    global hess_neval
    transformed_pc = utils.transform_pc(odometry_vector, test_pc)
    obj_value = -1 * ndt_cloud.find_likelihood(transformed_pc)
    if hess_neval % 5 == 0:
        print('Objective iteration: {:4d}'.format(obj_neval),
              'Jacobian iteration: {:4d}'.format(jacob_neval),
              'Hessian iteration: {:4d}'.format(hess_neval),
              'Objective Value: {:10.4f}'.format(obj_value))
    obj_neval += 1
    return obj_value
Code Example #15
File: sampling.py Project: hassnov/Conv3D
 def sample_ISS(file_name, min_num_point, pose):
     root, ext = os.path.splitext(file_name)
     in_file = root + ".ply"
     out_file = root + "_iss.ply"
     if not os.path.isfile(out_file):
         print("file doesn't exist, running iss_detect")
         args = ["./iss_detect", in_file, out_file]
         popen = subprocess.Popen(args, stdout=subprocess.PIPE)
         popen.wait()
         output = popen.stdout.read()
         print(output)
     pc = np.load(root + '.npy')
     tree = spatial.KDTree(pc)
     ply = PlyData.read(out_file)
     vertex = ply['vertex']
     (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
     pc_iss = np.asarray(list(zip(x.ravel(), y.ravel(), z.ravel())))
     indices = np.zeros((pc_iss.shape[0],))
     for pt_i, samplept in enumerate(pc_iss):
         _, index = tree.query(samplept, k=1)
         indices[pt_i] = index    
     pc_iss = utils.transform_pc(pc_iss, pose)
     
     #min_num_point = min(int(pc_iss.shape[0] / 10), 200)
     if min_num_point < 0:
         #min_num_point = min(int(pc_iss.shape[0] / 10), 200)
         #min_num_point = min(int(pc_iss.shape[0] / 1), 300)
         #min_num_point = min(int(pc_iss.shape[0] / 1), 500)
         min_num_point = pc_iss.shape[0]
         
     if min_num_point >= pc_iss.shape[0]:
         return pc_iss, indices
     
     sample_step = int(pc_iss.shape[0] / min_num_point)
     pc_iss_samples, _ = Sampler.sample_uniform(pc_iss, sample_step)
     indices_samples, _ = Sampler.sample_uniform(indices, sample_step)
     assert(pc_iss_samples.shape[0] == indices_samples.shape[0])
     print ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,pc_iss shape:", pc_iss_samples.shape
     return pc_iss_samples, indices_samples
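One caveat in the subprocess handling above: calling popen.wait() before reading stdout can deadlock once ./iss_detect writes more than a pipe buffer's worth of output. Popen.communicate() is the usual replacement; a sketch using the same in_file and out_file:

import subprocess

popen = subprocess.Popen(["./iss_detect", in_file, out_file], stdout=subprocess.PIPE)
output, _ = popen.communicate()  # drains stdout to EOF, then reaps the process
print(output)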
Code Example #16
File: plyTest.py Project: hassnov/Conv3D
def create_ref_points_files(pc, sample_points, l, path='ref_points/', use_normals_pc=True, use_point_as_mean=False, flip_view_point=False, sigma=0.7071):
    r = l / np.sqrt(2)
    print('r= ', r)
    tree = spatial.KDTree(pc)
    for point_number, samplept in enumerate(sample_points):
        start_time = time.time()
        i = tree.query_ball_point(samplept[0:3], r=r)
        distances, indices = tree.query(samplept[0:3], k=len(i))
        local_points = pc[indices]
        if len(i) <= 8:
            continue
        if use_point_as_mean:
            mu = samplept[0:3]
        else:
            #TODO: compute real mean
            mu = samplept[0:3]
        if use_normals_pc and samplept.shape[0] == 6:
            nr = samplept[3:6]
            #TODO: get_weighted_normal
        else:# calc normals
            cov_mat = utils.build_cov(local_points, mean=mu)
            w, v = LA.eigh(cov_mat)
            min_id = np.argmin(w)
            nr = np.transpose(v[:, min_id])
            nr = utils.normalize(nr)
        
        # at this point the plane is defined by (mu, nr)
        # we transform the mean to be (0,0,0) and the normal to be (0,0,1)
        # to obtain canonical frame
        z_axis = np.array([0, 0, 1])
        origin = np.zeros(3)
        local_pose = utils.align_vectors(mu, nr, origin, z_axis)
        ref_points = utils.transform_pc(local_points, pose=local_pose)
        fname = 'ref_' + str(point_number)
        fname = os.path.join(path, fname)
        #np.save(fname, ref_points)
        duration = time.time() - start_time
        print('duration: ', duration)
Code Example #17
def find_del2q_deltnm_vect(odometry_vector, points):
    """
    Vectorized implementation of function to return double partial derivative of point w.r.t. odometry vector parameters
    :param odometry_vector: Vector at which the Hessian is being calculated
    :param points: Points which are being compared to the NDT cloud
    :return: del2q_deltnm: Nx3x6x6 matrix of del2/deltndeltm
    """
    N = points.shape[0]
    del2q_deltnm_old = np.zeros([N, 3, 6, 6])
    delta = 1.5e-08
    original_delq_delt = find_delqdelt_vect(odometry_vector, points)
    for i in range(6):
        odometry_new = np.zeros(6)
        odometry_new[i] = odometry_new[i] + delta
        points_new = utils.transform_pc(odometry_new, points)
        # Assuming that the incremental change allows us to directly add a rotation instead of an incremental change
        odometry_new += odometry_vector
        del2q_deltnm_old[:, :, :,
                         i] = (find_delqdelt_vect(odometry_new, points_new) -
                               original_delq_delt) / delta
    return del2q_deltnm_old
Code Example #18
def compute_rot_sym(obb1, obb2):
    pc1_center = obb1[:3]
    pc2_center = obb2[:3]
    rotmat1 = Quaternion(obb1[6], obb1[7], obb1[8], obb1[9]).rotation_matrix
    rotmat2 = Quaternion(obb2[6], obb2[7], obb2[8], obb2[9]).rotation_matrix
    pc1 = transform_pc(unit_cube, torch.tensor(obb1,
                                               dtype=torch.float32)).numpy()
    pc2 = transform_pc(unit_cube, torch.tensor(obb2,
                                               dtype=torch.float32)).numpy()

    min_error = 1e8
    min_pt = None
    min_nor = None
    min_angle = None
    for axe_id in range(3):
        pc1_axis1 = rotmat1[axe_id]
        pc1_axis2 = rotmat1[(axe_id + 1) % 3]
        pc2_axis1 = rotmat2[axe_id]
        pc2_axis2 = rotmat2[(axe_id + 1) % 3]

        pt, nor, angle = compute_params(pc1_center, pc2_center,
                                        pc1_center + pc1_axis1,
                                        pc1_center + pc1_axis2,
                                        pc2_center + pc2_axis1,
                                        pc2_center + pc2_axis2)
        new_pc1 = atob_rot_sym(pc1, pt, nor, angle)
        error = get_chamfer_distance(new_pc1, pc2)
        if error < min_error:
            min_error = error
            min_pt = pt
            min_nor = nor
            min_angle = angle

        pt, nor, angle = compute_params(pc1_center, pc2_center,
                                        pc1_center + pc1_axis1,
                                        pc1_center + pc1_axis2,
                                        pc2_center - pc2_axis1,
                                        pc2_center + pc2_axis2)
        new_pc1 = atob_rot_sym(pc1, pt, nor, angle)
        error = get_chamfer_distance(new_pc1, pc2)
        if error < min_error:
            min_error = error
            min_pt = pt
            min_nor = nor
            min_angle = angle

        pt, nor, angle = compute_params(pc1_center, pc2_center,
                                        pc1_center + pc1_axis1,
                                        pc1_center + pc1_axis2,
                                        pc2_center + pc2_axis1,
                                        pc2_center - pc2_axis2)
        new_pc1 = atob_rot_sym(pc1, pt, nor, angle)
        error = get_chamfer_distance(new_pc1, pc2)
        if error < min_error:
            min_error = error
            min_pt = pt
            min_nor = nor
            min_angle = angle

        pt, nor, angle = compute_params(pc1_center, pc2_center,
                                        pc1_center + pc1_axis1,
                                        pc1_center + pc1_axis2,
                                        pc2_center - pc2_axis1,
                                        pc2_center - pc2_axis2)
        new_pc1 = atob_rot_sym(pc1, pt, nor, angle)
        error = get_chamfer_distance(new_pc1, pc2)
        if error < min_error:
            min_error = error
            min_pt = pt
            min_nor = nor
            min_angle = angle

    s = np.sin(min_angle)
    c = np.cos(min_angle)
    nx = min_nor[0]
    ny = min_nor[1]
    nz = min_nor[2]
    rotmat = np.array([[c + (1 - c) * nx * nx, (1 - c) * nx * ny - s * nz, (1 - c) * nx * nz + s * ny], \
                       [(1 - c) * nx * ny + s * nz, c + (1 - c) * ny * ny, (1 - c) * ny * nz - s * nx], \
                       [(1 - c) * nx * nz - s * ny, (1 - c) * ny * nz + s * nx, c + (1 - c) * nz * nz]], dtype=np.float32)
    mat1to2 = np.zeros((3, 4), dtype=np.float32)
    mat1to2[:3, :3] = rotmat
    mat1to2[:, 3] = np.dot(np.eye(3) - rotmat, min_pt)

    s = -s
    rotmat = np.array([[c + (1 - c) * nx * nx, (1 - c) * nx * ny - s * nz, (1 - c) * nx * nz + s * ny], \
                       [(1 - c) * nx * ny + s * nz, c + (1 - c) * ny * ny, (1 - c) * ny * nz - s * nx], \
                       [(1 - c) * nx * nz - s * ny, (1 - c) * ny * nz + s * nx, c + (1 - c) * nz * nz]], dtype=np.float32)
    mat2to1 = np.zeros((3, 4), dtype=np.float32)
    mat2to1[:3, :3] = rotmat
    mat2to1[:, 3] = np.dot(np.eye(3) - rotmat, min_pt)

    return mat1to2, mat2to1
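The four compute_params calls per axis differ only in the signs applied to pc2's axes, and the two closing blocks are Rodrigues' rotation formula for min_angle (and its negation) about min_nor. The sign search can be written more compactly as a loop; a sketch with the same helpers assumed in scope:

from itertools import product

best_error, best_params = 1e8, None
for axe_id in range(3):
    a1, a2 = rotmat1[axe_id], rotmat1[(axe_id + 1) % 3]
    b1, b2 = rotmat2[axe_id], rotmat2[(axe_id + 1) % 3]
    for s1, s2 in product((1, -1), repeat=2):
        pt, nor, angle = compute_params(pc1_center, pc2_center,
                                        pc1_center + a1, pc1_center + a2,
                                        pc2_center + s1 * b1, pc2_center + s2 * b2)
        error = get_chamfer_distance(atob_rot_sym(pc1, pt, nor, angle), pc2)
        if error < best_error:
            best_error, best_params = error, (pt, nor, angle)
min_pt, min_nor, min_angle = best_params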
Code Example #19
File: PlyReader.py Project: hassnov/Conv3D
    def next_batch_2d(self, batch_size, num_rotations=20, num_classes=-1, num_channels=3):
        if num_classes == -1:
            num_classes = self.num_samples
        if self.index + batch_size < self.samples.shape[0]:
            pc_samples = self.samples[self.index:self.index+batch_size]
            self.index += batch_size
        else:
            pc_samples = self.samples[self.index:self.samples.shape[0]]
            self.index = self.index + batch_size -self.samples.shape[0]
            #self.sample_class_current = self.sample_class_start
            pc_samples = np.vstack((pc_samples, self.samples[0:self.index]))
            
        X = np.zeros((batch_size*  num_rotations, self.patch_dim, self.patch_dim, num_channels))
        Y = np.zeros((batch_size*  num_rotations))
        
        r = self.l / np.sqrt(2)
        
        for point_number, samplept in enumerate(pc_samples):
            i = self.tree.query_ball_point(samplept[0:3], r=r)
            distances, indices = self.tree.query(samplept[0:3], k=len(i))
            local_points = self.data[indices]
            if len(i) <= 8:
                continue
            if self.use_point_as_mean:
                mu = samplept[0:3]
            else:
                #TODO: compute real mean
                mu = samplept[0:3]
            if self.use_normals_pc and samplept.shape[0] == 6:
                nr = samplept[3:6]
                #TODO: get_weighted_normal
            else:# calc normals
                cov_mat = utils.build_cov(local_points, mean=mu)
                w, v = LA.eigh(cov_mat)
                min_id = np.argmin(w)
                nr = np.transpose(v[:, min_id])
                nr = utils.normalize(nr)
                #nr = [-x for x in nr]
            
            # at this point the plane is defined by (mu, nr)
            # we transform the mean to be (0,0,0) and the normal to be (0,0,1)
            # to obtain canonical frame
            z_axis = np.array([0, 0, 1])
            origin = np.zeros(3)
            local_pose = utils.align_vectors(mu, nr, origin, z_axis)
            ref_points = utils.transform_pc(local_points, pose=local_pose)

            for aug_num, theta in enumerate(utils.my_range(-np.pi, np.pi, (2*np.pi)/num_rotations)):
                if aug_num == num_rotations:
                    break
                rot2d = utils.angle_axis_to_rotation(theta, z_axis)
                rot_points = utils.transform_pc(ref_points, rot2d)
                #plotutils.show_pc(rot_points)
                #plotutils.draw_normal(origin, z_axis)
                #mlab.show()
                #patch = np.zeros((self.patch_dim, self.patch_dim, self.patch_dim), dtype='int32')
                if num_channels == 3:
                    for rot_pt in rot_points:
                        #project on z
                        x = int(((rot_pt[0] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                        y = int(((rot_pt[1] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                        #x = int(self.patch_dim*(rot_pt[0] / self.l) + self.patch_dim / 2)
                        #y = int(self.patch_dim*(rot_pt[1] / self.l) + self.patch_dim / 2)
                        if (y >= 0) and (y < self.patch_dim) and (x >= 0) and (x < self.patch_dim):
                            X[point_number*num_rotations + aug_num, x, y, 2] = 1 # rot_pt[2]
                        
                        #project on y
                        #x = int(self.patch_dim*(rot_pt[0] / self.l) + self.patch_dim / 2)
                        #z = int(self.patch_dim*(rot_pt[2] / self.l) + self.patch_dim / 2)
                        x = int(((rot_pt[0] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                        z = int(((rot_pt[2] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                        if (z >= 0) and (z < self.patch_dim) and (x >= 0) and (x < self.patch_dim):
                            X[point_number*num_rotations + aug_num, z, x, 1] = 1 #rot_pt[1]
                        
                        #project on x
                        #y = int(self.patch_dim*(rot_pt[1] / self.l) + self.patch_dim / 2)
                        #z = int(self.patch_dim*(rot_pt[2] / self.l) + self.patch_dim / 2)
                        y = int(((rot_pt[1] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                        z = int(((rot_pt[2] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                        if (z >= 0) and (z < self.patch_dim) and (y >= 0) and (y < self.patch_dim):
                            X[point_number*num_rotations + aug_num, z, y, 0] = 1 #rot_pt[0]
                else:
                    for rot_pt in rot_points:
                        #project on z
                        #x = int(self.patch_dim*(rot_pt[0] / self.l) + self.patch_dim / 2)
                        #y = int(self.patch_dim*(rot_pt[1] / self.l) + self.patch_dim / 2)
                        x = int(((rot_pt[0] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                        y = int(((rot_pt[1] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                        if (y >= 0) and (y < self.patch_dim) and (x >= 0) and (x < self.patch_dim):
                            X[point_number*num_rotations + aug_num, x, y, 0] = rot_pt[2]
                    #plt.imshow(X[point_number*num_rotations + aug_num].reshape([self.patch_dim, self.patch_dim,]), cmap = 'gray', interpolation = 'bicubic')
                    #plt.title('label: ' + str(self.sample_class_current % num_classes))
                    #plt.show()
                Y[point_number*num_rotations + aug_num] = self.sample_class_current % num_classes
                #patches.append(patch)
            self.sample_class_current += 1
        return X, Y
Code Example #20
File: PlyReader2.py Project: hassnov/Conv3D
 def next_batch_3d(self, batch_size, num_rotations=20):
     logging.info('index: ' + str(self.index) + '    current_label: ' + str(self.sample_class_current % self.num_samples) )
     if self.index + batch_size <= self.samples.shape[0]:
         pc_samples = self.samples[self.index:self.index+batch_size]
         self.index += batch_size
     else:
         pc_samples = self.samples[self.index:self.samples.shape[0]]
         self.index = self.index + batch_size -self.samples.shape[0]
         self.next_file()
         pc_samples = np.vstack((pc_samples, self.samples[0:self.index]))
     
     X = np.zeros((batch_size *  num_rotations * self.nr_count, self.patch_dim, self.patch_dim, self.patch_dim, 1), np.int32)
     Y = np.zeros((batch_size *  num_rotations * self.nr_count), np.int32)
     
     #r = self.l / np.sqrt(2)
     r = self.l# / np.sqrt(2)
     for point_number, samplept in enumerate(pc_samples):
         
         i = self.tree.query_ball_point(samplept[0:3], r=r)
         _, indices = self.tree.query(samplept[0:3], k=len(i))
         #query_time = time.time() - start_time
         #print "query time: {0:.2f}".format(query_time)
         local_points = self.data[indices]
         #if local_points.shape[0] > 1000:
         #    local_points, _ = Sampler.sample(local_points, -1, 1000, sampling_algorithm=self.sampling_algorithm)
         if len(i) <= 8:
             continue
         if self.use_point_as_mean:
             mu = samplept[0:3]
         else:
             #TODO: compute real mean
             mu = np.zeros(3)
             point_count = local_points.shape[0]
             mu[0] = np.sum(local_points[:, 0]) / point_count
             mu[1] = np.sum(local_points[:, 1]) / point_count
             mu[2] = np.sum(local_points[:, 2]) / point_count
         if self.use_normals_pc and samplept.shape[0] == 6:
             nr = samplept[3:6]
             #TODO: get_weighted_normal
         else:# calc normals
             cov_mat = utils.build_cov(local_points, mean=mu)
             w, v = LA.eigh(cov_mat)
             min_id = np.argmin(w)
             #print 'eigenvalues: ', w
             nr1 = np.transpose(v[:, min_id])
             nr1 = utils.normalize(nr1)
             if self.nr_count > 1:
                 nr2 = [-x for x in nr1]
                 nrs = [nr1, nr2]
             else:
                 nrs = [nr1]
         
         # at this point the plane is defined by (mu, nr)
         # we transform the mean to be (0,0,0) and the normal to be (0,0,1)
         # to obtain canonical frame
         z_axis = np.array([0, 0, 1])
         origin = np.zeros(3)
         for nr_num, nr in enumerate(nrs):
             local_pose = utils.align_vectors(mu, nr, origin, z_axis)
             ref_points = utils.transform_pc(local_points, pose=local_pose)
             #plotutils.show_pc(ref_points, np.zeros((1, 3)), mode='sphere', scale_factor=0.001)
             #mlab.show()
             start_time = time.time()
             for aug_num, theta in enumerate(utils.my_range(-np.pi, np.pi, (2*np.pi)/num_rotations)):
                 if aug_num == num_rotations:
                     break
                 rot2d = utils.angle_axis_to_rotation(theta, z_axis)
                 rot_points = utils.transform_pc(ref_points, rot2d)
                 #patch = np.zeros((self.patch_dim, self.patch_dim, self.patch_dim), dtype='int32')
                 #TODO: use numpy.apply_along_axis
                 rz = r / 3
                 Y[point_number*num_rotations*self.nr_count + aug_num + nr_num*num_rotations] = self.sample_class_current % self.num_samples
                 temp_file = 'temp/' + str(self.sample_class_current) + '_' + str(num_rotations) + '_' + str(aug_num) + '_nr'+ str(nr_num) +'.npy'
                 if os.path.isfile(temp_file):
                     X[point_number*num_rotations*self.nr_count + aug_num + nr_num*num_rotations] = np.load(temp_file)
                     #print 'file loaded: ', temp_file
                     continue
                 
                 for rot_pt in rot_points:
                     x = int(((rot_pt[0] + r) / (2 * r))*(self.patch_dim - 1))
                     y = int(((rot_pt[1] + r) / (2 * r))*(self.patch_dim - 1))
                     z = int(((rot_pt[2] + rz) / (2 * rz))*(self.patch_dim - 1))
                     if (z >= 0) and (z < self.patch_dim) and (y >= 0) and (y < self.patch_dim) and (x >= 0) and (x < self.patch_dim):
                         X[point_number*num_rotations*self.nr_count + aug_num + nr_num*num_rotations, x, y, z, 0] = 1
                     
                 #X[point_number*num_rotations + aug_num, :, :, :, 0] = ndimage.morphology.distance_transform_edt(1 - X[point_number*num_rotations + aug_num, :, :, :, 0])
                 #X[point_number*num_rotations + aug_num, :, :, :, 0] /= np.max(X[point_number*num_rotations + aug_num, :, :, :, 0])
                         
                 np.save(temp_file, X[point_number*num_rotations*self.nr_count + aug_num + nr_num*num_rotations])
                 
             #fig = plotutils.plot_patch_3D(X[point_number*num_rotations + 0], name='patch label ' + str(self.sample_class_current % num_classes))
             #plt.show()
             #TODO: start from start not 0 with sample_class_current                
         self.sample_class_current = (self.sample_class_current + 1) % self.num_samples
     return X, Y
Code Example #21
File: PlyReader.py Project: hassnov/Conv3D
 def next_batch_3d_sdf(self, batch_size, num_rotations=20, num_classes=-1):
     if num_classes == -1:
         num_classes = self.num_samples
     logging.info('index: ' + str(self.index) + '    current_label: ' + str(self.sample_class_current % num_classes) )
     if self.index + batch_size < self.samples.shape[0]:
         pc_samples = self.samples[self.index:self.index+batch_size]
         self.index += batch_size
     else:
         pc_samples = self.samples[self.index:self.samples.shape[0]]
         self.index = self.index + batch_size -self.samples.shape[0]
         #self.sample_class_current = self.sample_class_start
         pc_samples = np.vstack((pc_samples, self.samples[0:self.index]))
     
     X = np.zeros((batch_size*  num_rotations, self.patch_dim, self.patch_dim, self.patch_dim, 1), np.float32)
     Y = np.zeros((batch_size*  num_rotations), np.int32)
     
     r = self.l  # / np.sqrt(2)
     rz = r
     for point_number, samplept in enumerate(pc_samples):
         i = self.tree.query_ball_point(samplept[0:3], r=r)
         distances, indices = self.tree.query(samplept[0:3], k=len(i))
         local_points = self.data[indices]
         if len(i) <= 8:
             continue
         if self.use_point_as_mean:
             mu = samplept[0:3]
         else:
             #TODO: compute real mean
             mu = np.zeros(3)
             point_count = local_points.shape[0]
             mu[0] = np.sum(local_points[:, 0]) / point_count
             mu[1] = np.sum(local_points[:, 1]) / point_count
             mu[2] = np.sum(local_points[:, 2]) / point_count
         if self.use_normals_pc and samplept.shape[0] == 6:
             nr = samplept[3:6]
             #TODO: get_weighted_normal
         else:# calc normals
             cov_mat = utils.build_cov(local_points, mean=mu)
             w, v = LA.eigh(cov_mat)
             min_id = np.argmin(w)
             #print 'eigenvalues: ', w
             nr = np.transpose(v[:, min_id])
             nr = utils.normalize(nr)
             #nr = [-x for x in nr]
         
         # at this point the plane is defined by (mu, nr)
         # we transform the mean to be (0,0,0) and the normal to be (0,0,1)
         # to obtain canonical frame
         z_axis = np.array([0, 0, 1])
         origin = np.zeros(3)
         local_pose = utils.align_vectors(mu, nr, origin, z_axis)
         ref_points = utils.transform_pc(local_points, pose=local_pose)
         #plotutils.show_pc(ref_points)
         #mlab.show()
         for aug_num, theta in enumerate(utils.my_range(-np.pi, np.pi, (2*np.pi)/num_rotations)):
             if aug_num == num_rotations:
                 break
             rot2d = utils.angle_axis_to_rotation(theta, z_axis)
             rot_points = utils.transform_pc(ref_points, rot2d)
             #patch = np.zeros((self.patch_dim, self.patch_dim, self.patch_dim), dtype='int32')
             for rot_pt in rot_points:
                 x = int(((rot_pt[0] + r) / (2 * r))*(self.patch_dim - 1))
                 y = int(((rot_pt[1] + r) / (2 * r))*(self.patch_dim - 1))
                 z = int(((rot_pt[2] + rz) / (2 * rz))*(self.patch_dim - 1))
                 
                 #x = int(((rot_pt[0] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                 #y = int(((rot_pt[1] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                 #z = int(((rot_pt[2] + self.l) / (2 * self.l))*(self.patch_dim - 1))
                 
                 #x = int(self.patch_dim*(rot_pt[0] / self.l) + self.patch_dim / 2)
                 #y = int(self.patch_dim*(rot_pt[1] / self.l) + self.patch_dim / 2)
                 #z = int(self.patch_dim*(rot_pt[2] / self.l) + self.patch_dim / 2)
                 if (z >= 0) and (z < self.patch_dim) and (y >= 0) and (y < self.patch_dim) and (x >= 0) and (x < self.patch_dim):
                     #patch[x, y, z] = 1
                     #X[point_number*num_rotations + aug_num, x + self.patch_dim * (y + self.patch_dim * z)] = 1
                     X[point_number*num_rotations + aug_num, x, y, z, 0] = 1
             #X[point_number*num_rotations + aug_num, :] = patch.reshape((np.power(self.patch_dim, 3),))
             Y[point_number*num_rotations + aug_num] = self.sample_class_current % num_classes
             X[point_number*num_rotations + aug_num, :, :, :, 0] = ndimage.distance_transform_edt(1 - X[point_number*num_rotations + aug_num, :, :, :, 0])
             X[point_number*num_rotations + aug_num, :, :, :, 0] /= np.max(X[point_number*num_rotations + aug_num, :, :, :, 0])
         #fig = plotutils.plot_patch_TSDF(X[point_number*num_rotations, :, :, :, 0], name='patch label ' + str(self.sample_class_current % num_classes))
         #fig = plotutils.plot_patch_3D(X[point_number*num_rotations + 0], name='patch label ' + str(self.sample_class_current % num_classes))
         #plt.show()
         #mlab.show()
         #TODO: start from start not 0 with sample_class_current                
         self.sample_class_current += 1
     return X, Y
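The SDF conversion at the end of the rotation loop replaces the binary occupancy grid with a normalized Euclidean distance transform of its complement, so empty voxels encode their distance to the nearest occupied one. The same step on a standalone grid:

import numpy as np
from scipy import ndimage

occ = np.zeros((8, 8, 8), np.float32)
occ[4, 4, 4] = 1
sdf = ndimage.distance_transform_edt(1 - occ)  # distance to the nearest occupied voxel
sdf /= np.max(sdf)                             # normalize to [0, 1]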
Code Example #22
def interp_hessian(odometry_vector, ndt_cloud, test_pc):
    """
    Vectorized computation of an approximation of the Hessian of the interpolated likelihood objective
    for the odometry calculation
    :param odometry_vector: The point at which the Hessian is evaluated
    :param ndt_cloud: The NDT cloud with respect to which the odometry is required
    :param test_pc: The point cloud for which the optimization is being carried out
    :return: hessian_val: The Hessian matrix of the objective w.r.t. the odometry vector
    """
    assert (ndt_cloud.cloud_type == 'interpolate')
    N = test_pc.shape[0]
    transformed_xyz = utils.transform_pc(odometry_vector, test_pc[:, :3])
    neighbours = ndt_cloud.find_neighbours(transformed_xyz)
    weights = ndt_cloud.find_interp_weights(transformed_xyz, neighbours)[:, :N]

    vect_nearby_init = np.array(np.hsplit(neighbours, 8))
    vert_stack = np.reshape(vect_nearby_init, [N * 8, 3])
    vert_idx = ndt_cloud.pairing_cent2int(vert_stack)
    vect_nearby_idx = np.reshape(vert_idx.T, [8, N]).T
    vect_mus = np.zeros([8, N, 3, 1])
    vect_inv_sigmas = 10000 * np.ones([8, N, 3, 3])

    delq_delt = find_delqdelt_vect(odometry_vector, transformed_xyz)
    vect_delq_delt = np.transpose(
        np.repeat(delq_delt[:, :, :, None], 8, axis=3), (3, 0, 1, 2))
    vect_delq_delt_trans = np.transpose(vect_delq_delt, (0, 1, 3, 2))

    del2q_deltnm = find_del2q_deltnm_vect(odometry_vector,
                                          transformed_xyz)  # shape N, 3, 6, 6
    vect_del2q_deltnm = np.transpose(
        np.repeat(del2q_deltnm[:, :, :, :, np.newaxis], 8, axis=4),
        (4, 0, 1, 2, 3))

    for key in ndt_cloud.stats:
        indices = vect_nearby_idx == ndt_cloud.stats[key]['idx']
        mu = ndt_cloud.stats[key]['mu']
        inv_sigma = np.linalg.inv(ndt_cloud.stats[key]['sigma'])
        vect_mus[indices.T, :, 0] = mu
        vect_inv_sigmas[indices.T, :, :] = inv_sigma

    diff = np.zeros_like(vect_mus)
    diff[:, :, :,
         0] = -vect_mus[:, :, :,
                        0] + transformed_xyz[:N, :]  # shape 8, N, 3, 1
    diff_transpose = np.transpose(diff, [0, 1, 3, 2])  # shape 8, N, 1, 3
    lkds = np.exp(-0.5 * np.matmul(np.matmul(diff_transpose, vect_inv_sigmas),
                                   diff))[:, :, 0, 0]  # shape 8, N
    wgt_lkd = weights * lkds
    temp11 = np.matmul(diff_transpose, vect_inv_sigmas)
    term1 = np.zeros([8, N, 6, 6])
    for mult_idx in range(3):
        temp21 = temp11[:, :, 0, mult_idx]
        temp22 = np.repeat(np.repeat(temp21[:, :, np.newaxis, np.newaxis],
                                     6,
                                     axis=2),
                           6,
                           axis=3)
        term1 += temp22 * vect_del2q_deltnm[:, :, mult_idx, :, :]
    term2 = np.matmul(vect_delq_delt_trans,
                      np.matmul(vect_inv_sigmas, vect_delq_delt))
    term31 = np.matmul(vect_delq_delt_trans, np.matmul(vect_inv_sigmas, diff))
    term32 = np.matmul(diff_transpose,
                       np.matmul(vect_inv_sigmas, vect_delq_delt))
    vect_wgt_lkd = np.repeat(np.repeat(wgt_lkd[:, :, np.newaxis, np.newaxis],
                                       6,
                                       axis=2),
                             6,
                             axis=3)
    vect_hessian_val = vect_wgt_lkd * (term1 + term2 -
                                       np.matmul(term31, term32))
    hessian_val = np.sum(np.sum(vect_hessian_val, axis=0), axis=0)
    return hessian_val
Code Example #23
File: PlyReader2.py Project: hassnov/Conv3D
 def read_ply(self, file_name):
     num_samples = self.num_samples // len(self.files_list)
     if self.file_index == len(self.files_list) - 1:
         num_samples = num_samples + (self.num_samples - (num_samples * len(self.files_list)))
     
     root, ext = os.path.splitext(file_name)
     if not os.path.isfile(root + ".npy"):
         ply = PlyData.read(file_name)
         vertex = ply['vertex']
         (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
         points = np.asarray(list(zip(x.ravel(), y.ravel(), z.ravel())))
         np.save(root + ".npy", points)
     else:
         points = np.load(root + ".npy")
         
     if self.add_noise:
         self.data = utils.add_noise(points, prob=self.noise_prob, factor=self.noise_factor)
     else:
         self.data = np.asarray(points)
     
     #if self.data.shape[0] > 2e5:
     #        self.data, _ = Sampler.sample(self.data, -1, 2e5, sampling_algorithm=self.sampling_algorithm)
         
     pc_diameter = utils.get_pc_diameter(self.data)
     self.l = self.relL*pc_diameter
     
     rot = utils.angle_axis_to_rotation(self.rotation_angle, self.rotation_axis)
     
     
     self.data = utils.transform_pc(self.data, rot)
     #plotutils.show_pc(self.data)
     #mlab.show()
             
     #TODO: better sampling
     print "sampling file: ", file_name
     self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples, file_name=file_name, sampling_algorithm=self.sampling_algorithm)
     self.samples = self.samples[0:num_samples]
     self.sample_indices = self.sample_indices[0:num_samples]
     
     self.tree = spatial.KDTree(self.data)
     
     #TODO: Integrate with num_samples for consistency
     if self.filter_bad_samples:
         temp_file_samples = 'temp/' + os.path.basename(file_name) + '_' + str(num_samples) + '_filter' + str(self.filter_threshold) + '.npy'
          print('samples file: ', temp_file_samples)
         if os.path.isfile(temp_file_samples):
             self.sample_indices = np.load(temp_file_samples)
             self.samples = self.data[self.sample_indices]
         else:
             self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples*2, sampling_algorithm=self.sampling_algorithm)
             self.samples = self.samples[0:num_samples*2]
             self.sample_indices = self.sample_indices[0:num_samples*2]
             sample_indices_temp = []
             for idx in self.sample_indices:
                 if self.is_good_sample(self.data[idx], self.filter_threshold):
                     sample_indices_temp.append(idx)
                     if len(sample_indices_temp) >= num_samples:
                         break   
             assert (len(sample_indices_temp) >= num_samples)
             self.sample_indices = np.asarray(sample_indices_temp[0:num_samples])
             self.samples = self.data[self.sample_indices]
             np.save(temp_file_samples, self.sample_indices)
             #plotutils.show_pc(self.samples)
             #mlab.show()
     
     logging.basicConfig(filename='example.log',level=logging.DEBUG)
     return self.data
Code Example #24
def main(args):

    scenes_dir = args[1]
    base_path = args[2]
    scenes_count = int(args[3])
    train_rotations = int(args[4])
    num_samples = int(args[5])
    ratio = float(args[6])
    rel_support_radii = float(args[7])
    patch_dim = 32
    relL = 0.05
    config_files = fill_files_list(os.path.join(scenes_dir, "configs.txt"), scenes_dir)
    #config_files = None
    fpfh_models = []
    fpfh_scenes = []
    for s in range(0, scenes_count):
        model_paths, model_trans_paths, scene_path = get_scene(base_path, scenes_dir, s, configs=config_files)
        #if "Scene3" in scene_path or "Scene4" in scene_path:
        #    continue
        print "scene path: ", scene_path
        reader_scene = PlyReader_fpfh.PlyReader()
        reader_scene.read_ply(scene_path, num_samples=num_samples, add_noise=False, noise_std=0.5, noise_prob=0, noise_factor=0,
                           rotation_axis=[0, 0, 1], rotation_angle=utils.rad(0), 
                           sampling_algorithm=SampleAlgorithm.ISS_Detector)
        pc_diameter = utils.get_pc_diameter(reader_scene.data)
        l_scene = relL*pc_diameter
        support_radii = rel_support_radii*pc_diameter
        #support_radii = 0.0114401621899
        print "supprot_radii", support_radii
        reader_scene.set_variables(l=l_scene, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50, use_point_as_mean=False)
        print "num before filtering: ", reader_scene.samples.shape[0]
        reader_scene.samples, reader_scene.sample_indices = filter_scene_samples(reader_scene.data,
                                                                                  reader_scene.samples, reader_scene.sample_indices,
                                                                                   support_radii/2, threshold=100)
        print "num after filtering: ", reader_scene.samples.shape[0]
        reader_scene_samples_all = reader_scene.samples
        for model_num in range(len(model_paths)):
            model_path = model_paths[model_num]
            model_trans_path = model_trans_paths[model_num]
            #if not ("bun" in model_path):
            #    continue 
            print "trans mat path: ", model_trans_path
            print "model_path: ", model_path
            
            #if not "Arm" in model_path:
            #    print "skipping: ", model_path
            #    continue
            
                
            trans_mat = numpy.loadtxt(model_trans_path, ndmin=2)
            reader = PlyReader_fpfh.PlyReader()
            reader.read_ply(model_path, num_samples=num_samples, add_noise=False,
                             sampling_algorithm=SampleAlgorithm.ISS_Detector)
            pc_diameter = utils.get_pc_diameter(reader.data)
            l = relL*pc_diameter
            reader.set_variables(l=l, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50, use_point_as_mean=True)
            
            ##for testing only
            #reader.set_variables(l=l_scene, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50)
            reader_scene.set_variables(l=l, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50, use_point_as_mean=False)
            #reader_scene.samples = utils.transform_pc(reader.samples, trans_mat)
            #reader_scene.sample_indices = get_indices(reader_scene.data, reader_scene.samples)
            
            
            #plotutils.show_pc(reader_scene.data, reader_scene.samples, mode='sphere', scale_factor=0.003)
            
            #plotutils.show_pc(reader.data, reader.samples, mode='sphere', scale_factor=0.003)
            #reader.next_batch_3d(1, 10, len(reader.samples))
            #plotutils.show_pc(reader_scene.data)
            #mlab.show()
            """
            #root, ext = os.path.splitext(scene_path)
            if not (model_path in fpfh_models):
                root, ext = os.path.splitext(model_path)
                fpfh.compute_fpfh_all(root, l*0.95, l)
                fpfh_models.append(model_path)
            else:
                print "model exists: ", model_path
            
            if not (scene_path in fpfh_scenes):
                root, ext = os.path.splitext(scene_path)
                fpfh.compute_fpfh_all(root, l*0.95, l)
                fpfh_scenes.append(scene_path)
            else:
                print "scene exists: ", scene_path
            continue
            """
            
            reader_scene.samples = utils.transform_pc(reader.samples, trans_mat)
            reader_scene.sample_class_current = 0
            reader_scene.index = 0
            
            
            
            num_cf, reader.samples, reader_scene.samples = utils.num_corresponding_features(reader.samples, reader_scene_samples_all, reader_scene.data, trans_mat, support_radii)
            reader.sample_indices = get_indices(reader.data, reader.samples)
            reader_scene.sample_indices = get_indices(reader_scene.data, reader_scene.samples)
            print "num_corresponding_features: ", num_cf
            
            print s
            
            
            desc1 = reader.fpfh[numpy.array(reader.sample_indices, numpy.int32)]
            desc2 = reader_scene.fpfh[numpy.array(reader_scene.sample_indices, numpy.int32)]
            
            """
            if desc1.shape[0] < desc2.shape[0]:                    
                matches = utils.match_des_test(desc1, desc2, ratio)
                print "match_des_test"
            else:
                matches = utils.match_des(desc1, desc2, ratio)
                print "match_des"
            """
            matches = utils.match_des(desc1, desc2, ratio)
            print "match_des"
            #numpy.save("scene_debug/scene_matches.npy", matches)
            print 'num_matches: ', len(matches)
            #correct, wrong = utils.correct_matches_support_radii(reader.samples, reader_scene.samples, matches,
            #                                       pose=trans_mat, N=100000, support_radii=support_radii)
            
            correct, wrong, _ = utils.correct_matches_support_radii(reader_scene.samples, reader.samples, matches,
                                                   pose=trans_mat, N=100000, support_radii=support_radii)
            #correct, wrong = utils.correct_matches(reader.samples, reader_scene.samples, matches, N=100000)
            best10 = num_samples//10
            print 'N=', best10
            print 'total sample count', reader.samples.shape[0]
            correct10 = -1
            #correct10, wrong10 = utils.correct_matches_support_radii(reader.samples, reader_scene.samples, matches,
            #                                           pose=trans_mat, N=best10, support_radii=support_radii)
            recall = (len(matches)/float(num_cf)*correct)
            scene_name = os.path.split(scene_path)[1]
            with open("results_fpfh_retrieval.txt", "a") as myfile:
                myfile.write('train rotations: ' + str(train_rotations) + '    num samples: ' + str(num_samples) + '    scene: ' + scene_name + "    correct: {0:.4f}".format(correct) + "    correct best 10: {0:.4f}".format(correct10) + "  after filtering count: " + str(reader.samples.shape[0]) + "  num matches: " + str(len(matches))  + " ratio : {0:.1f}".format(ratio) + " recall final : {0:.4f}".format(recall) + '\n')
                myfile.close()
            with open("precision_fpfh_retrieval.txt", "a") as myfile:
                myfile.write("{0:.4f}".format(correct) + '\n')
                myfile.close()
            with open("recall_fpfh_retrieval.txt", "a") as myfile:
                myfile.write("{0:.4f}".format(recall) + '\n')
                myfile.close()
            #plotutils.show_matches(reader.data, reader_noise.data, reader.samples, reader_noise.samples, matches, N=200)
        print 'done'
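
The pipeline above matches FPFH descriptors through utils.match_des with a ratio threshold, whose implementation is not shown here. A minimal sketch of a Lowe-style ratio test, under the assumption that a match is an (index into desc1, index into desc2) pair; the actual return format of utils.match_des may differ:

import numpy as np
from scipy import spatial

def ratio_test_matches(desc1, desc2, ratio):
    # Keep a nearest-neighbour match only when its distance is sufficiently
    # smaller than that of the second-nearest neighbour (Lowe's ratio test).
    # Assumes desc2 contains at least two descriptors.
    tree = spatial.KDTree(desc2)
    dists, idxs = tree.query(desc1, k=2)
    matches = []
    for i in range(desc1.shape[0]):
        d1, d2 = dists[i]
        if d2 > 0 and d1 / d2 < ratio:
            matches.append((i, idxs[i][0]))
    return matches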
Code example #25
File: scene_test.py  Project: hassnov/Conv3D
def main(args):
    models_dir = '/home/titan/hasan/tr_models/'
    BATCH_SIZE=10
    num_rotations=1
    samples_per_batch = BATCH_SIZE * num_rotations
    
    
    #base_path = "/home/hasan/Downloads"
    scene_i = 0
    scene_count = 18
    #scenes_dir = r"/home/hasan/Downloads/3D models/Stanford/Retrieval"
    scenes_dir = args[1]
    base_path = args[2]
    scenes_count = int(args[3])
    train_rotations = int(args[4])
    num_samples = int(args[5])
    ratio = float(args[6])
    rel_support_radii = float(args[7])
    patch_dim = 32
    relL = 0.05
    first = True
    for s in range(0, scenes_count):
        model_paths, model_trans_paths, scene_path = get_scene(base_path, scenes_dir, s)
        
        print "scene path: ", scene_path
        reader_scene = PlyReader.PlyReader()
        reader_scene.read_ply(scene_path, num_samples=num_samples, add_noise=False, noise_std=0.5, noise_prob=0, noise_factor=0,
                           rotation_axis=[0, 0, 1], rotation_angle=utils.rad(0), 
                           sampling_algorithm=SampleAlgorithm.ISS_Detector)
        pc_diameter = utils.get_pc_diameter(reader_scene.data)
        l_scene = relL*pc_diameter
        support_radii = rel_support_radii*pc_diameter
        #print "supprot_radii", support_radii
        reader_scene.set_variables(l=l_scene, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50)
        
        for model_num in range(len(model_paths)):
            model_path = model_paths[model_num]
            model_trans_path = model_trans_paths[model_num]
            #if not ("bun" in model_path):
            #    continue 
            print "trans mat path: ", model_trans_path
            print "model_path: ", model_path
            
            trans_mat = numpy.loadtxt(model_trans_path, ndmin=2)
            reader = PlyReader.PlyReader()
            reader.read_ply(model_path, num_samples=num_samples, add_noise=False, sampling_algorithm=SampleAlgorithm.ISS_Detector)
            pc_diameter = utils.get_pc_diameter(reader.data)
            l = relL*pc_diameter
            reader.set_variables(l=l, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50)
            reader_scene.samples = utils.transform_pc(reader.samples, trans_mat)
            reader_scene.sample_indices = -1
            
            ### just for testing
            reader_scene.set_variables(l=l, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50)
            num_cf = utils.num_corresponding_features(reader.samples, reader_scene.samples, trans_mat, support_radii)
            print "num_corresponding_features: ", num_cf
            # debug early-exit: everything below in this loop is skipped while
            # the correspondence count is being verified
            return 0
            
            #numpy.save("scene_debug/scene_scene.npy", reader_scene.data)
            #numpy.save("scene_debug/scene_scene_samples.npy", reader_scene.samples)
            #numpy.save("scene_debug/scene_model_samples.npy", reader.samples)
            
            print "s: ", s
            #continue
            samples_count = reader.compute_total_samples(num_rotations)
            batches_per_epoch = samples_count/BATCH_SIZE
            
            with tf.Graph().as_default() as graph:
                #net_x = tf.placeholder("float", X.shape, name="in_x")
                #net_y = tf.placeholder(tf.int64, Y.shape, name="in_y")
                
                net_x = tf.placeholder("float", [samples_per_batch, patch_dim, patch_dim, patch_dim, 1], name="in_x")
                net_y = tf.placeholder(tf.int64, [samples_per_batch,], name="in_y")
                
                logits, regularizers, conv1, pool1, h_fc0, h_fc1 = convnnutils.build_graph_3d_5_5_3_3_3(net_x, 0.5, 3057, train=False)
                
                #loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, net_y))
                #loss += 5e-4 * regularizers
                
                print 'logits shape: ',logits.get_shape().as_list(), ' net_y shape: ', net_y.get_shape().as_list()
                print 'X shape: ',  net_x.get_shape().as_list()
                
                global_step = tf.Variable(0, trainable=False)
                
                correct_prediction = tf.equal(tf.argmax(logits,1), net_y)
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        
                # Create initialization "op" and run it with our session 
                init = tf.initialize_all_variables()
                gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.15)
                sess = tf.Session(config=tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options))
                sess.run(init)
                
                # Create a saver and a summary op based on the tf-collection
                saver = tf.train.Saver(tf.all_variables())
                #saver.restore(sess, os.path.join(models_dir,'32models_'+ str(train_rotations) +'_5_5_3_3_3443_ae_kmeans_copy2.ckpt'))   # Load previously trained weights
                #if first:
                saver.restore(sess, os.path.join(models_dir,'5models_360aug_5_5_3_3_3_3057_noise_nr_NoClusters_copy0.66.ckpt'))   # Load previously trained weights
                first = False
                print [v.name for v in tf.all_variables()]
                b = 0
                
                c1_shape = conv1.get_shape().as_list()
                p1_shape = pool1.get_shape().as_list()
                f0_shape = h_fc0.get_shape().as_list()
                f1_shape = h_fc1.get_shape().as_list()
                samples_count_mod_patch = reader.samples.shape[0] - reader.samples.shape[0] % BATCH_SIZE
                c1_1s = numpy.zeros((reader.samples.shape[0], c1_shape[1] * c1_shape[2] * c1_shape[3] * c1_shape[4]), dtype=numpy.float32)
                p1_1s = numpy.zeros((reader.samples.shape[0], p1_shape[1] * p1_shape[2] * p1_shape[3] * p1_shape[4]), dtype=numpy.float32)
                f0_1s = numpy.zeros((samples_count_mod_patch, f0_shape[1]), dtype=numpy.float32)
                f1_1s = numpy.zeros((reader.samples.shape[0], f1_shape[1]), dtype=numpy.float32)
                
                c1_2s = numpy.zeros((reader.samples.shape[0], c1_shape[1] * c1_shape[2] * c1_shape[3] * c1_shape[4]), dtype=numpy.float32)
                p1_2s = numpy.zeros((reader.samples.shape[0], p1_shape[1] * p1_shape[2] * p1_shape[3] * p1_shape[4]), dtype=numpy.float32)
                f0_2s = numpy.zeros((samples_count_mod_patch, f0_shape[1]), dtype=numpy.float32)
                f1_2s = numpy.zeros((reader.samples.shape[0], f1_shape[1]), dtype=numpy.float32)
                
                #for b in range(100):
                #numpy.save("scene_debug/scene_scene_samples.npy", reader_scene.samples)
                #numpy.save("scene_debug/scene_model_samples.npy", reader.samples)
                for b in range(samples_count // BATCH_SIZE):
                    start_time = time.time()
                    X, Y= reader.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations)
                    #numpy.save('scene_debug/scene_testsample_' + str(reader.sample_class_current), X)
                    patch_time = time.time() - start_time
                    X2, Y2= reader_scene.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations)
                    #numpy.save('scene_debug/scene_testcorres_' + str(reader_scene.sample_class_current), X)
                    
                    i = b*num_rotations*BATCH_SIZE
                    i1 = (b + 1)*num_rotations*BATCH_SIZE
                    start_eval = time.time()
                    c1_1, p1_1, f0_1, f1_1 = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X, net_y: Y})
                    eval_time = time.time() - start_eval
                    #c1_1s[i:i1] = numpy.reshape(c1_1, (samples_per_batch,c1_1s.shape[1]))
                    #p1_1s[i:i1] = numpy.reshape(p1_1, (samples_per_batch, p1_1s.shape[1]))
                    f0_1s[i:i1] = numpy.reshape(f0_1, (samples_per_batch, f0_1s.shape[1]))
                    #f1_1s[i:i1] = numpy.reshape(f1_1, (samples_per_batch, f1_1s.shape[1]))
                    
                    c1_2, p1_2, f0_2, f1_2 = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X2, net_y: Y2})
                    #c1_2s[i:i1] = numpy.reshape(c1_2, (samples_per_batch, c1_2s.shape[1]))
                    #p1_2s[i:i1] = numpy.reshape(p1_2, (samples_per_batch, p1_2s.shape[1]))
                    f0_2s[i:i1] = numpy.reshape(f0_2, (samples_per_batch, f0_2s.shape[1]))
                    #f1_2s[i:i1] = numpy.reshape(f1_2, (samples_per_batch, f1_2s.shape[1]))
                    duration = time.time() - start_time
                    #if b % 10 == 0:
                    #    matches = utils.match_des(f1_1s[i:i1], f1_2s[i:i1], ratio)
                    #    utils.correct_matches_support_radii(reader.samples[i:i1], reader_scene.samples[i:i1],
                    #                                         matches, pose=trans_mat, N=samples_count,
                    #                                          support_radii=support_radii)
                    
                    
                    print "point:", b, "  patch time: {0:.2f}".format(patch_time) ,"    eval time: {0:.2f}".format(eval_time), "   Duration (sec): {0:.2f}".format(duration)#, "    loss:    ", error, "    Accuracy: ", acc #, "   Duration (sec): ", duration
                print 'total'
                
                #numpy.save('scene_debug/scene_f0_1s.npy', f0_1s)
                #numpy.save('scene_debug/scene_f0_2s.npy', f0_2s)
                matches = utils.match_des(f0_1s, f0_2s, ratio)
                #numpy.save("scene_debug/scene_matches.npy", matches)
                print 'num_matches: ', len(matches)
                correct, wrong = utils.correct_matches_support_radii(reader.samples, reader_scene.samples, matches,
                                                       pose=trans_mat, N=100000, support_radii=support_radii)
                #correct, wrong = utils.correct_matches(reader.samples, reader_scene.samples, matches, N=100000)
                best10 = num_samples//10
                print 'N=', best10
                print 'total sample count', reader.samples.shape[0]
                correct10, wrong10 = utils.correct_matches_support_radii(reader.samples, reader_scene.samples, matches,
                                                           pose=trans_mat, N=best10, support_radii=support_radii)
                recall = (len(matches)/float(samples_count_mod_patch)*correct)
                scene_name = os.path.split(scene_path)[1]
                with open("results_scene.txt", "a") as myfile:
                    myfile.write('train rotations: ' + str(train_rotations) + '    num samples: ' + str(num_samples) + '    scene: ' + scene_name + "    correct: {0:.4f}".format(correct) + "    correct best 10: {0:.4f}".format(correct10) + "  after filtering count: " + str(reader.samples.shape[0]) + "  num matches: " + str(len(matches))  + " ratio : {0:.1f}".format(ratio) + " recall final : {0:.4f}".format(recall) + '\n')
                    myfile.close()
                with open("precision_scene.txt", "a") as myfile:
                    myfile.write("{0:.4f}".format(correct) + '\n')
                    myfile.close()
                with open("recall_scene.txt", "a") as myfile:
                    myfile.write("{0:.4f}".format(recall) + '\n')
                    myfile.close()
                #plotutils.show_matches(reader.data, reader_noise.data, reader.samples, reader_noise.samples, matches, N=200)
            print 'done'
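
Both test scripts score matches with utils.correct_matches_support_radii against the ground-truth pose. A minimal sketch of the assumed semantics (a match counts as correct when the model sample, mapped by the ground-truth transform, lands within support_radii of its matched scene sample); treating the pose as a 4x4 homogeneous transform is an assumption of this sketch:

import numpy as np

def fraction_correct_sketch(model_samples, scene_samples, matches, pose, support_radii):
    # A match (i, j) is correct when model sample i, mapped into the scene
    # frame by `pose`, lies within `support_radii` of scene sample j.
    if len(matches) == 0:
        return 0.0, 0.0
    R, t = pose[:3, :3], pose[:3, 3]
    mapped = model_samples[:, :3].dot(R.T) + t
    good = sum(np.linalg.norm(mapped[i] - scene_samples[j, :3]) <= support_radii
               for i, j in matches)
    correct = good / float(len(matches))
    return correct, 1.0 - correct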
Code example #26
File: PlyReader.py  Project: hassnov/Conv3D
 def next_batch_3d(self, batch_size, num_rotations=20, num_classes=-1, r_in=-1, increment=True):
     if num_classes == -1:
         num_classes = self.num_samples
     #logging.info('index: ' + str(self.index) + '    current_label: ' + str(self.sample_class_current % num_classes) )
     curr_index = self.index
     if self.index + batch_size < self.samples.shape[0]:
         pc_samples = self.samples[self.index:self.index+batch_size]
         self.index += batch_size
     else:
         pc_samples = self.samples[self.index:self.samples.shape[0]]
         self.index = self.index + batch_size -self.samples.shape[0]
         #self.sample_class_current = self.sample_class_start
         pc_samples = np.vstack((pc_samples, self.samples[0:self.index]))
     
     X = np.zeros((batch_size * num_rotations, self.patch_dim, self.patch_dim, self.patch_dim, 1), np.int32)
     Y = np.zeros((batch_size * num_rotations), np.int32)
     
     r = self.l  # / np.sqrt(2)
     if r_in > 0:
         r = r_in
     #if relR > 0:
     #    r = utils.get_pc_diameter(self.data)*relR
         
     curr_sample_class_current = self.sample_class_current
     #r = self.l / np.sqrt(2)
     for point_number, samplept in enumerate(pc_samples):
         i = self.tree.query_ball_point(samplept[0:3], r=r)
         distances, indices = self.tree.query(samplept[0:3], k=len(i))
         local_points = self.data[indices]
         if len(i) <= 8:
             continue
         if self.use_point_as_mean:
             mu = samplept[0:3]
         else:
             #TODO: compute real mean
             mu = np.zeros(3)
             point_count = local_points.shape[0]
             mu[0] = np.sum(local_points[:, 0]) / point_count
             mu[1] = np.sum(local_points[:, 1]) / point_count
             mu[2] = np.sum(local_points[:, 2]) / point_count
         if (self.use_normals_pc) and (samplept.shape[0] == 6):  # shape is a tuple, so compare its first entry
         #if (self.use_normals_pc):
             #nr = samplept[3:6]
             min_d = np.argmin(distances)
             #if distances[min_d] > 0:
             #    raise
             nr = self.normals[indices[min_d]] 
             #TODO: get_weighted_normal
         else:# calc normals
             #raise
             cov_mat = utils.build_cov(local_points, mean=mu)
             w, v = LA.eigh(cov_mat)
             min_id = np.argmin(w)
             #print 'eigenvalues: ', w
             nr = np.transpose(v[:, min_id])
             nr = utils.normalize(nr)
             #nr = [-x for x in nr]
         
 # at this point the plane is defined by (mu, nr)
 # we transform the mean to be (0,0,0) and the normal to be  (0,0,1)
 # to obtain canonical frame
         z_axis = np.array([0, 0, 1])
         origin = np.zeros(3)
         local_pose = utils.align_vectors(mu, nr, origin, z_axis)
         ref_points = utils.transform_pc(local_points, pose=local_pose)
         #plotutils.show_pc(ref_points, np.zeros((1, 3)), mode='sphere', scale_factor=0.001)
         #mlab.show()
         #rz = np.max ([np.max(ref_points[:, 2]), -np.min(ref_points[:, 2])])
         rz = r
         for aug_num, theta in enumerate(utils.my_range(-np.pi, np.pi, (2*np.pi)/num_rotations)):
             if aug_num == num_rotations:
                 break
             rot2d = utils.angle_axis_to_rotation(theta, z_axis)
             rot_points = utils.transform_pc(ref_points, rot2d)
             #patch = np.zeros((self.patch_dim, self.patch_dim, self.patch_dim), dtype='int32')
             #rz = r / 3
             
             xs1 = np.asarray(((rot_points[:, 0] + r) / (2 * r))*(self.patch_dim - 1), np.int16)
             ys1 = np.asarray(((rot_points[:, 1] + r) / (2 * r))*(self.patch_dim - 1), np.int16)
             zs1 = np.asarray(((rot_points[:, 2] + rz) / (2 * rz))*(self.patch_dim - 1), np.int16)
             
             # keep only voxel indices inside the grid; without the lower
             # bound, negative indices would silently wrap around in numpy
             in_bounds = ((xs1 >= 0) & (xs1 < self.patch_dim) &
                          (ys1 >= 0) & (ys1 < self.patch_dim) &
                          (zs1 >= 0) & (zs1 < self.patch_dim))
             xs = xs1[in_bounds]
             ys = ys1[in_bounds]
             zs = zs1[in_bounds]
             X[point_number*num_rotations + aug_num, xs, ys, zs, 0] = 1
             Y[point_number*num_rotations + aug_num] = self.sample_class_current % num_classes
             # NOTE: this early `continue` bypasses the try/except and the
             # per-point fallback voxelization below; they remain only as
             # debugging leftovers
             continue
             try:
                 X[point_number*num_rotations + aug_num, xs, ys, zs, 0] = 1
             except IndexError as inst:
                 #print ("index out of range")
                 print inst.args
                 for rot_pt in rot_points:               
                     x = int(((rot_pt[0] + r) / (2 * r))*(self.patch_dim - 1))
                     y = int(((rot_pt[1] + r) / (2 * r))*(self.patch_dim - 1))
                     z = int(((rot_pt[2] + rz) / (2 * rz))*(self.patch_dim - 1))
                     if (z >= 0) and (z < self.patch_dim) and (y >= 0) and (y < self.patch_dim) and (x >= 0) and (x < self.patch_dim):
                         X[point_number*num_rotations + aug_num, x, y, z, 0] = 1
             except Exception as inst:
                 print ("Unexpected exception: ", type(inst))
                 print(inst.args)
                 print(inst)
                 #print ("Exception message: ", inst)
                 raise
             
             continue          
             for rot_pt in rot_points:
                                     
                 x = int(((rot_pt[0] + r) / (2 * r))*(self.patch_dim - 1))
                 y = int(((rot_pt[1] + r) / (2 * r))*(self.patch_dim - 1))
                 z = int(((rot_pt[2] + rz) / (2 * rz))*(self.patch_dim - 1))
                 
                 if (z >= 0) and (z < self.patch_dim) and (y >= 0) and (y < self.patch_dim) and (x >= 0) and (x < self.patch_dim):
                     #patch[x, y, z] = 1
                     #X[point_number*num_rotations + aug_num, x + self.patch_dim * (y + self.patch_dim * z)] = 1
                     X[point_number*num_rotations + aug_num, x, y, z, 0] = 1
             #X[point_number*num_rotations + aug_num, :] = patch.reshape((np.power(self.patch_dim, 3),))
             Y[point_number*num_rotations + aug_num] = self.sample_class_current % num_classes
         #fig = plotutils.plot_patch_3D(X[point_number*num_rotations + 0], name='patch label ' + str(self.sample_class_current % num_classes))
         #plt.show()
         #TODO: start from start not 0 with sample_class_current                
         self.sample_class_current += 1
     if not increment:
         self.sample_class_current = curr_sample_class_current
         self.index = curr_index
     return X, Y
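
next_batch_3d builds its canonical frame through utils.align_vectors, which is not part of this snippet. A minimal sketch of one way such a pose can be constructed with Rodrigues' rotation formula, assuming transform_pc applies a 4x4 homogeneous transform p' = R p + t:

import numpy as np

def align_to_z_sketch(mu, nr):
    # Build a 4x4 pose that maps the point mu to the origin and the unit
    # normal nr onto the +z axis (Rodrigues' rotation formula).
    nr = nr / np.linalg.norm(nr)
    z = np.array([0.0, 0.0, 1.0])
    axis = np.cross(nr, z)
    s = np.linalg.norm(axis)          # sin of the angle between nr and z
    c = np.dot(nr, z)                 # cos of the angle between nr and z
    if s < 1e-8:                      # nr already (anti-)parallel to z
        R = np.diag([1.0, 1.0, 1.0]) if c > 0 else np.diag([1.0, -1.0, -1.0])
    else:
        k = axis / s
        K = np.array([[0.0, -k[2], k[1]],
                      [k[2], 0.0, -k[0]],
                      [-k[1], k[0], 0.0]])
        R = np.eye(3) + s * K + (1.0 - c) * K.dot(K)
    pose = np.eye(4)
    pose[:3, :3] = R
    pose[:3, 3] = -R.dot(mu)          # send mu to the origin
    return pose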
Code example #27
def main(args):
    models_dir = '/home/hasan/hasan/tr_models/'
    BATCH_SIZE=10
    num_rotations=1
    samples_per_batch = BATCH_SIZE * num_rotations
    
    
    #base_path = "/home/hasan/Downloads"
    scene_i = 0
    #scene_count = 51
    #scenes_dir = r"/home/hasan/Downloads/3D models/Stanford/Retrieval"
    scenes_dir = args[1]
    base_path = args[2]
    scenes_count = int(args[3])
    train_rotations = int(args[4])
    num_samples = int(args[5])
    ratio = float(args[6])
    rel_support_radii = float(args[7])
    network_name = args[8]
    trained_classes = int(args[9])
    save_prefix = args[10]
    patch_dim = 32
    relL = 0.05
    #config_files = fill_files_list(os.path.join(scenes_dir, "configs.txt"), scenes_dir)
    config_files = None
    
    for s in range(0, scenes_count):
        model_paths, model_trans_paths, scene_path = get_scene(base_path, scenes_dir, s, configs=config_files)
        #if "Scene3" in scene_path or "Scene4" in scene_path:
        #    continue
        
        print "scene path: ", scene_path
        reader_scene = PlyReader.PlyReader()
        reader_scene.read_ply(scene_path, num_samples=num_samples, add_noise=False, noise_std=0.5, noise_prob=0, noise_factor=0,
                           rotation_axis=[0, 0, 1], rotation_angle=utils.rad(0), 
                           sampling_algorithm=SampleAlgorithm.ISS_Detector)
        pc_diameter = utils.get_pc_diameter(reader_scene.data)
        l_scene = relL*pc_diameter
        support_radii = rel_support_radii*pc_diameter
        #support_radii = 0.0114401621899
        print "scene diameter: ", pc_diameter
        print "supprot_radii", support_radii
        reader_scene.set_variables(l=l_scene, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50, use_point_as_mean=False)
        print "num before filtering: ", reader_scene.samples.shape[0]
        #reader_scene.samples = filter_scene_samples(reader_scene.data, reader_scene.samples, support_radii/2, threshold=500)
        #print "num after filtering: ", reader_scene.samples.shape[0]
        #reader_scene_samples_full = reader_scene.samples
        
        for model_num in range(len(model_paths)):
            model_path = model_paths[model_num]
            model_trans_path = model_trans_paths[model_num]
            #if not ("bun" in model_path):
            #    continue 
            print "trans mat path: ", model_trans_path
            print "model_path: ", model_path
            #if not "arm" in model_path:
            #    print "skipping: ", model_path
            #    continue
            trans_mat = numpy.loadtxt(model_trans_path, ndmin=2)
            reader = PlyReader.PlyReader()
            reader.read_ply(model_path, num_samples=num_samples, add_noise=False,
                             sampling_algorithm=SampleAlgorithm.ISS_Detector)
            pc_diameter = utils.get_pc_diameter(reader.data)
            print "model diameter: ", pc_diameter
            l = relL*pc_diameter
            reader.set_variables(l=l, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50, use_point_as_mean=False)
            
            #reader_scene.samples = utils.transform_pc(reader.samples, trans_mat)
            
            reader_scene.sample_indices = -1
            reader_scene.index = 0
            reader_scene.sample_class_current = 0
            ### just for testing
            print "l: {0}, l_scene: {1}".format(l, l_scene)
            reader_scene.set_variables(l=l, patch_dim=patch_dim, filter_bad_samples=False, filter_threshold=50, use_point_as_mean=False)
            
            reader_scene.samples = utils.transform_pc(reader.samples, trans_mat)
            num_cf, _, _ = utils.num_corresponding_features(reader.samples, reader_scene.samples, reader_scene.data, trans_mat, support_radii)
            #reader.samples = model_corres
            #reader_scene.samples = scene_corres
            print "num_corresponding_features: ", num_cf
            if (num_cf < 10):
                print "skippint num_cf: ", num_cf
                continue
            
            #numpy.save("scene_debug/scene_scene.npy", reader_scene.data)
            #numpy.save("scene_debug/scene_scene_samples.npy", reader_scene.samples)
            #numpy.save("scene_debug/scene_model_samples.npy", reader.samples)
            
            print "s: ", s
            #continue
            samples_count = reader.compute_total_samples(num_rotations)
            batches_per_epoch = samples_count/BATCH_SIZE
            
            with tf.Graph().as_default() as graph:
                #net_x = tf.placeholder("float", X.shape, name="in_x")
                #net_y = tf.placeholder(tf.int64, Y.shape, name="in_y")
                
                net_x = tf.placeholder("float", [samples_per_batch, patch_dim, patch_dim, patch_dim, 1], name="in_x")
                net_y = tf.placeholder(tf.int64, [samples_per_batch,], name="in_y")
                
                logits, regularizers, conv1, pool1, h_fc0, h_fc1 = convnnutils.build_graph_3d_5_5_3_3_3(net_x, 0.5, trained_classes, train=False)
                #logits, regularizers, conv1, pool1, h_fc0, h_fc1 = convnnutils.build_graph_3d_5_5_3_3_3(net_x, 0.5, 3057, train=False)
                #logits, regularizers, conv1, pool1, h_fc0, h_fc1 = convnnutils.build_graph_3d_5_5_3_3_3(net_x, 0.5, 5460, train=False)
                #logits, regularizers, conv1, pool1, h_fc0, h_fc1 = convnnutils.build_graph_3d_5_5_3_3_3_4000(net_x, 0.5, 5460, train=False)
                #logits, regularizers, conv1, pool1, h_fc0, h_fc1 = convnnutils.build_graph_3d_5_5_3_3_3_4000(net_x, 0.5, 113, train=False)
                #logits, regularizers, conv1, pool1, h_fc0, h_fc1 = convnnutils.build_graph_3d_5_5_3_3_small(net_x, 0.5, 537, train=False)
                
                #loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, net_y))
                #loss += 5e-4 * regularizers
                
                print 'logits shape: ',logits.get_shape().as_list(), ' net_y shape: ', net_y.get_shape().as_list()
                print 'X shape: ',  net_x.get_shape().as_list()
                
                global_step = tf.Variable(0, trainable=False)
                
                correct_prediction = tf.equal(tf.argmax(logits,1), net_y)
                accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
        
                # Create initialization "op" and run it with our session 
                init = tf.initialize_all_variables()
                gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.12)
                sess = tf.Session(config=tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options))
                sess.run(init)
                
                # Create a saver and a summary op based on the tf-collection
                saver = tf.train.Saver(tf.all_variables())
                #saver.restore(sess, os.path.join(models_dir,'32models_'+ str(train_rotations) +'_5_5_3_3_3443_ae_kmeans_copy2.ckpt'))   # Load previously trained weights
                #if first:
                #saver.restore(sess, os.path.join(models_dir,'9models_270aug_5_5_3_3_3_7267_noise_NoClusters_copy.ckpt'))
                
                
                saver.restore(sess, os.path.join(models_dir,network_name))
                #saver.restore(sess, os.path.join(models_dir,'5models_720aug_5_5_3_3_3_3057_90rots_noise_relR_nr_100clusters_fpfh.ckpt'))
                #saver.restore(sess, os.path.join(models_dir,'1models_bunny_720aug_5_5_3_3_3_3057_90rots_noise_relR_nr_100clusters_fpfh_wd2.ckpt'))
                #saver.restore(sess, os.path.join(models_dir,'5models_720aug_5_5_3_3_3_3057_90rots_noise_relR_nr_200clusters_fpfh.ckpt'))
                #saver.restore(sess, os.path.join(models_dir,'6models_720aug_5_5_3_3_3_2070_90rots_noise_relR_nr_NoClusters.ckpt'))
                #saver.restore(sess, os.path.join(models_dir,'5models_720aug_5_5_3_3_3_3057_noise_nr_relR_NoClusters_copy.ckpt'))
                #saver.restore(sess, os.path.join(models_dir,'9models_540aug_5_5_3_3_3_5460_45rots_noise_relR_nr_NoClusters.ckpt'))
                #saver.restore(sess, os.path.join(models_dir,'1models_540aug_5_5_3_3_537_arm_45rots_noise__relR_nr_NoClusters.ckpt'))
                
                #saver.restore(sess, os.path.join(models_dir,'9models_540aug_5_5_3_3_3_5460_45rots_noise_relR_nr_800Clusters_copy.ckpt'))
                #saver.restore(sess, os.path.join(models_dir,'9models_540aug_5_5_3_3_3_5460_90rots_relR_nr_NoClusters_copy.ckpt'))
                
                #saver.restore(sess, os.path.join(models_dir,'5models_360aug_5_5_3_3_3_3057_noise_nr_NoClusters_copy0.66.ckpt'))
                #saver.restore(sess, os.path.join(models_dir,'45models_360aug_5_5_3_3_3_10000_nr_noise_NoClusters.ckpt'))  
                #saver.restore(sess, os.path.join(models_dir,'5models_360aug_5_5_3_3057_noise_nr_NoClusters_copy.ckpt'))
                
                first = False
                print [v.name for v in tf.all_variables()]
                b = 0
                
                c1_shape = conv1.get_shape().as_list()
                p1_shape = pool1.get_shape().as_list()
                f0_shape = h_fc0.get_shape().as_list()
                f1_shape = h_fc1.get_shape().as_list()
                samples_count_mod_patch = reader.samples.shape[0] - reader.samples.shape[0] % BATCH_SIZE
                c1_1s = numpy.zeros((reader.samples.shape[0], c1_shape[1] * c1_shape[2] * c1_shape[3] * c1_shape[4]), dtype=numpy.float32)
                p1_1s = numpy.zeros((reader.samples.shape[0], p1_shape[1] * p1_shape[2] * p1_shape[3] * p1_shape[4]), dtype=numpy.float32)
                f0_1s = numpy.zeros((samples_count_mod_patch, f0_shape[1]*3), dtype=numpy.float32)
                f1_1s = numpy.zeros((samples_count_mod_patch, f1_shape[1]), dtype=numpy.float32)
                
                samples_count_scene_mod_patch = reader_scene.samples.shape[0] - reader_scene.samples.shape[0] % BATCH_SIZE
                c1_2s = numpy.zeros((reader.samples.shape[0], c1_shape[1] * c1_shape[2] * c1_shape[3] * c1_shape[4]), dtype=numpy.float32)
                p1_2s = numpy.zeros((reader.samples.shape[0], p1_shape[1] * p1_shape[2] * p1_shape[3] * p1_shape[4]), dtype=numpy.float32)
                f0_2s = numpy.zeros((samples_count_scene_mod_patch, f0_shape[1]*3), dtype=numpy.float32)
                f1_2s = numpy.zeros((samples_count_scene_mod_patch, f1_shape[1]), dtype=numpy.float32)
                
                for b in range(reader_scene.samples.shape[0] // BATCH_SIZE):
                    i = b*num_rotations*BATCH_SIZE
                    i1 = (b + 1)*num_rotations*BATCH_SIZE 
                    X2, Y2= reader_scene.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations, increment=False)
                    X207, Y207 = reader_scene.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations, increment=False, r_in=reader_scene.l*(0.04/relL))
                    X203, Y203 = reader_scene.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations, increment=True, r_in=reader_scene.l*(0.03/relL))
                    #X202, Y202 = reader_scene.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations, increment=True, r_in=reader_scene.l*(0.02/relL))
                    """
                    numpy.save('scene_debug/sample_scene_X_' + str(b), X2)
                    numpy.save('scene_debug/sample_scene_X02_' + str(b), X202)
                    numpy.save('scene_debug/sample_scene_X03_' + str(b), X203)
                    numpy.save('scene_debug/sample_scene_X05_' + str(b), X207)
                    """
                    _, _, f0_2, f1_2 = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X2, net_y: Y2})
                    _, _, f0_207, _ = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X207, net_y: Y207})
                    _, _, f0_203, _ = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X203, net_y: Y203})
                    #_, _, f0_202, _ = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X202, net_y: Y202})
                    #assert (numpy.all(f0_2 == f0_207))
                    #print b, ", ",
                    #f0_2 = numpy.hstack((f0_2, f0_207, f0_203, f0_202))                    
                    f0_2 = numpy.hstack((f0_2, f0_207, f0_203))
                    f0_2s[i:i1] = numpy.reshape(f0_2, (samples_per_batch, f0_2s.shape[1]))
                    f1_2s[i:i1] = numpy.reshape(f1_2, (samples_per_batch, f1_2s.shape[1]))
                    #print b

                for b in range(reader.samples.shape[0] // BATCH_SIZE):
                    start_time = time.time()
                    X, Y= reader.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations, increment=False)
                    X07, Y07 = reader.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations, increment=False, r_in=reader.l*(0.04/relL))
                    X03, Y03 = reader.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations, increment=True, r_in=reader.l*(0.03/relL))
                    #X02, Y02 = reader.next_batch_3d(BATCH_SIZE, num_rotations=num_rotations, increment=True, r_in=reader.l*(0.02/relL))
                    patch_time = time.time() - start_time
                    """                 
                    numpy.save('scene_debug/sample_model_X_' + str(b), X)
                    numpy.save('scene_debug/sample_model_X02_' + str(b), X02)
                    numpy.save('scene_debug/sample_model_X03_' + str(b), X03)
                    numpy.save('scene_debug/sample_model_X05_' + str(b), X07)
                    """
                    i = b*num_rotations*BATCH_SIZE
                    i1 = (b + 1)*num_rotations*BATCH_SIZE
                    start_eval = time.time()
                    _, _, f0_1, f1_1 = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X, net_y: Y})
                    _, _, f0_107, _ = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X07, net_y: Y07})
                    _, _, f0_103, _ = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X03, net_y: Y03})
                    #_, _, f0_102, _ = sess.run([conv1, pool1, h_fc0, h_fc1], feed_dict={net_x:X02, net_y: Y02})
                    eval_time = time.time() - start_eval 
                    #assert (numpy.all(f0_1 == f0_107))                  
                    #f0_1 = numpy.hstack((f0_1, f0_107, f0_103, f0_102))
                    f0_1 = numpy.hstack((f0_1, f0_107, f0_103))
                    f0_1s[i:i1] = numpy.reshape(f0_1, (samples_per_batch, f0_1s.shape[1]))
                    f1_1s[i:i1] = numpy.reshape(f1_1, (samples_per_batch, f1_1s.shape[1]))                   
                    duration = time.time() - start_time
                    print "point:", b, "  patch time: {0:.2f}".format(patch_time) ,"    eval time: {0:.2f}".format(eval_time), "   Duration (sec): {0:.2f}".format(duration)#, "    loss:    ", error, "    Accuracy: ", acc #, "   Duration (sec): ", duration
                    
                    
                print 'total'
                
                #numpy.save('scene_debug/scene_f0_1s.npy', f0_1s)
                #numpy.save('scene_debug/scene_f0_2s.npy', f0_2s)
                desc1 = f0_1s
                desc2 = f0_2s
                
                
                ratios = [1, 0.9, 0.8, 0.7, 0.6, 0.5]
                matches_arr = [None]*len(ratios)
                for ratio_i, ratio1 in enumerate(ratios):
                    if desc1.shape[0] < desc2.shape[0]:                    
                        matches_arr[ratio_i] = utils.match_des_test(desc1, desc2, ratio1)
                        print "match_des_test"
                    else:
                        matches_arr[ratio_i] = utils.match_des(desc1, desc2, ratio1)
                        print "match_des"
                    
                #matches = utils.match_des(desc1, desc2, ratio)
                #print "match_des"
                #numpy.save('scene_debug/matches', matches)
                #numpy.save("scene_debug/scene_matches.npy", matches)
                #print 'num_matches: ', len(matches)
                #correct, wrong = utils.correct_matches_support_radii(reader.samples, reader_scene.samples, matches,
                #                                       pose=trans_mat, N=100000, support_radii=support_radii)
                correct_arr = [None]*len(ratios)
                recall_arr = [None]*len(ratios)
                match_res_arr = [None]*len(ratios)
                for matches_i, matches in enumerate(matches_arr):
                    correct_arr[matches_i], _, match_res_arr[matches_i] = utils.correct_matches_support_radii(reader_scene.samples, reader.samples, matches,
                                                           pose=trans_mat, N=100000, support_radii=support_radii)
                #numpy.save('scene_debug/matche_res', match_res)
                #correct, wrong = utils.correct_matches(reader.samples, reader_scene.samples, matches, N=100000)
                best10 = num_samples//10
                print 'N=', best10
                print 'total sample count', reader.samples.shape[0]
                correct10 = -1
                #correct10, wrong10 = utils.correct_matches_support_radii(reader.samples, reader_scene.samples, matches,
                #                                           pose=trans_mat, N=best10, support_radii=support_radii)
                for ratio_i, _ in enumerate(ratios):
                    recall_arr[ratio_i] = (len(matches_arr[ratio_i])/float(num_cf))*correct_arr[ratio_i]
                scene_name = os.path.split(scene_path)[1]
                for ratio_i, ratio1 in enumerate(ratios):
                    with open("results_ret_5models_{1}_clustering_same{0}.txt".format(ratio1, save_prefix), "a") as myfile:
                        myfile.write('train rotations: ' + str(train_rotations) + '    num samples: ' + str(num_samples) + '    scene: ' + scene_name + "    correct: {0:.4f}".format(correct_arr[ratio_i]) + "    correct best 10: {0:.4f}".format(correct10) + "  after filtering count: " + str(reader.samples.shape[0]) + "  num matches: " + str(len(matches_arr[ratio_i]))  + " ratio : {0:.1f}".format(ratio1) + " recall final : {0:.4f}".format(recall_arr[ratio_i]) + '\n')
                        myfile.close()
                    with open("precision_ret_5models_{1}_clustering_same{0}.txt".format(ratio1, save_prefix), "a") as myfile:
                        myfile.write("{0:.4f}".format(correct_arr[ratio_i]) + '\n')
                        myfile.close()
                    with open("recall_ret_5models_{1}_clustering_same{0}.txt".format(ratio1, save_prefix), "a") as myfile:
                        myfile.write("{0:.4f}".format(recall_arr[ratio_i]) + '\n')
                        myfile.close()
                #plotutils.show_matches(reader.data, reader_noise.data, reader.samples, reader_noise.samples, matches, N=200)
            print 'done'
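
For reference, the precision/recall bookkeeping written to the result files above reduces to three scalars; a tiny worked example with illustrative (not measured) values:

num_matches = 120                  # len(matches_arr[ratio_i])
precision = 0.65                   # correct_arr[ratio_i], fraction of correct matches
num_cf = 200                       # corresponding features between model and scene
recall = (num_matches / float(num_cf)) * precision   # = 0.39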
Code example #28
File: plyTest.py  Project: hassnov/Conv3D
def extract_patches_vox(pc, sample_points, l, num_rotations=20, patch_dim=24, use_normals_pc=True, use_point_as_mean=False, flip_view_point=False, sigma=0.7071):
    r = l / np.sqrt(2)
    print 'l=', l, '    r=', r
    tree = spatial.KDTree(pc)
    patches = []
    #show_pc(sample_points)
    #mlab.show()
    #show_pc(pc)
    #mlab.show()
    for point_number, samplept in enumerate(sample_points):
        # accumulate patches across all sample points (re-initializing the list
        # here would discard every previous point's patches before returning)
        i = tree.query_ball_point(samplept[0:3], r=r)
        distances, indices = tree.query(samplept[0:3], k=len(i))
        local_points = pc[indices]
        if len(i) <= 8:
            continue
        if use_point_as_mean:
            mu = samplept[0:3]
        else:
            #TODO: compute real mean
            mu = samplept[0:3]
        if (use_normals_pc) and (samplept.shape[0] == 6):  # shape is a tuple, so compare its first entry
            nr = samplept[3:6]
            #TODO: get_weighted_normal
        else:# calc normals
            cov_mat = utils.build_cov(local_points, mean=mu)
            w, v = LA.eigh(cov_mat)
            min_id = np.argmin(w)
            nr = np.transpose(v[:, min_id])
            nr = utils.normalize(nr)
            #nr = [-x for x in nr]
        
# at this point the plane is defined by (mu, nr)
# we transform the mean to be (0,0,0) and the normal to be  (0,0,1)
# to obtain canonical frame
        z_axis = np.array([0, 0, 1])
        origin = np.zeros(3)
        local_pose = utils.align_vectors(mu, nr, origin, z_axis)
        ref_points = utils.transform_pc(local_points, pose=local_pose)
        #np.save('patches/ref_' + str(point_number), ref_points)
        #print 'ref points: ', ref_points.shape
        
        print 'point number: ', point_number
        show_pc(pc, local_points, mode='sphere')
        draw_normal(mu, nr, tube_radius=0.0005)
        mlab.show()
        
        show_pc(ref_points, np.array([0, 0, 0]).reshape((1, 3)))
        draw_normal(origin, z_axis)
        mlab.show()
        for theta in utils.my_range(-np.pi, np.pi, (2*np.pi)/num_rotations):
            rot2d = utils.angle_axis_to_rotation(theta, z_axis)  # the function expects the angle itself, not its cosine
            rot_points = utils.transform_pc(ref_points, rot2d)
            print 'rot_max: ', rot_points[:, 0].max()
            patch = np.zeros((patch_dim, patch_dim, patch_dim), dtype='int32')
            #xs = []
            #ys = []
            #zs = []
            for rot_pt in rot_points:
                #x = int(patch_dim*(rot_pt[0] / (2*l)) + patch_dim / 2)
                #y = int(patch_dim*(rot_pt[1] / (2*l)) + patch_dim / 2)
                #z = int(patch_dim*(rot_pt[2] / (2*l)) + patch_dim / 2)
                x = int(((rot_pt[0] + l) / (2 * l))*(patch_dim - 1))
                y = int(((rot_pt[1] + l) / (2 * l))*(patch_dim - 1))
                z = int(((rot_pt[2] + l) / (2 * l))*(patch_dim - 1))
                if (z >= 0) and (z < patch_dim) and (y >= 0) and (y < patch_dim) and (x >= 0) and (x < patch_dim):
                    patch[x, y, z] = 1
                    #if (x not in xs) or (y not in ys) or (z not in zs):
                     #   xs.append(int(x))
                     #   ys.append(int(y))
                      #  zs.append(int(z))
                else:
                    print 'rot_pt[0] / (r*2) = ', rot_pt[0] / (r*2), '    rot_pt[1] / (r*2) = ', rot_pt[1] / (r*2)
            plot_patch(patch)
            plt.show()
            patches.append(patch)
                                 
#header = 'patch: ' + str(point_number) + ', rotation: ' + str(theta)
#np.savetxt('E:/Fac/BMC Master/Thesis/Models/bunny/reconstruction/plytest/patchesPython/patch_' +
#          str(point_number), patch, delimiter=',', header=header)
        #fig = plt.figure()
        #ax = fig.add_subplot(111, projection='3d')
        #ax.scatter3D(xs, ys, zs)
        #plt.title('Patch')
        #ax.set_xlabel('X')
        #ax.set_ylabel('Y')
        #ax.set_zlabel('Z')
        #plt.show()
            #show_pc(patch)
        #print 'patches ', len(patches)
        #np.save('patches/patches_' + str(point_number), np.array (patches))
    return patches
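
The per-point voxelization loop above has a vectorized equivalent, mirroring the index computation used in next_batch_3d earlier; a minimal sketch:

import numpy as np

def voxelize_sketch(rot_points, l, patch_dim):
    # Map points in [-l, l]^3 to an occupancy grid of shape (patch_dim,)*3,
    # discarding points that fall outside the grid.
    idx = np.asarray(((rot_points + l) / (2 * l)) * (patch_dim - 1), np.int16)
    keep = np.all((idx >= 0) & (idx < patch_dim), axis=1)
    patch = np.zeros((patch_dim, patch_dim, patch_dim), dtype='int32')
    xs, ys, zs = idx[keep].T
    patch[xs, ys, zs] = 1
    return patch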
Code example #29
def main(args):
    """
    For a given method of NDT approximation, this function samples random initial
    displacements within the given ranges and solves both the Consensus and the
    Naive NDT odometry. The time taken and the rotation and displacement errors
    of the two methods are compared.
    """

    print('Setting model parameters')

    run_no = 1
    plot_fig = args.plot_figs

    run_mode = args.run_mode
    total_iters = args.total_iters
    iter1 = args.iter1
    iter2 = args.iter2
    num_pcs = args.num_pcs
    num_odom_vects = args.num_odom_vects
    test_mode = args.test_mode

    max_x = 0.4
    max_y = 0.4
    max_z = 0.1
    max_phi = 10
    max_theta = 10
    max_psi = 30

    odom_limits = np.array([max_x, max_y, max_z, max_phi, max_theta, max_psi])

    # Choose the voxel lengths at which NDT approximation will be calculated. If a single value is used, only 1 NDT approximation will be performed
    scale_array = np.array([2., 1.]) # np.array([2., 1., 0.5]) # np.array([1.])

    assert(total_iters == iter1 + iter2)

    print('Loading dataset')
    pcs = data_utils.load_uiuc_pcs(0, num_pcs-1, mode=run_mode)

    # Choose the different values of the voxel consensus metric that will be used to remove low-consensus voxels
    integrity_filters = np.array([0.3, 0.4, 0.5, 0.6, 0.7, 0.8]) # np.array([0.5, 0.8])

    num_int_vals = np.size(integrity_filters)

    print('Creating placeholder variables for storing errors')

    odom_vectors = np.zeros([num_int_vals, num_pcs, num_odom_vects, 6])

    vanilla_time = np.zeros([num_int_vals, num_pcs, num_odom_vects])
    vanilla_pos_error = np.zeros_like(vanilla_time)
    vanilla_rot_error = np.zeros_like(vanilla_time)

    consensus_time = np.zeros_like(vanilla_time)
    consensus_pos_error = np.zeros_like(vanilla_pos_error)
    consensus_rot_error = np.zeros_like(vanilla_rot_error)

    for pc_idx, ref_pc in enumerate(pcs):
        for odom_idx in range(num_odom_vects):
            
            rand_num = 2*(np.random.rand(6) - 0.5) # Choose a random odometry vector to test convergence of algorithm
            test_odom = odom_limits*rand_num
            inv_test_odom = utils.invert_odom_transfer(test_odom)

            print('Creating transformed test point cloud')
            trans_pc = utils.transform_pc(test_odom, ref_pc)

            print('\nRunning vanilla multi-scale NDT for PC:', pc_idx, 'odometry: ', odom_idx, '\n')

            vanilla_odom, test_van_time, _ = ndt.multi_scale_ndt_odom(np.copy(ref_pc), np.copy(trans_pc), scale_array, 0.5,
                                                                   test_mode, total_iters, 0)

            for cv_idx, cv in enumerate(integrity_filters):
                print('\nExperiment for C_v:', cv, ' pc number:', pc_idx, 'odometry:', odom_idx, '\n')
                print('Running consensus multi-scale NDT')
                consensus_odom, test_con_time, _ = ndt.multi_scale_ndt_odom(np.copy(ref_pc), np.copy(trans_pc),
                                                                         scale_array, cv, test_mode, iter1, iter2)

                print('Computing and storing error and timing values')

                consensus_odom_diff = consensus_odom - inv_test_odom
                consensus_time[cv_idx, pc_idx, odom_idx] = test_con_time
                consensus_pos_error[cv_idx, pc_idx, odom_idx] = np.linalg.norm(consensus_odom_diff[:3])
                consensus_rot_error[cv_idx, pc_idx, odom_idx] = np.linalg.norm(consensus_odom_diff[3:])

            vanilla_odom_diff = vanilla_odom - inv_test_odom

            odom_vectors[:, pc_idx, odom_idx, :] = inv_test_odom

            vanilla_time[:, pc_idx, odom_idx] = test_van_time
            vanilla_pos_error[:, pc_idx, odom_idx] = np.linalg.norm(vanilla_odom_diff[:3])
            vanilla_rot_error[:, pc_idx, odom_idx] = np.linalg.norm(vanilla_odom_diff[3:])

        if pc_idx % 10 == 0:
            print('Saving computed values')
            np.save('consensus_values_' + test_mode + '_' + str(run_no), integrity_filters)
            np.save('odometry_vectors_' + test_mode + '_' + str(run_no), odom_vectors)
            np.save("vanilla_time_" + test_mode + '_' + str(run_no), vanilla_time)
            np.save("vanilla_pos_error_" + test_mode + '_' + str(run_no), vanilla_pos_error)
            np.save("vanilla_rot_error_" + test_mode + '_' + str(run_no), vanilla_rot_error)

            np.save("consensus_time_" + test_mode + '_' + str(run_no), consensus_time)
            np.save("consensus_pos_error_" + test_mode + '_' + str(run_no), consensus_pos_error)
            np.save("consensus_rot_error_" + test_mode + '_' + str(run_no), consensus_rot_error)



    if plot_fig:
        plt.close('all')
        plot_vanilla_time = utils.plot_averaged(vanilla_time)
        plot_vanilla_pos_error = utils.plot_averaged(vanilla_pos_error)
        plot_vanilla_rot_error = utils.plot_averaged(vanilla_rot_error)

        plot_consensus_time = utils.plot_averaged(consensus_time)
        plot_consensus_pos_error = utils.plot_averaged(consensus_pos_error)
        plot_consensus_rot_error = utils.plot_averaged(consensus_rot_error)

        plt.figure()
        plt.plot(integrity_filters, plot_vanilla_time, label='Vanilla Timing')
        plt.plot(integrity_filters, plot_consensus_time, label='Consensus Timing')
        plt.title("Timing comparison")
        plt.legend(loc="upper right")

        plt.figure()
        plt.plot(integrity_filters, plot_vanilla_pos_error, label='Vanilla Position Error')
        plt.plot(integrity_filters, plot_consensus_pos_error, label='Consensus Position Error')
        plt.title("Position Error comparison")
        plt.legend(loc="upper right")

        plt.figure()
        plt.plot(integrity_filters, plot_vanilla_rot_error, label='Vanilla Rotation Error')
        plt.plot(integrity_filters, plot_consensus_rot_error, label='Consensus Rotation Error')
        plt.title('Rotation Error comparison')
        plt.legend(loc="upper right")

        plt.show()

    print('Saving computed values')
    np.save('consensus_values_' + test_mode + '_' + str(run_no), integrity_filters)
    np.save('odometry_vectors_' + test_mode + '_' + str(run_no), odom_vectors)
    np.save("vanilla_time_" + test_mode + '_' + str(run_no), vanilla_time)
    np.save("vanilla_pos_error_" + test_mode + '_' + str(run_no), vanilla_pos_error)
    np.save("vanilla_rot_error_" + test_mode + '_' + str(run_no), vanilla_rot_error)

    np.save("consensus_time_" + test_mode + '_' + str(run_no), consensus_time)
    np.save("consensus_pos_error_" + test_mode + '_' + str(run_no), consensus_pos_error)
    np.save("consensus_rot_error_" + test_mode + '_' + str(run_no), consensus_rot_error)

    return 0
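
main() reads its configuration from an args namespace whose parser is not included in this snippet; a minimal driver sketch with assumed flag names (matching the attributes read above) and illustrative defaults:

import argparse

if __name__ == '__main__':
    # Hypothetical CLI wiring; the real project may define these differently.
    parser = argparse.ArgumentParser(description='Consensus vs. naive NDT odometry comparison')
    parser.add_argument('--plot_figs', action='store_true')
    parser.add_argument('--run_mode', default='server')
    parser.add_argument('--total_iters', type=int, default=15)
    parser.add_argument('--iter1', type=int, default=10)
    parser.add_argument('--iter2', type=int, default=5)
    parser.add_argument('--num_pcs', type=int, default=10)
    parser.add_argument('--num_odom_vects', type=int, default=5)
    parser.add_argument('--test_mode', default='uiuc')
    main(parser.parse_args())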