Example #1
def mandelbulb_evolution(K=200,
                         p=8.0,
                         N=10,
                         bounds=((-1.5, 1.5), (-1.5, 1.5), (-1.5, 1.5)),
                         optimize=False):
    M = T.ftensor4()
    C = T.ftensor4()
    n = T.fscalar()

    def step(X):
        r = T.sqrt(
            T.square(X[:, :, :, 0]) + T.square(X[:, :, :, 1]) +
            T.square(X[:, :, :, 2]))
        phi = T.arctan2(X[:, :, :, 1], X[:, :, :, 0])
        theta = T.arctan2(
            T.sqrt(T.square(X[:, :, :, 0]) + T.square(X[:, :, :, 1])),
            X[:, :, :, 2])

        X_ = T.stack((T.pow(r, n) * T.sin(n * theta) * T.cos(n * phi),
                      T.pow(r, n) * T.sin(n * theta) * T.sin(n * phi),
                      T.pow(r, n) * T.cos(n * theta)),
                     axis=-1)

        return X_ + C

    f_ = theano.function([M, C, n], step(M), allow_input_downcast=True)

    import open3d
    from scipy.misc import imresize
    from skimage import measure

    def f(X, c, q):
        vert_list = []

        for j in range(N):
            print('step %d' % j)
            X = f_(X, c, q)

            R = np.sqrt(np.square(X[:,:,:,0]) + \
                        np.square(X[:,:,:,1]) + \
                        np.square(X[:,:,:,2]))

            B = 1.0 * (R < 2.0)

            verts, faces = measure.marching_cubes_classic(B, 0)

            for k in range(3):
                verts[:, k] = (bounds[k][1] - bounds[k][0]) * (
                    1.0 * verts[:, k]) / K + bounds[k][0]

            faces = measure.correct_mesh_orientation(B, verts, faces)

            vert_list.append(verts)

        return vert_list

    x, y, z = np.meshgrid(
        np.linspace(bounds[0][0], bounds[0][1], K, dtype=np.float32),
        np.linspace(bounds[1][0], bounds[1][1], K, dtype=np.float32),
        np.linspace(bounds[2][0], bounds[2][1], K, dtype=np.float32))

    X = np.stack((x, y, z), axis=-1)

    v = f(np.zeros((K, K, K, 3)), X, p)

    images = []

    def render_pointcloud(pcd):
        vis = open3d.Visualizer()
        vis.create_window()
        vis.add_geometry(pcd)
        ctr = vis.get_view_control()
        ctr.rotate(202.0, 202.0)
        vis.update_geometry()
        vis.poll_events()
        vis.update_renderer()
        image = np.asarray(vis.capture_screen_float_buffer(False))
        return image

    for k in list(range(len(v) - 1)) + list(range(len(v) - 1))[::-1]:
        pcd = open3d.PointCloud()
        pcd.points = open3d.Vector3dVector(np.array(v[k + 1]))
        open3d.estimate_normals(pcd,
                                search_param=open3d.KDTreeSearchParamHybrid(
                                    radius=0.1, max_nn=30))

        image = np.array(255 * render_pointcloud(pcd), dtype=np.uint8)

        images.append(image)

    gif_name = 'MandelbulbEvolution.gif'
    output_file = os.path.join(__file__.split('.')[0], gif_name)
    imageio.mimsave(output_file, images, duration=0.2)
Example #2
            if cam2_depth_array[m2, n2] > 0:
                x2, y2, z2 = rgbdTools.getPosition(cam2, cam2_depth_array, m2,
                                                   n2)
            else:
                continue
            cam1_point.append([x1, y1, z1])
            cam2_point.append([x2, y2, z2])

    Transformation = registration.rigid_transform_3D(np.asarray(cam1_point),
                                                     np.asarray(cam2_point))
    print(Transformation)

    geometrie_added = False
    vis = o3d.Visualizer()
    vis.create_window("Pointcloud")
    pointcloud = o3d.PointCloud()

    try:
        time_beigin = time.time()
        while True:
            time_start = time.time()
            pointcloud.clear()

            frames1 = pipeline1.wait_for_frames()
            frames2 = pipeline2.wait_for_frames()

            aligned_frames1 = align.process(frames1)
            aligned_frames2 = align.process(frames2)

            color_frame1 = aligned_frames1.get_color_frame()
            depth_frame1 = aligned_frames1.get_depth_frame()
Example #3
    snap_steps = [int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path) if f[-5:] == '.meta']

    # Find which snapshot to restore
    chosen_step = np.sort(snap_steps)[-1]
    chosen_snap = os.path.join(path, 'snapshots', 'snap-{:d}'.format(chosen_step))
    tester = RegTester(model, restore_snap=chosen_snap)

    # calculate descriptors
    tester.generate_descriptor(model, dataset)

    # Load the descriptors and estimate the transformation parameters using RANSAC
    src_pcd = open3d.read_point_cloud("demo_data/cloud_bin_0.ply")
    src_data = np.load("demo_data/cloud_bin_0.npz")
    src_features = open3d.registration.Feature()
    src_features.data = src_data["features"].T
    src_keypts = open3d.PointCloud()
    src_keypts.points = open3d.Vector3dVector(src_data["keypts"])
    src_scores = src_data["scores"]

    tgt_pcd = open3d.read_point_cloud("demo_data/cloud_bin_1.ply")
    tgt_data = np.load("demo_data/cloud_bin_1.npz")
    tgt_features = open3d.registration.Feature()
    tgt_features.data = tgt_data["features"].T
    tgt_keypts = open3d.PointCloud()
    tgt_keypts.points = open3d.Vector3dVector(tgt_data["keypts"])
    tgt_scores = tgt_data["scores"]

    result_ransac = execute_global_registration(src_keypts, tgt_keypts, src_features, tgt_features, 0.05)

    # First plot the original state of the point clouds
    draw_registration_result(src_pcd, tgt_pcd, np.identity(4))
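
# `execute_global_registration` is defined outside this excerpt; a minimal sketch,
# assuming the same legacy Open3D RANSAC API that Example #11 below calls directly
# (`draw_registration_result` resembles the helper defined inside Example #12):
def execute_global_registration(src_keypts, tgt_keypts, src_desc, tgt_desc,
                                distance_threshold):
    # match keypoints by descriptor similarity and prune with RANSAC
    result = open3d.registration_ransac_based_on_feature_matching(
        src_keypts, tgt_keypts, src_desc, tgt_desc, distance_threshold,
        open3d.TransformationEstimationPointToPoint(False), 4,
        [open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
         open3d.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
        open3d.RANSACConvergenceCriteria(4000000, 500))
    return result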
Example #4
def __init__(self, xyz):
    self.pcd = open3d.PointCloud()
    self.pcd.points = open3d.Vector3dVector(xyz)
    self.pcd_tree = open3d.KDTreeFlann(self.pcd)
Example #5
def convert(input_dir,
            obj_file_name_1,
            mtl_file_name_1,
            obj_file_name_2,
            mtl_file_name_2,
            ply_sf_file_name,
            ply_nm_file_name,
            n,
            nosf=False,
            write_ascii=True,
            u=None,
            v=None,
            random_indices=None):
    materials_1 = parse_mtl(mtl_file_name_1)
    vertices_1, faces_1, textures_1, parts_1 = parse_obj(
        input_dir, obj_file_name_1, materials_1)

    materials_2 = parse_mtl(mtl_file_name_2)
    vertices_2, faces_2, textures_2, parts_2 = parse_obj(
        input_dir, obj_file_name_2, materials_2)

    if u is None or v is None or random_indices is None:
        points_1, colors_1, normals_1, random_indices, u, v = sample(
            vertices_1, faces_1, textures_1, parts_1, n, input_dir,
            materials_1)
    else:
        points_1, colors_1, normals_1, random_indices, u, v = sample(
            vertices_1,
            faces_1,
            textures_1,
            parts_1,
            n,
            input_dir,
            materials_1,
            u=u,
            v=v,
            random_indices=random_indices)
    points_2, colors_2, normals_2, random_indices, u, v = sample(
        vertices_2,
        faces_2,
        textures_2,
        parts_2,
        n,
        input_dir,
        materials_2,
        random_indices=random_indices,
        u=u,
        v=v)

    flow_xyz = points_2 - points_1

    # == SCENE FLOW ==
    pc = open3d.PointCloud()
    pc.points = open3d.Vector3dVector(points_1)
    pc.colors = open3d.Vector3dVector(colors_1)
    pc.normals = open3d.Vector3dVector(flow_xyz)
    open3d.write_point_cloud(ply_sf_file_name, pc, write_ascii=write_ascii)

    # == NORMALS ==
    pc = open3d.PointCloud()
    pc.points = open3d.Vector3dVector(points_1)
    pc.colors = open3d.Vector3dVector(colors_1)
    pc.normals = open3d.Vector3dVector(normals_1)
    open3d.write_point_cloud(ply_nm_file_name, pc, write_ascii=write_ascii)

    return random_indices, u, v
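
# Usage sketch (hypothetical file names): pass `u`, `v`, and `random_indices` from a
# previous call so that consecutive frame pairs are sampled at the same surface points.
# idx, u, v = convert('data', 'frame0.obj', 'frame0.mtl', 'frame1.obj', 'frame1.mtl',
#                     'sf_01.ply', 'nm_01.ply', n=8192)
# convert('data', 'frame1.obj', 'frame1.mtl', 'frame2.obj', 'frame2.mtl',
#         'sf_12.ply', 'nm_12.ply', n=8192, u=u, v=v, random_indices=idx)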
Example #6
# Basic inference and visualization loop
MAX_SAMPLES = 15
for samples in range(MAX_SAMPLES):
    random_index = randrange(len(test_dataset_seg))
    print('[Sample {} / {}]'.format(random_index, len(test_dataset_seg)))

    # clean visualization
    visualizer.clear()
    clear_output()

    # get next sample
    point_set, seg = test_dataset_seg[random_index]

    # create cloud for visualization
    cloud = o3.PointCloud()
    cloud.points = o3.Vector3dVector(point_set)
    cloud.colors = o3.Vector3dVector(read_pointnet_colors(seg.numpy()))

    # perform inference in GPU
    points = Variable(point_set.unsqueeze(0))
    points = points.transpose(2, 1)
    if torch.cuda.is_available():
        points = points.cuda()
    pred_logsoft, _ = classifier(points)

    # move data back to cpu for visualization
    pred_logsoft_cpu = pred_logsoft.data.cpu().numpy().squeeze()
    pred_soft_cpu = np.exp(pred_logsoft_cpu)
    pred_class = np.argmax(pred_soft_cpu)
Example #7

period = 20
o3d.set_verbosity_level(o3d.VerbosityLevel.Debug)
if True:
    import glob
    pose_files = glob.glob('stream/*.pose')
    num_frames = len(pose_files)
    frame_trace = [i for i in range(num_frames) if i % period == 0]
    #frame_trace += [i for i in range(450, 480, 2)]
    #frame_trace += [445]
    #compareA = args.compareA  # the two point clouds to compare
    #compareB = args.compareB
    frame_trace = sorted(frame_trace)

pcd_partial = o3d.PointCloud()
last_T = None


def custom_draw_geometry_with_view_tracking(mesh, generate, period):
    def track_view(vis):
        global frame, poses
        global num_frames, last_T
        global parameter
        ctr = vis.get_view_control()
        if frame == 0:
            params = ctr.convert_to_pinhole_camera_parameters()
            intrinsics = params.intrinsic
            extrinsics = params.extrinsic

            pose = np.array(
Example #8
buccal_selected_points = select_closest_point(
    probing_points_transformed[0:3, :], buccal)
front_selected_points = select_closest_point(
    probing_points_transformed[3:6, :], front)
lingual_selected_points = select_closest_point(
    probing_points_transformed[6:9, :], lingual)
occlusal_selected_points = select_closest_point(
    probing_points_transformed[9:12, :], occlusal)
destination_points = np.ones(3)
destination_points = np.vstack((destination_points, buccal_selected_points))
destination_points = np.vstack((destination_points, front_selected_points))
destination_points = np.vstack((destination_points, lingual_selected_points))
destination_points = np.vstack((destination_points, occlusal_selected_points))
destination_points = np.asarray(destination_points)[1:, :]

# estimate surface normals at points
surface_pcd = o3d.PointCloud()
surface_pcd.points = o3d.Vector3dVector(surface)
o3d.geometry.estimate_normals(
    surface_pcd,
    search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.6, max_nn=30))
surface_normal = np.asarray(surface_pcd.normals)
print('shape of points is', np.shape(surface))
print('shape of normals is', np.shape(surface_normal))

# prepare destination points normals
buccal_selected_points_idx = np.zeros(len(
    buccal_selected_points))  # the idx in surface to find the right normals
front_selected_points_idx = np.zeros(len(front_selected_points))
lingual_selected_points_idx = np.zeros(len(lingual_selected_points))
occlusal_selected_points_idx = np.zeros(len(occlusal_selected_points))
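
# A minimal sketch of how these index arrays might be filled (an assumption, since
# the excerpt is truncated): nearest-neighbor lookup into `surface` with a legacy
# Open3D KD-tree, repeated for each of the four point groups.
surface_tree = o3d.KDTreeFlann(surface_pcd)
for i, point in enumerate(buccal_selected_points):
    _, nn_idx, _ = surface_tree.search_knn_vector_3d(point, 1)
    buccal_selected_points_idx[i] = nn_idx[0]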
Example #9
data_frompcd = np.zeros(
    (166, 6),
    dtype=float)  # (X,Y,Z) + (X,Y,Z) of the normal vector at that point
count = 0
with open('Pointclouds/bunny', 'r') as pcdfile:
    for line in pcdfile:
        j = 0
        for word in line.split():
            data_frompcd[count][j] = float(word)
            j = j + 1
        count = count + 1

# print(data_frompcd)

pcd_arr = data_frompcd[:, 0:3]  # the normal vector information is stripped out

original_pcd = open3d.PointCloud()
original_pcd.points = open3d.Vector3dVector(pcd_arr)
original_pcd.paint_uniform_color([0, 0, 1])  # entirely blue
# open3d.draw_geometries([point_cloud])

# Will be used as step size variables:
d_pos = 0.2
d_neg = 0.2

#######################################################################################
# Follow the normal vector to create training data outside the original surface:
points_out_0 = [data_frompcd[:, 0] + d_neg * data_frompcd[:, 3]]

points_out_1 = [data_frompcd[:, 1] + d_neg * data_frompcd[:, 4]]

points_out_2 = [data_frompcd[:, 2] + d_neg * data_frompcd[:, 5]]
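
# Presumably the three per-axis lists above are then combined into an (N, 3) array:
# points_out = np.vstack((points_out_0, points_out_1, points_out_2)).T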
Example #10
    check_pc5 = np.vstack((check_pc5, landmark_fiducial4))
    plot_3d_pc(check_pc5)

    print('check corrected ios image')
    new_mesh = mesh.Mesh.from_file(
        "G:\\My Drive\\Project\\IntraOral Scanner Registration\\IOS_scan_raw_data\\IOS Splint\\demo_scan\\2nd_splint\\scan3\\5242021-Demo_2nd_splint_trial3-lowerjaw.stl"
    )
    original_stl_points = np.copy(new_mesh.points)[:, 0:3]
    original_stl_points = transpose_pc(original_stl_points, trans_rigid0)
    original_stl_points = transpose_pc(original_stl_points, trans_rigid)

    original_stl_points2 = np.copy(new_mesh.points)[:, 0:3]
    #original_stl_points2 = transpose_pc(original_stl_points2, trans_rigid0)
    original_stl_points2 = transpose_pc(original_stl_points2, trans_rigid2)

    pc_stl_plot = o3d.PointCloud()
    pc_dicom_plot = o3d.PointCloud()
    pc_stl_plot.points = o3d.Vector3dVector(original_stl_points)
    pc_stl_plot.paint_uniform_color([0, 0.651, 0.929])  # blue
    pc_dicom_plot.points = o3d.Vector3dVector(original_stl_points2)
    pc_dicom_plot.paint_uniform_color([1, 0.706, 0])  # yellow
    o3d.visualization.draw_geometries([pc_dicom_plot, pc_stl_plot])

    # convert python ct point coordinate to Yomiplan
    # Y is flipped with y_data = (slice_properties[4] - surface_idx[i][0]) * slice_properties[1][0]
    # Z is flipped with z_data = (slice_properties[2] - z) * slice_properties[0]
    # In the current dicom, slice_properties are
    # [slice_thickness, slice_pixel_spacing, n, slice_rows, slice_columns, slice_columns, slope, intercept]
    # ["0.2", [0.200, 0.200], 304, 800, 800, "1.0", "0.0"]

    arch_ct.update_spline()
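
    # A minimal sketch of the CT -> Yomiplan conversion described in the comments
    # above (hypothetical helper; it just applies the two flip formulas listed there,
    # casting slice_thickness to float since it is stored as a string):
    def ct_to_yomiplan(surface_idx_i, z, slice_properties):
        y_data = (slice_properties[4] - surface_idx_i[0]) * slice_properties[1][0]  # Y is flipped
        z_data = (slice_properties[2] - z) * float(slice_properties[0])             # Z is flipped
        x_data = surface_idx_i[1] * slice_properties[1][1]  # assumption: X keeps its direction
        return x_data, y_data, z_data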
Example #11
def register2Fragments(id1, id2, keyptspath, descpath, resultpath, logpath, gtLog, desc_name, inlier_ratio, distance_threshold):
    """
    Register point cloud {id1} and {id2} using the keypts location and descriptors.
    """
    cloud_bin_s = f'cloud_bin_{id1}'
    cloud_bin_t = f'cloud_bin_{id2}'
    write_file = f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'
    if os.path.exists(os.path.join(resultpath, write_file)):
        return 0, 0, 0
    source_keypts = get_keypts(keyptspath, cloud_bin_s)
    target_keypts = get_keypts(keyptspath, cloud_bin_t)
    source_desc = get_desc(descpath, cloud_bin_s, desc_name)
    target_desc = get_desc(descpath, cloud_bin_t, desc_name)
    source_desc = np.nan_to_num(source_desc)
    target_desc = np.nan_to_num(target_desc)
    # Select {num_keypts} points based on the scores. The descriptors and keypts are already sorted based on the detection score.
    num_keypts = 250
    source_keypts = source_keypts[-num_keypts:, :]
    source_desc = source_desc[-num_keypts:, :]
    target_keypts = target_keypts[-num_keypts:, :]
    target_desc = target_desc[-num_keypts:, :]
    # Select {num_keypts} points randomly.
    # num_keypts = 250
    # source_indices = np.random.choice(range(source_keypts.shape[0]), num_keypts)
    # target_indices = np.random.choice(range(target_keypts.shape[0]), num_keypts)
    # source_keypts = source_keypts[source_indices, :]
    # source_desc = source_desc[source_indices, :]
    # target_keypts = target_keypts[target_indices, :]
    # target_desc = target_desc[target_indices, :]
    key = f'{cloud_bin_s.split("_")[-1]}_{cloud_bin_t.split("_")[-1]}'
    if key not in gtLog.keys():
        # skip the pairs that have less than 30% overlap.
        num_inliers = 0
        inlier_ratio = 0
        gt_flag = 0
    else:
        # build correspondence set in feature space.
        corr = build_correspondence(source_desc, target_desc)

        # calculate the inlier ratio, this is for Feature Matching Recall.
        gt_trans = gtLog[key]
        frag1 = source_keypts[corr[:, 0]]
        frag2_pc = open3d.PointCloud()
        frag2_pc.points = open3d.utility.Vector3dVector(target_keypts[corr[:, 1]])
        frag2_pc.transform(gt_trans)
        frag2 = np.asarray(frag2_pc.points)
        distance = np.sqrt(np.sum(np.power(frag1 - frag2, 2), axis=1))
        num_inliers = np.sum(distance < distance_threshold)
        if num_inliers / len(distance) < inlier_ratio:
            print(key)
            print("num_corr:", len(corr), "inlier_ratio:", num_inliers / len(distance))
        inlier_ratio = num_inliers / len(distance)
        gt_flag = 1

        # calculate the transformation matrix using RANSAC, this is for Registration Recall.
        source_pcd = open3d.PointCloud()
        source_pcd.points = open3d.utility.Vector3dVector(source_keypts)
        target_pcd = open3d.PointCloud()
        target_pcd.points = open3d.utility.Vector3dVector(target_keypts)
        s_desc = open3d.registration.Feature()
        s_desc.data = source_desc.T
        t_desc = open3d.registration.Feature()
        t_desc.data = target_desc.T
        result = open3d.registration_ransac_based_on_feature_matching(
            source_pcd, target_pcd, s_desc, t_desc,
            0.05,
            open3d.TransformationEstimationPointToPoint(False), 3,
            [open3d.CorrespondenceCheckerBasedOnEdgeLength(0.9),
             open3d.CorrespondenceCheckerBasedOnDistance(0.05)],
            open3d.RANSACConvergenceCriteria(50000, 1000))

        # write the transformation matrix into .log file for evaluation.
        with open(os.path.join(logpath, f'{desc_name}_{timestr}.log'), 'a+') as f:
            trans = result.transformation
            trans = np.linalg.inv(trans)
            s1 = f'{id1}\t {id2}\t  37\n'
            f.write(s1)
            f.write(f"{trans[0,0]}\t {trans[0,1]}\t {trans[0,2]}\t {trans[0,3]}\t \n")
            f.write(f"{trans[1,0]}\t {trans[1,1]}\t {trans[1,2]}\t {trans[1,3]}\t \n")
            f.write(f"{trans[2,0]}\t {trans[2,1]}\t {trans[2,2]}\t {trans[2,3]}\t \n")
            f.write(f"{trans[3,0]}\t {trans[3,1]}\t {trans[3,2]}\t {trans[3,3]}\t \n")

    # write the result into resultpath so that it can be re-shown.
    s = f"{cloud_bin_s}\t{cloud_bin_t}\t{num_inliers}\t{inlier_ratio:.8f}\t{gt_flag}"
    with open(os.path.join(resultpath, f'{cloud_bin_s}_{cloud_bin_t}.rt.txt'), 'w+') as f:
        f.write(s)
    return num_inliers, inlier_ratio, gt_flag
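
# `build_correspondence` is external to this excerpt; a minimal sketch, assuming a
# plain nearest-neighbor match from each source descriptor to the target descriptors:
def build_correspondence(source_desc, target_desc):
    # pairwise L2 distances between the two descriptor sets
    dists = np.linalg.norm(source_desc[:, None, :] - target_desc[None, :, :], axis=2)
    target_idx = np.argmin(dists, axis=1)  # closest target for every source keypoint
    return np.stack([np.arange(source_desc.shape[0]), target_idx], axis=1)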
Example #12
def open3d_icp_normal(pa,
                      pb,
                      trans_init=np.eye(4),
                      max_distance=0.02,
                      b_graph=False):
    '''
    (Transformation A->B)

    pa : numpy array
    pb : numpy array

    return: 
        transformation
            - 4x4 numpy array
        result
            - transformation
            - fitness
            - inlier_rmse
            - correspondence_set
    '''

    import open3d as op3
    import copy

    def draw_registration_result(source, target, transformation):
        source_temp = copy.deepcopy(source)
        target_temp = copy.deepcopy(target)
        source_temp.paint_uniform_color([1, 0.706, 0])
        target_temp.paint_uniform_color([0, 0.651, 0.929])
        source_temp.transform(transformation)
        op3.draw_geometries([source_temp, target_temp])

    pc1 = op3.PointCloud()
    pc1.points = op3.Vector3dVector(pa)

    pc2 = op3.PointCloud()
    pc2.points = op3.Vector3dVector(pb)

    op3.estimate_normals(pc1,
                         search_param=op3.KDTreeSearchParamHybrid(radius=10.0,
                                                                  max_nn=30))
    op3.estimate_normals(pc2,
                         search_param=op3.KDTreeSearchParamHybrid(radius=10.0,
                                                                  max_nn=30))

    # op3.draw_geometries([pc1, pc2])

    # max_distance = 0.2
    # trans_init = np.asarray(
    # [[0.862, 0.011, -0.507,  0.5],
    # [-0.139, 0.967, -0.215,  0.7],
    # [0.487, 0.255,  0.835, -1.4],
    # [0.0, 0.0, 0.0, 1.0]])
    # trans_init = np.eye(4)
    # tsl = pb.mean(axis=0) - pa.mean(axis=0)
    # trans_init = TransformationMatrixFromEulerAngleTranslation(euler, tsl)
    # trans_init = transform

    #### # print("Apply point-to-point ICP")
    #### reg_p2p = op3.registration_icp(pc1, pc2, max_distance, trans_init,
    ####         # op3.TransformationEstimationPointToPoint())
    ####         op3.TransformationEstimationPointToPoint(),
    ####         op3.ICPConvergenceCriteria(max_iteration = 2000))
    #### # print(reg_p2p)
    #### # print("Transformation is:")
    #### # print(reg_p2p.transformation)
    #### # print(EulerAngleTranslationFromTransformationMatrix(reg_p2p.transformation))
    #### # print("")

    # print("Apply point-to-plane ICP")
    reg_p2l = op3.registration_icp(
        pc1, pc2, max_distance, trans_init,
        op3.TransformationEstimationPointToPlane(),
        op3.ICPConvergenceCriteria(max_iteration=2000))
    # print(reg_p2l)
    # print("Transformation is:")
    # print(reg_p2l.transformation)
    # print("")

    if b_graph:
        draw_registration_result(pc1, pc2, reg_p2l.transformation)

    # print(reg_p2p.transformation)
    # print(reg_p2p.fitness)
    # print(reg_p2p.inlier_rmse)
    # print(reg_p2p.correspondence_set)

    return reg_p2l.transformation, reg_p2l
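
# Example call (synthetic data, commented out since ICP parameters depend on scale):
# pa = np.random.rand(500, 3)
# pb = pa + np.array([0.01, 0.0, 0.0])
# T, reg = open3d_icp_normal(pa, pb, max_distance=0.05, b_graph=False)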
Example #13
    visi_tgt_fit = list(set(visi_tgt) - set(hf_list))

    verts_gt_visi = np.array([verts_gt[ind] for ind in visi_gt])
    verts_tgt_visi = np.array([verts_tgt[ind] for ind in visi_tgt])
    verts_tgt_visi_nhf = np.array([verts_tgt[ind] for ind in visi_tgt_fit])

    # compute scale
    ave_dist_gt = np.linalg.norm(np.mean(verts_gt_visi, axis=0))
    ave_dist_tgt = np.linalg.norm(np.mean(verts_tgt_visi, axis=0))
    scale = ave_dist_gt / ave_dist_tgt
    verts_tgt *= scale
    verts_tgt_visi *= scale
    verts_tgt_visi_nhf *= scale

    # do ICP
    source = open3d.PointCloud()
    target = open3d.PointCloud()
    source.points = open3d.Vector3dVector(verts_tgt_visi_nhf)
    target.points = open3d.Vector3dVector(verts_gt_visi)

    reg_p2p = open3d.registration_icp(
        source,
        target,
        10,
        np.identity(4),
        open3d.TransformationEstimationPointToPoint(),
        open3d.ICPConvergenceCriteria(max_iteration=10000),
    )
    trans_mat = reg_p2p.transformation
    verts_tgt_fit = np.zeros(verts_tgt.shape)
    verts_tgt_visi_fit = np.zeros(verts_tgt_visi.shape)
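
    # The excerpt is truncated here; presumably the fitted vertices are then obtained
    # by applying the 4x4 ICP transform in homogeneous coordinates, e.g.:
    # verts_h = np.hstack((verts_tgt, np.ones((verts_tgt.shape[0], 1))))
    # verts_tgt_fit = (trans_mat @ verts_h.T).T[:, :3]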
Example #14
def preprocess_point_cloud(points, voxel_size):
    source = o3d.PointCloud()
    source.points = o3d.Vector3dVector(points)
    print(":: Downsample with a voxel size %.3f." % voxel_size)
    pcd_down = o3d.geometry.voxel_down_sample(source, voxel_size)
    return pcd_down.points
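
# Usage sketch: downsample a random (N, 3) array; the return value is an Open3D
# Vector3dVector, so wrap it in np.asarray() to get a NumPy array back.
# down = np.asarray(preprocess_point_cloud(np.random.rand(10000, 3), voxel_size=0.05))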
Example #15
import open3d as op
import numpy as np
import subprocess

#model1 = np.loadtxt();

points = np.random.rand(10000, 3)

point_cloud = op.PointCloud()
point_cloud.points = op.Vector3dVector(points)
op.draw_geometries([point_cloud])
Example #16
#SEED = [94, 267, 265]  # tooth 21
#SEED = [59, 246, 265]  # tooth 22
SEED = [316, 354, 265]  # tooth 17
#[323, 360, 285]
z_tho = 230
SRG_LOOPS = 45
CUTOFF = -200

region_candidate = region(SEED)
srg_3d(region_candidate, scan, z_threshold=z_tho)
region_candidate.update_final_boundary(z_threshold=z_tho)

import open3d as o3d

source = o3d.PointCloud()
source.points = o3d.Vector3dVector(region_candidate.boundary_final)
source.paint_uniform_color([0, 0.651, 0.929])  # blue
o3d.draw_geometries([source])

#plt.figure()
#ax = plt.axes(projection='3d')
#print('shape of boundary is', np.shape(region_candidate.boundary_final))
#Yomiplot.plot_3d_points(region_candidate.boundary_final, ax, color='green', alpha=0.3, axe_option=False)

#plt.show()

exit()

#img2 = cv2.imread(HU)
HU_square = np.square(HU)
Example #17
n_size = 100

points = MakePoints(plane.f_rep, bbox=bbox, grid_step=30)
p_size = points.shape[0]
print(p_size)

xmin, xmax, ymin, ymax, zmax, zmin = AABB
noise = np.array([[Random(xmin, xmax),
                   Random(ymin, ymax),
                   Random(zmin, zmax)] for i in range(n_size)])

points = np.concatenate([points, noise])
size = points.shape[0]

# convert the point cloud from a NumPy array to Open3D format
pointcloud = open3d.PointCloud()
pointcloud.points = open3d.Vector3dVector(points)

# color
colors3 = np.asarray(pointcloud.colors)
print(colors3.shape)
colors3 = np.array([[255, 130, 0] if i < p_size else [0, 0, 255]
                    for i in range(size)]) / 255
pointcloud.colors = open3d.Vector3dVector(colors3)

# normal estimation
open3d.estimate_normals(pointcloud,
                        search_param=open3d.KDTreeSearchParamHybrid(
                            radius=5, max_nn=100))

# align the normal directions based on the viewpoint
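
# A possible completion of that step (an assumption: a legacy Open3D build that
# exposes orient_normals_towards_camera_location as a free function):
# open3d.orient_normals_towards_camera_location(pointcloud,
#                                               camera_location=np.array([0., 0., 100.]))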
Example #18
def process_lidar_all(data_path,store_path_general,frame_val,mdy_dict,mst_s_dict,mst_v_dict,vid_to_id_map,res_scale = 4):
    id_vehicles_with_sensor = mst_s_dict.keys()
    id_vehicles = mst_v_dict.keys()
    observation_matrix = np.zeros([len(id_vehicles),len(id_vehicles)])*np.nan
    distance_matrix = np.zeros([len(id_vehicles),len(id_vehicles)])
    bounding_box_cy = np.zeros([len(id_vehicles),len(id_vehicles),4])
    bounding_box_3d = np.zeros([len(id_vehicles),len(id_vehicles),9,3])
    world_lp = []
    for ego_vid in id_vehicles_with_sensor:
        store_path = os.path.join(store_path_general,str(ego_vid))
        ego_pid = vid_to_id_map[ego_vid]
        ego_s_mst = mst_s_dict[ego_vid][f_sensor_string]
        ego_sen_id = ego_s_mst['id']
        ego_mdy_dict = mdy_dict[ego_sen_id]
        ego_location = ego_mdy_dict['translation']
        # rdu.get_sensor_data_file_path(f, SEN_LIDAR_LABEL, ego_vid, data_path, LIDAR_EXT))
        input_data_path = os.path.join(data_path,str(ego_vid),str(frame_val)+'_'+str(ego_vid)+LIDAR_EXT)
        lidar_data = od.read_point_cloud(input_data_path)
        lidar_np = np.asarray(lidar_data.points)

        lp_w = gu.ego_to_world_sensor(lidar_np.transpose(),ego_mdy_dict)
        lidar_cy = lu.lidar_to_cy(lidar_np, res_scale)
        lidar_colored = lidar_cy
        lidarp_cy = PImage.fromarray(np.uint8(lidar_colored), 'RGB')
        print(frame_val, ego_vid)
        file_name = os.path.join(store_path, '%d_%d.png' % (frame_val, ego_vid))
        lidarp_cy.save(file_name)
        world_lp +=[lp_w]
    world_lp_all = np.concatenate(world_lp,axis=1)
    # axis_point = np.zeros([3,300])
    # axis_point[0,0:10] = np.arange(10)
    # axis_point[1, 10:20] = np.arange(10)
    # axis_point[2, 20:30] = np.arange(10)
    # x_axis = np.arange(-10.0,10.0,.2)
    # xy = np.meshgrid(x_axis,x_axis)
    # xyz_plane = np.stack(xy+[np.zeros_like(xy[0])],axis=0)
    # xyz_plane = xyz_plane[:].reshape([3, -1, 1]).squeeze()
    # world_lp_all = np.concatenate([world_lp_all,axis_point],axis=1)


    world_lp_all_ego = gu.world_to_ego_sensor(world_lp_all,ego_mdy_dict)
    # world_lp_all_ego = world_lp_all_ego.transpose()
    # world_lp_all_ego = np.concatenate([world_lp_all_ego,axis_point,xyz_plane],axis=1)
    world_lp_all_ego = world_lp_all_ego.transpose()
    bc = {}
    bc['minX'] = -80
    bc['maxX'] = 80
    bc['minY'] = -80
    bc['maxY'] = 80
    bc['minZ'] = -10
    bc['maxZ'] = 10
    world_clean = lu.removePoints(world_lp_all_ego, bc)
    img = lu.makeBVFeature(world_clean,None,Discretization=160.0/1024.0)
    img = img*255
    # a = np.int8(img)
    # a = np.array(np.zeros([500,500,3]))
    # # a[400:512,400:512,1]=1
    # a[400:500, 400:500, 1] = 255
    # a = np.int8(a)
    pimg = PImage.fromarray(img.astype('uint8'), 'RGB')
    # b = np.asarray(pimg)
    pimg.show()
    pcd = od.PointCloud()
    pcd.points = od.Vector3dVector(world_clean)
    od.draw_geometries([pcd])
    # if ego_vid == 550 and frame_val == 278260:
    #     all_points = np.concatenate(all_points, axis=1)
    #     # all_points = np.concatenate([points, all_points], axis=1)
    #     pcd = od.PointCloud()
    #     pcd.points = od.Vector3dVector(np.transpose(all_points))
    #     od.draw_geometries([pcd])
    # img_od_cyl = od.Image(lidar_cy.astype(np.uint8))
    return {'frame':frame_val,'observation':observation_matrix,'distance':distance_matrix,'bb_2d':bounding_box_cy,'bb_3d':bounding_box_3d}
Example #19
import pykitti
import open3d
import time
import numpy as np

basedir = "/home/ylao/data/kitti"
date = "2011_09_26"
drive = "0001"

pcd = open3d.PointCloud()

vis = open3d.Visualizer()
vis.create_window()
vis.add_geometry(pcd)

render_option = vis.get_render_option()
render_option.point_size = 0.01

data = pykitti.raw(basedir, date, drive)
to_reset_view_point = True
for points_with_intensity in data.velo:
    points = points_with_intensity[:, :3]
    pcd.points = open3d.Vector3dVector(points)

    vis.update_geometry()
    if to_reset_view_point:
        vis.reset_view_point(True)
        to_reset_view_point = False
    vis.poll_events()
    vis.update_renderer()
    time.sleep(0.2)
Example #20
def visualize_points(points):
    point_cloud = open3d.PointCloud()
    point_cloud.points = open3d.Vector3dVector(points)
    open3d.draw_geometries([point_cloud])
Example #21
def __init__(self):
    self.vis_cloud = open3d.PointCloud()
    self.viewer = open3d.Visualizer()
    self.viewer.create_window()
    self.viewer.add_geometry(self.vis_cloud)
Example #22
            predict = ResNet.inference(model_path, img_path)

            predict = predict.reshape(185)

            im_name = ntpath.basename(img_path)
            # strip the file extension (drop everything from the last '.' onward)
            im_name = im_name.split(im_name.split('.')[-1])[0][0:-1]

            out_file = out_dir + '/' + im_name

            np.savetxt(out_file + '_est.txt', predict, fmt="%f")

            est_shape, est_exp = project2bfm09(predict)

            est_geom = est_shape + est_exp

            est = open3d.PointCloud()
            est.points = open3d.Vector3dVector(est_geom)

            label_path = img_path[:-3] + 'txt'
            label = np.asarray(read_txt(label_path))
            truth_shape, truth_exp = project2bfm09(label, use_std=False)

            truth_geom = truth_shape + truth_exp

            truth = open3d.PointCloud()
            truth.points = open3d.Vector3dVector(truth_geom +
                                                 np.array([200000, 0, 0]))

            open3d.draw_geometries([est, truth])
Example #23
# 6DoF pose annotator
# Shuichi Akizuki, Chukyo Univ.
# Email: [email protected]
#
import open3d as o3
import numpy as np
import cv2
import copy
import argparse
import os
import common3Dfunc as c3D
from math import *
""" Object model to be transformed """
CLOUD_ROT = o3.PointCloud()
""" Total transformation"""
all_transformation = np.identity(4)
""" Step size for rotation """
step = 0.1 * pi
""" Voxel size for downsampling"""
voxel_size = 0.005


def get_argumets():
    """
        Parse arguments from command line
    """

    parser = argparse.ArgumentParser(
        description='Interactive 6DoF pose annotator')
    parser.add_argument('--cimg',
                        type=str,
        # drives=["0095", "0001"],
        drives=["0095"],
        box_size_x=hyper_params["box_size_x"],
        box_size_y=hyper_params["box_size_y"],
    )

    # Model
    max_batch_size = 128  # The more the better, limited by memory size
    predictor = PredictInterpolator(
        checkpoint_path=flags.ckpt,
        num_classes=dataset.num_classes,
        hyper_params=hyper_params,
    )

    # Init visualizer
    dense_pcd = open3d.PointCloud()
    vis = open3d.Visualizer()
    vis.create_window()
    vis.add_geometry(dense_pcd)
    render_option = vis.get_render_option()
    render_option.point_size = 0.05

    to_reset_view_point = True
    for kitti_file_data in dataset.list_file_data:
        timer = {
            "load_data": 0,
            "predict_interpolate": 0,
            "visualize": 0,
            "write_data": 0,
            "total": 0,
        }
Example #25
#         poses.append(to_rotation_matrix(R, T))

kitti = pykitti.odometry("./KITTI_ODOMETRY", "00")
poses = [
    torch.from_numpy(kitti.poses[i] @ kitti.calib.T_cam0_velo)
    for i in range(len(kitti.poses))
]

map_file = args.map
first_frame = int(args.start)
last_frame = min(len(poses), int(args.end))
kitti = pykitti.odometry(args.kitti_folder, sequence)
if map_file is None:

    pc_map = []
    pcl = o3.PointCloud()
    print("started making the map")
    for i in tqdm(range(first_frame, last_frame)):
        pc = kitti.get_velo(i)
        valid_indices = pc[:, 0] < -3.
        valid_indices = valid_indices | (pc[:, 0] > 3.)
        valid_indices = valid_indices | (pc[:, 1] < -3.)
        valid_indices = valid_indices | (pc[:, 1] > 3.)
        pc = pc[valid_indices].copy()
        intensity = pc[:, 3].copy()
        pc[:, 3] = 1.
        RT = poses[i].numpy()
        pc_rot = np.matmul(RT, pc.T)
        pc_rot = pc_rot.astype(np.float64).T.copy()

        pcl_local = o3.PointCloud()
Example #26
            # rd = re(np.array(gtRots[j], dtype=np.float32).reshape(3, 3), rmat)
            # xyz = te((np.array(gtPoses[j], dtype=np.float32)*0.001), (otvec.T))

            print('--------------------- ICP refinement -------------------')
            cv2.rectangle(
                image, (int(box[0]), int(box[1])),
                (int(box[0]) + int(box[2]), int(box[1]) + int(box[3])),
                (0, 0, 0), 3)

            pcd_img = create_point_cloud(image_dep, fxkin, fykin, cxkin, cykin,
                                         0.001)
            pcd_img = pcd_img.reshape(
                (480, 640, 3))[int(box[1]):int(box[1] + box[3]),
                               int(box[0]):int(box[0] + box[2]), :]
            pcd_img = pcd_img.reshape((pcd_img.shape[0] * pcd_img.shape[1], 3))
            pcd_crop = open3d.PointCloud()
            # assign the cropped depth points before estimating their normals
            pcd_crop.points = open3d.Vector3dVector(pcd_img)
            open3d.estimate_normals(
                pcd_crop,
                search_param=open3d.KDTreeSearchParamHybrid(radius=0.02,
                                                            max_nn=30))
            #open3d.draw_geometries([pcd_crop])

            guess = np.zeros((4, 4), dtype=np.float32)
            guess[:3, :3] = rmat
            guess[:3, 3] = itvec.T
            guess[3, :] = np.array([0.0, 0.0, 0.0, 1.0], dtype=np.float32).T

            if dC == 0:
                pcd_model = pcd_model_1
            elif dC == 1:
                pcd_model = pcd_model_2
Example #27
    classes = ["gt/gt_obj", "before/pred_obj", "after/pred_obj"]
    save_folder = ["graph/gt/", "graph/pred/", "graph/pred_proc/"]
    shiftX = 3

    with open('graph/scene_name.txt', 'w') as name:
        for index, item in enumerate(classes):
            all_file = all_path(item)
            for file_path, file_name in all_file:
                cloud = []
                with open(file_path, "r") as fr:
                    for line in fr:
                        temp = []
                        for s in line.split():
                            if s != 'v':
                                temp.append(float(s))
                        cloud.append(temp)
                cloud = np.array(cloud)

                coord = cloud[:, 0:3].astype(np.float32)
                color = cloud[:, 3:6].astype(np.uint32)
                pcd = o3d.PointCloud()
                pcd.points = o3d.Vector3dVector(coord + shiftX * index)
                pcd.colors = o3d.Vector3dVector(color)
                #o3d.visualization.draw_geometries([pcd])
                o3d.io.write_point_cloud(
                    save_folder[index] + file_name.split('.')[0] + '.pcd', pcd)

                print("Write file: ", file_name.split('.')[0] + '.pcd')
                if index == 0:
                    name.write(file_name[:-7] + '\n')
Example #28
    cam_ray_directions.append(cam_ray_direction)
    
    cam_point_cloud, depth_image = cam.simulate_camera_mesh(target_mesh,real=True, err=0)
    cam_point_clouds.append(cam_point_cloud)
    cam_depth_images.append(depth_image)

cam_point_clouds = np.array(cam_point_clouds)
cam_depth_images = np.array(cam_depth_images)
file_name = "V_%s_fps_%s_simulation_time_%s_start_angle_%s_Z_%s_radius_%s" %(V, fps, simulation_time, start_angle, Z, radius)

np.save("simulation/cam_point_clouds_" + file_name, cam_point_clouds)
np.save("simulation/cam_depth_images_" + file_name, cam_depth_images)

#%% PLOTING CAM POINT CLOUD

source = pn.PointCloud() 

vis = pn.Visualizer()
vis.create_window()
for _ in range(2):  # play the sequence twice
    for i, point_cloud in enumerate(cam_point_clouds):
        time.sleep(0.42)
        source.points = pn.Vector3dVector(point_cloud)
        source.paint_uniform_color([1, 0.706, 0])
        vis.add_geometry(source)
        vis.update_geometry()
        vis.poll_events()
        vis.update_renderer()

vis.destroy_window()
Example #29
def test(tr_src, train_tgt, tst_src, gt, output, mean, std):
    tensor_x = torch.Tensor(tst_src)
    tensor_y = torch.stack([torch.Tensor(i) for i in gt])
    # print(tensor_y.shape)
    testset = utils.TensorDataset(tensor_x, tensor_y)
    testloader = utils.DataLoader(testset)
    loss = 0.0
    test_size = 0
    saved_data = np.zeros(gt.shape)

    with torch.no_grad():
        for data in testloader:
            # print(saved_data)
            inputs, targets = data[0].to(device), data[1].to(device)
            outputs = posemlp(inputs)
            loss += criterion(outputs, targets[0]).item()
            saved_data[test_size] = outputs.cpu().numpy()
            test_size += 1

    saved_data = saved_data * std + mean
    print(saved_data)
    print('Finished testing with average loss %.6f.' % (loss / test_size))

    gt = gt * std + mean
    train_tgt = train_tgt * std + mean

    print('Dumping data to ' + output)
    np.save(output, saved_data)

    pred_pcd = o3d.PointCloud()
    pred_pcd.points = o3d.Vector3dVector(saved_data)
    pred_pcd = o3d.voxel_down_sample(pred_pcd, voxel_size=0.05)
    pred_pcd.paint_uniform_color([0, 0, 1])
    colors = np.zeros((saved_data.size // 3, 3))
    colors[..., 2] = np.ones(saved_data.size // 3)
    tr_src = np.around(tr_src, decimals=4)
    tst_src = np.around(tst_src, decimals=4)
    # print(tst_src)[900:1100]
    # print(tr_src)
    # print(np.asarray(pred_pcd.colors).size)
    print(saved_data.size)
    print(np.asarray(pred_pcd.colors).size)
    for i in range(saved_data.size // 3):
        if tst_src[i] in tr_src:
            np.asarray(pred_pcd.colors)[i, 2] = 0
            np.asarray(pred_pcd.colors)[i, 0] = 1
            # print(i)
            # colors[i, 0] = 0
            # colors[i, 1] = 0
            # colors[i, 2] = 1
    # pred_pcd.colors = o3d.Vector3dVector(colors)
    print(np.asarray(pred_pcd.colors))
    gt_pcd = o3d.PointCloud()
    gt_pcd.points = o3d.Vector3dVector(gt)
    gt_pcd = o3d.voxel_down_sample(gt_pcd, voxel_size=0.05)
    gt_pcd.paint_uniform_color([0, 1, 0])

    tr_pcd = o3d.PointCloud()
    tr_pcd.points = o3d.Vector3dVector(train_tgt)
    tr_pcd = o3d.voxel_down_sample(tr_pcd, voxel_size=0.05)
    tr_pcd.paint_uniform_color([1, 0, 0])

    o3d.visualization.draw_geometries([pred_pcd, gt_pcd, tr_pcd])
Example #30
def mandelbulb(K=200, p=8.0, N=10, extreme=1.5, optimize=False):
    M = T.ftensor4()
    C = T.ftensor4()
    n = T.fscalar()

    def step(X):
        r = T.sqrt(
            T.square(X[:, :, :, 0]) + T.square(X[:, :, :, 1]) +
            T.square(X[:, :, :, 2]))
        phi = T.arctan2(X[:, :, :, 1], X[:, :, :, 0])
        theta = T.arctan2(
            T.sqrt(T.square(X[:, :, :, 0]) + T.square(X[:, :, :, 1])),
            X[:, :, :, 2])

        X_ = T.stack((T.pow(r, n) * T.sin(n * theta) * T.cos(n * phi),
                      T.pow(r, n) * T.sin(n * theta) * T.sin(n * phi),
                      T.pow(r, n) * T.cos(n * theta)),
                     axis=-1)

        return X_ + C

    if optimize:
        result, _ = theano.scan(fn=step, outputs_info=M, n_steps=N)

        f = theano.function([M, C, n], result, allow_input_downcast=True)
    else:
        f_ = theano.function([M, C, n], step(M), allow_input_downcast=True)

        def f(X, c, q):
            for j in range(N):
                print('step %d' % j)
                X = f_(X, c, q)
            return np.array(X)

    x, y, z = np.meshgrid(np.linspace(-extreme, extreme, K),
                          np.linspace(-extreme, extreme, K),
                          np.linspace(-extreme, extreme, K))

    X = np.stack((x, y, z), axis=-1)

    if optimize:
        x_n = f(np.zeros((K, K, K, 3)), X, p)[-1]
    else:
        x_n = f(np.zeros((K, K, K, 3)), X, p)

    r_n = np.sqrt(np.square(x_n[:,:,:,0]) + \
                  np.square(x_n[:,:,:,1]) + \
                  np.square(x_n[:,:,:,2]))

    b_ = 1.0 * (r_n < 2.0)

    import open3d
    from scipy.misc import imresize
    from skimage import measure

    verts, faces = measure.marching_cubes_classic(b_, 0)
    faces = measure.correct_mesh_orientation(b_, verts, faces)
    verts = 2.0 * extreme * (np.array(verts, dtype=np.float32) / K) - extreme

    pcd = open3d.PointCloud()
    pcd.points = open3d.Vector3dVector(verts)
    open3d.estimate_normals(pcd,
                            search_param=open3d.KDTreeSearchParamHybrid(
                                radius=0.1, max_nn=30))

    global i, images
    i = 0
    images = []

    def custom_draw_geometry_with_rotation(pcd):
        def rotate_view(vis):
            ctr = vis.get_view_control()
            ctr.rotate(10.0, 0.0)

            global i, images
            i += 1
            print(i % 210, i // 210)

            image = np.asarray(vis.capture_screen_float_buffer())
            image = np.array(255 * image, dtype=np.uint8)
            image = imresize(image, 0.25)

            if (i // 210 == 0):
                images.append(image)

            return False

        open3d.draw_geometries_with_animation_callback([pcd], rotate_view)

    custom_draw_geometry_with_rotation(pcd)

    gif_name = 'Mandelbulb_%d_%d.gif' % (int(p), int(N))
    output_file = os.path.join(__file__.split('.')[0], gif_name)
    imageio.mimsave(output_file, images, duration=0.05)