コード例 #1
0
def get_intrinsics_scannet():
    """Build an Open3D pinhole-camera intrinsic for a ScanNet scan.

    Reads the depth-camera intrinsic matrix shipped with the scan at
    ``<args.input>/intrinsic/intrinsic_depth.txt``, writes its 3x3 part
    into a temporary Open3D-style intrinsics JSON under /tmp, and loads
    that file back through Open3D.

    Returns:
        The ``PinholeCameraIntrinsic`` parsed by
        ``o3d.read_pinhole_camera_intrinsic`` (640x480 depth stream).
    """
    K = np.loadtxt(join(args.input, 'intrinsic', 'intrinsic_depth.txt'))
    intr_path = join('/tmp', f'{args.input.__hash__()}.json')
    # Open3D expects the 9 intrinsic values in column-major order,
    # hence the transpose before flattening.
    intrinsics = {
        "width": 640,
        "height": 480,
        "intrinsic_matrix": list(K[:3, :3].T.flatten()),
    }
    # Use context managers so the temp file is flushed/closed before
    # Open3D reads it (the original leaked both file handles).
    with open(intr_path, 'w') as f:
        json.dump(intrinsics, f)
    with open(intr_path) as f:
        print(json.load(f))  # debug echo of what was written
    return o3d.read_pinhole_camera_intrinsic(intr_path)
コード例 #2
0
 def __init__(self, camera_intrinsic_name, _w=640, _h=480, _d=1000.0):
     """Load pinhole intrinsics from *camera_intrinsic_name* and cache a 4x4 form.

     Stores width/height/depth-scale and expands the 3x3 intrinsic matrix
     into a 4x4: fx, fy stay on the diagonal; the principal point (cx, cy)
     is copied into the LAST column.
     # NOTE(review): cx/cy land in column 3, not the usual column 2 of a
     # homogeneous intrinsic matrix — confirm downstream projection code
     # expects this layout.
     """
     self.camera_intrinsic = o3.read_pinhole_camera_intrinsic(
         camera_intrinsic_name)
     self.width = _w
     self.height = _h
     self.d = _d
     K = self.camera_intrinsic.intrinsic_matrix
     self.camera_intrinsic4x4 = np.identity(4)
     self.camera_intrinsic4x4[0, 0] = K[0, 0]  # fx
     self.camera_intrinsic4x4[1, 1] = K[1, 1]  # fy
     self.camera_intrinsic4x4[0, 3] = K[0, 2]  # cx
     self.camera_intrinsic4x4[1, 3] = K[1, 2]  # cy
コード例 #3
0
def generateImage(mapping, im_color):
    """Render the global point cloud through *mapping*, blend it 50/50
    with *im_color*, and display the result in the shared window."""
    global CLOUD_ROT

    projected = mapping.Cloud2Image(CLOUD_ROT)
    blended = cv2.addWeighted(projected, 0.5, im_color, 0.5, 0)
    cv2.imshow(window_name, blended)


if __name__ == "__main__":

    # (sic) the argument-parsing helper is spelled "get_argumets"
    # elsewhere in this file.
    args = get_argumets()
    """Data loading"""
    print(":: Load two point clouds to be matched.")
    # Legacy (pre-0.8) Open3D module-level API.
    color_raw = o3.read_image(args.cimg)
    depth_raw = o3.read_image(args.dimg)
    camera_intrinsic = o3.read_pinhole_camera_intrinsic(args.intrin)

    im_color = np.asarray(color_raw)
    # Open3D images are RGB; OpenCV expects BGR for display/processing.
    im_color = cv2.cvtColor(im_color, cv2.COLOR_BGR2RGB)
    im_depth = np.asarray(depth_raw)

    # depth_scale=1000 (mm -> m), depth values truncated beyond 2.0 m.
    rgbd_image = o3.create_rgbd_image_from_color_and_depth(
        color_raw, depth_raw, 1000.0, 2.0)
    pcd = o3.create_point_cloud_from_rgbd_image(rgbd_image, camera_intrinsic)
    o3.write_point_cloud("cloud_in.ply", pcd)
    # Downsample with a 5 mm voxel grid for faster matching.
    cloud_in_ds = o3.voxel_down_sample(pcd, 0.005)
    o3.write_point_cloud("cloud_in_ds.ply", cloud_in_ds)

    np_pcd = np.asarray(pcd.points)
    """Loading of the object model"""
    print('Loading: {}'.format(args.model))
コード例 #4
0
ファイル: visualize.py プロジェクト: Chauncy-Cai/transfer
    def track_view(vis):
        """Per-frame visualizer callback: replay recorded camera poses and,
        at two hard-coded key frames, capture the RGB-D buffers and
        back-project them to point clouds for comparison.

        Relies on module globals: frame (counter), poses, num_frames,
        last_T (last captured pose), parameter (capture results dict),
        plus helpers frame_trace, AngularDistance, see2pcd, compare.
        """
        global frame, poses
        global num_frames, last_T
        global parameter
        ctr = vis.get_view_control()
        if frame == 0:
            # First call: keep the window's current intrinsics but force
            # a hard-coded starting extrinsic pose.
            params = ctr.convert_to_pinhole_camera_parameters()
            intrinsics = params.intrinsic
            extrinsics = params.extrinsic

            pose = np.array(
                [[-0.8188861922, 0.3889273405, -0.4220911372, -14.6068376600],
                 [-0.1157361687, -0.8321937190, -0.5422718444, 23.0477832143],
                 [-0.5621659395, -0.3952077147, 0.7264849060, 4.1193224787],
                 [0.0000000000, 0.0000000000, 0.0000000000, 1.0000000000]])
            params = o3d.camera.PinholeCameraParameters()
            params.extrinsic = pose
            params.intrinsic = intrinsics
            ctr.convert_from_pinhole_camera_parameters(params)
        # Wrap the running frame counter into the recorded-stream range.
        fid = frame % num_frames

        # The two frames at which a point-cloud capture is triggered.
        keyindex1 = 500
        keyindex2 = 600

        if True:
            # Load the camera recorded for this frame from disk.
            intrinsics = o3d.read_pinhole_camera_intrinsic(
                "stream/%d.intrinsics.json" % fid)
            T = np.loadtxt('stream/%d.pose' % fid)
            params = o3d.camera.PinholeCameraParameters()
            params.extrinsic = T
            params.intrinsic = intrinsics
            ctr.convert_from_pinhole_camera_parameters(params)  # important: do not delete
            # NOTE(review): bitwise | on two bools — works, but `or` is
            # the conventional operator here.
            if (fid == keyindex1) | (fid == keyindex2):
                """ Generate Point Cloud """
                if (last_T is None) or ((fid in frame_trace) or
                                        (AngularDistance(T, last_T))):
                    print('%d/%d' % (fid, num_frames))
                    depth = vis.capture_depth_float_buffer(False)
                    depth = np.array(depth)
                    # Zero out far-away depth readings (beyond 30 units).
                    idx = np.where(depth > 30)
                    depth[idx] = 0
                    depth = o3d.Image(depth)
                    # Screen buffer is float [0,1]; convert to uint8 RGB.
                    image = vis.capture_screen_float_buffer(False)
                    image = o3d.Image(
                        np.array(np.array(image) * 255).astype(np.uint8))
                    rgbd = o3d.create_rgbd_image_from_color_and_depth(
                        image, depth, convert_rgb_to_intensity=False)
                    # Re-scale depth to millimetres before back-projection.
                    rgbd.depth = o3d.Image(np.array(rgbd.depth) * 1000)
                    pcd = o3d.create_point_cloud_from_rgbd_image(
                        rgbd, intrinsics)
                    # Move the cloud from camera space into world space.
                    pcd.transform(np.linalg.inv(T))
                    o3d.write_point_cloud("stream/%d.ply" % fid,
                                          pcd,
                                          write_ascii=True)
                    cv2.imwrite("stream/%d.png" % fid, np.array(image))
                    depth = np.array(depth)
                    #depth = np.array(depth)*1000
                    #depth.astype(np.uint16)
                    #cv2.imwrite("stream/%d_depth.png" % fid, depth)##
                    last_T = T
                    if (fid == keyindex1):
                        intrinsic_matrix = intrinsics.intrinsic_matrix

                        # First key frame: stash its cloud/image/index list.
                        pcd0, image, indexlist = see2pcd(
                            fid, depth, np.array(image), pcd,
                            intrinsics.intrinsic_matrix)
                        pcd0.transform(np.linalg.inv(T))
                        #print(T)
                        parameter['0'] = [pcd0, image, indexlist]
                    if (fid == keyindex2):
                        intrinsic_matrix = intrinsics.intrinsic_matrix

                        # Second key frame: capture, then compare against
                        # the first and shut the visualizer down.
                        pcd1, image, indexlist = see2pcd(
                            fid, depth, np.array(image), pcd,
                            intrinsics.intrinsic_matrix)
                        pcd1.transform(np.linalg.inv(T))
                        #print(T)
                        parameter['1'] = [pcd1, image, indexlist]
                        vis.destroy_window()
                        #o3d.draw_geometries([parameter['0'][0],pcd1])
                        compare(parameter)
                        os.system("pause")
            if fid == num_frames - 1:
                exit()
        frame += 1
コード例 #5
0
# Pull file/folder names out of the loaded configuration dict.
neighbour_file_name = config["neigh_info_file"]
neigh_direction_id_file_name = config["neigh_direction_id_file"]
motion_label_file_name = config["motion_label_file"]
enforce_generation = config["enforce_generation_neighbour"]
intrinsic_file_name = config["intrinsic_file"]
visibility_folder = config["visibility_folder"]
param_folder = config["param_folder"]
temp_folder = config[
    "temp_folder"]  # all info are only saved at run time and will be deleted after use

selected_room_names = all_room_names
radius_list = [0.1]  # unit is in meter

label_list = ["up", "down", "left", "right"]
intrinsic_file = os.path.join(proj_path, param_folder, intrinsic_file_name)
intrinsic = o3d.read_pinhole_camera_intrinsic(intrinsic_file)
print(intrinsic)

## empirically tried, but the real Kinect intrinsics work better
# Override the intrinsic matrix loaded from file with hand-picked
# Kinect-style values (fx=fy=585, principal point at image centre 320x240).
K = np.eye(3)
K[0, 0] = 585  #595  #590 #573 #690
K[1, 1] = 585  #595  #590 #573 #647
K[0, 2] = 320
K[1, 2] = 240
intrinsic.intrinsic_matrix = K

DELETE_TMP_PCD = True

# Overrides the earlier selected_room_names = all_room_names assignment.
selected_room_names = ["room006"]  # here to indicate which room to visualise
start_key_list = [
コード例 #6
0
 def __init__(self):
     """Load the RealSense intrinsics file and cache its matrix as NumPy K."""
     intrinsic_path = "static_data/realsense_intrinsic.json"
     self.camera_intrinsic = o3.read_pinhole_camera_intrinsic(intrinsic_path)
     self.K = np.asarray(self.camera_intrinsic.intrinsic_matrix)
コード例 #7
0
    return current_r, current_t


# To get the now_pc and next_pc, there are two ways:
#(1) you can load from RGBD image by

# Alias Open3D's Image as IMG so it does not shadow the (PIL) Image
# used below for reading files from disk.
from open3d import Image as IMG
from open3d import create_point_cloud_from_rgbd_image, create_rgbd_image_from_color_and_depth, read_pinhole_camera_intrinsic

# NOTE(review): `Image.open` here is presumably PIL.Image imported
# elsewhere in this file — confirm; it is not the IMG alias above.
color_image = np.array(Image.open("{0}/rgb/{1}".format(data_root, rgb_name)))
# Depth PNGs store millimetres; divide by 1000 to get metres.
depth_im = np.array(Image.open("{0}/depth/{1}".format(data_root,
                                                      dep_name))) / 1000.0
color_raw = IMG(color_image)
depth_raw = IMG(depth_im.astype(np.float32))
# depth_scale=1.0 because the depth was already converted to metres.
rgbd_image = create_rgbd_image_from_color_and_depth(
    color_raw, depth_raw, depth_scale=1.0, convert_rgb_to_intensity=False)
pinhole_camera_intrinsic = read_pinhole_camera_intrinsic("camera_redwood.json")
new_pc = create_point_cloud_from_rgbd_image(rgbd_image,
                                            pinhole_camera_intrinsic)

# you can get the next_pc in the same way

#(2) direct change from .xyz
from open3d import PointCloud

# Build the cloud directly from precomputed vertex/normal/colour arrays;
# colours are rescaled from [0,255] to [0,1] as Open3D expects.
new_pc = PointCloud()
new_pc.points = Vector3dVector(verts)
new_pc.normals = Vector3dVector(norms)
new_pc.colors = Vector3dVector(colors / 255.)

# you can get the next_pc in the same way
コード例 #8
0
    def track_view(vis):
        """Per-frame visualizer callback with two modes: record the current
        camera (intrinsics + pose) to stream/ files, or replay recorded
        cameras and optionally capture RGB-D point clouds.

        Uses the legacy Open3D ViewControl API where
        convert_to_pinhole_camera_parameters() returns an
        (intrinsics, extrinsics) tuple.  Relies on module globals:
        frame, poses, num_frames, last_T, args, frame_trace,
        AngularDistance.
        """
        global frame, poses
        global num_frames, last_T
        #print('frame=%d' % frame)
        #import ipdb; ipdb.set_trace()
        ctr = vis.get_view_control()
        ######intrinsics = o3d.read_pinhole_camera_intrinsic('intrinsics.json')
        #intrinsics = o3d.PinholeCameraIntrinsic(o3d.PinholeCameraIntrinsicParameters.Kinect2ColorCameraDefault)
        ######intrinsics = o3d.PinholeCameraIntrinsic(
        ######                o3d.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
        ######intrinsics_mat=np.array([[935.30743608719376, 0.0, 0.0],
        ######                           [0.0, 935.30743608719376, 0.0],
        ######                           [959.5, 539.5, 1.0]])
        ######intrinsics.intrinsic_matrix = intrinsics_mat
        #import ipdb; ipdb.set_trace()
        ######print(intrinsics)
        #pcd = o3d.create_point_cloud_from_rgbd_image(rgbd, intrinsics)
        #o3d.write_point_cloud("stream/%d.ply" % frame, pcd)
        if frame == 0:
            #vis.get_render_option().load_from_json("render.json")
            # First call: keep current intrinsics, force a hard-coded pose.
            intrinsics, extrinsics = ctr.convert_to_pinhole_camera_parameters()
            pose = [[-0.8188861922, 0.3889273405, -0.4220911372, -14.6068376600],
                    [-0.1157361687, -0.8321937190, -0.5422718444, 23.0477832143],
                    [-0.5621659395, -0.3952077147, 0.7264849060, 4.1193224787],
                    [0.0000000000, 0.0000000000, 0.0000000000, 1.0000000000]]
            ctr.convert_from_pinhole_camera_parameters(intrinsics, pose)
        # Wrap the running counter into the recorded-stream range.
        fid = frame % num_frames

        if not args.load:
            # RECORD mode: dump this frame's camera to stream/ files.
            intrinsics, extrinsics = ctr.convert_to_pinhole_camera_parameters()
            o3d.write_pinhole_camera_intrinsic("stream/%d.intrinsics.json" % frame, intrinsics)
            # Write the 4x4 extrinsic as whitespace-separated rows,
            # matching what np.loadtxt reads back in replay mode.
            with open('stream/%d.pose' % frame, 'w') as fout:
                for i in range(4):
                    for j in range(4):
                        if j > 0:
                            fout.write(' ')
                        fout.write('%.10f' % extrinsics[i, j])
                    fout.write('\n')
            #o3d.write_pinhole_camera_intrinsics(intrinsics, "stream/%d.intrinsics.json" % frame)
            #pose_dict['intrinsics'].append(intrinsics)
            #pose_dict['extrinsics'].append(extrinsics)
            #if frame == 100:
            #    import pickle
            #    with open('stream/pose.p', 'wb') as fout:
            #        pickle.dump(pose_dict, fout, protocol=pickle.HIGHEST_PROTOCOL)
            #    exit()
        else:
            # REPLAY mode: restore the camera recorded for this frame.
            intrinsics = o3d.read_pinhole_camera_intrinsic("stream/%d.intrinsics.json" % fid)
            T = np.loadtxt('stream/%d.pose' % fid)
            ctr.convert_from_pinhole_camera_parameters(intrinsics, T)
            if args.generate:
                """ Generate Point Cloud """
                # Capture only when the pose changed enough (or the frame
                # is explicitly listed in frame_trace).
                if (last_T is None) or (fid in frame_trace) or (AngularDistance(T, last_T)):
                    print('%d/%d' % (fid, num_frames))
                    #vis.update_geometry()
                    #vis.poll_events()
                    #vis.update_renderer()
                    depth = vis.capture_depth_float_buffer(False)
                    depth = np.array(depth)
                    #print(depth.max(), depth.min())
                    # Zero out far-away depth readings (beyond 30 units).
                    idx = np.where(depth > 30)
                    depth[idx] = 0
                    depth = o3d.Image(depth)
                    # Screen buffer is float [0,1]; convert to uint8 RGB.
                    image = vis.capture_screen_float_buffer(False)
                    image = o3d.Image(np.array(np.array(image)*255).astype(np.uint8))
                    rgbd = o3d.create_rgbd_image_from_color_and_depth(
                                image, depth, convert_rgb_to_intensity = False)
                    # Re-scale depth to millimetres before back-projection.
                    rgbd.depth = o3d.Image(np.array(rgbd.depth)*1000)
                    pcd = o3d.create_point_cloud_from_rgbd_image(rgbd, intrinsics)
                    #pcd = o3d.voxel_down_sample(pcd, voxel_size=0.05)
                    #pcd.transform(np.linalg.inv(T))
                    o3d.write_point_cloud("stream/%d.ply" % fid, pcd, write_ascii=True)
                    cv2.imwrite("stream/%d.png" % fid, np.array(image))
                    last_T = T
                if fid == num_frames - 1:
                    exit()
        frame += 1
コード例 #9
0
ファイル: rendering.py プロジェクト: haruishi43/RecordWithRS
 def _get_window_size(self):
     """Return (height, width) read from the stored pinhole-camera intrinsics file."""
     params = o3.read_pinhole_camera_intrinsic(
         "static_data/pinholeCameraIntrinsic.json")
     return params.height, params.width