Example #1
    def open3dVis(self, depth, normal=None, color=None):
        """Visualize 3d map points from eigen depth

        Args:
            depth: depth map
            normal: normal map
            color: rgb image

        """
        # get3dpoints (module-level helper) back-projects the depth map into
        # a list of 3D points.
        points = get3dpoints(depth, color)
        points = np.asarray(points)
        pcd = PointCloud()
        pcd.points = Vector3dVector(points)

        if color is not None:
            color = np.reshape(color, [-1, 3])
            pcd.colors = Vector3dVector(color / 255.)

        if normal is not None:
            normal = np.reshape(normal, [-1, 3])
        else:
            _, normal, _ = self.compute_local_planes(
                points[:, 0], points[:, 1], points[:, 2])
            normal = np.reshape(normal, [-1, 3])

        pcd.normals = Vector3dVector(normal)
        draw_geometries([pcd])
Example #2
    def merge_visualise(self, pc1, pc2):
        """Concatenate all 3d points in pc1 and pc2

        Args:
            pc1: point cloud 1
            pc2: point cloud 2
        """
        pcd = PointCloud()
        points = np.concatenate((pc1['points'], pc2['points']), axis=0)
        colors = np.concatenate((pc1['colors'], pc2['colors']), axis=0)

        pcd.points = Vector3dVector(np.asarray(points))
        pcd.colors = Vector3dVector(np.asarray(colors) / 255.)
        draw_geometries([pcd])
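
A minimal usage sketch for merge_visualise, with the dict layout ('points'/'colors' keys, colors in 0-255) inferred from the function body; `visualiser` is a hypothetical stand-in for whatever instance owns the method:

import numpy as np

# Two hypothetical clouds of 100 random points each, with 8-bit RGB colors.
pc1 = {'points': np.random.rand(100, 3),
       'colors': np.random.randint(0, 256, (100, 3))}
pc2 = {'points': np.random.rand(100, 3) + 1.0,
       'colors': np.random.randint(0, 256, (100, 3))}

visualiser.merge_visualise(pc1, pc2)  # `visualiser`: hypothetical owning instance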
Example #3
from open3d import PointCloud, Vector3dVector, write_point_cloud


def main(radius, name):
    # `epc` is a module-level (N, K) array loaded elsewhere (see Example #8);
    # its last two columns bound the query radius.
    cloud = epc.compress((epc[:, -2] > radius) & (radius >= epc[:, -1]), 0)
    pcd = PointCloud()
    pcd.points = Vector3dVector(cloud[:, :3])
    pcd.colors = Vector3dVector(cloud[:, 3:6])
    write_point_cloud(name, pcd)
    print(
        pcd,
        cloud[:, -1].max(),
        cloud[:, -3].sum(),
        cloud[:, -4].max(),
        cloud[:, -5].sum(),
    )
    return None
Example #4
def pc2color_pcd(pc, color):
    """Build a colored Open3D point cloud from a 3 x N point array."""
    import numpy as np
    from open3d import PointCloud, Vector3dVector
    color = np.array(color).reshape(-1, 3)
    assert pc.shape[0] == 3, 'pc must be a 3 x N array'
    if color.shape[0] == 1:
        color = np.repeat(color, pc.shape[1], axis=0)

    color = color.copy().astype(np.float32)
    color /= 255.0

    pcd = PointCloud()
    pcd.points = Vector3dVector(pc.T)
    pcd.colors = Vector3dVector(color[:, ::-1])  # flip BGR -> RGB channel order
    return pcd
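
A hedged usage sketch: pc2color_pcd expects a 3 x N point array and, judging by the color[:, ::-1] flip, colors in BGR (OpenCV) order; a single color is broadcast to all points:

import numpy as np
from open3d import draw_geometries  # legacy (pre-0.8) top-level API

pc = np.random.rand(3, 500)          # 3 x N points
pcd = pc2color_pcd(pc, [0, 0, 255])  # one BGR color -> all points red
draw_geometries([pcd])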
Example #5
    def open3dVis(self, data):
        """Visualize through open3d

        Args:
            data: dict with 'image', 'depth' and, optionally, 'normal' entries

        """
        pcd = PointCloud()
        depth = data['depth'][0, 0]
        xyz, _ = self.get_point_cloud(depth, data['image'][0])
        if 'normal' in data:
            normals = np.transpose(data['normal'][0], (1, 2, 0))
            normals = normals.reshape((-1, 3))
        else:
            _, normals, _ = self.__compute_local_planes(
                xyz[:, 0], xyz[:, 1], xyz[:, 2])
            normals = normals.reshape((-1, 3))

        color = np.transpose(data['image'][0], [1, 2, 0]).reshape((-1, 3))

        pcd.points = Vector3dVector(xyz)
        pcd.colors = Vector3dVector(color / 255.)
        pcd.normals = Vector3dVector(normals)
        draw_geometries([pcd])
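
From the indexing above, data['depth'] appears to be a (1, 1, H, W) batch and data['image'] a (1, 3, H, W) CHW image; a hedged sketch of the expected input (the 'normal' entry is optional, and `visualiser` is a hypothetical stand-in for the owning instance):

import numpy as np

H, W = 240, 320
data = {
    'depth': np.random.rand(1, 1, H, W),               # batched depth map
    'image': np.random.randint(0, 256, (1, 3, H, W)),  # batched CHW RGB image
    'normal': np.random.rand(1, 3, H, W) * 2 - 1,      # optional normal map
}
visualiser.open3dVis(data)  # `visualiser`: hypothetical owning instance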
Example #6
from open3d import PointCloud, Vector3dVector


def pcd_array(cloud):
    # cloud: (N, >=6) array; columns 0:3 are xyz, columns 3: are colors in [0, 1].
    pcd = PointCloud()
    pcd.points = Vector3dVector(cloud[:, :3])
    pcd.colors = Vector3dVector(cloud[:, 3:])
    return pcd
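
A short usage sketch, assuming an (N, 6) array whose color columns are already in [0, 1] (unlike the other examples, this helper does not divide by 255):

import numpy as np

cloud = np.hstack([np.random.rand(1000, 3) * 10.,  # xyz
                   np.random.rand(1000, 3)])       # rgb already in [0, 1]
pcd = pcd_array(cloud)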
Example #7
    return current_r, current_t


# To get the new_pc and the next_pc, there are two ways:
# (1) load them from an RGB-D image pair:

import numpy as np
from PIL import Image
from open3d import Image as IMG
from open3d import (create_point_cloud_from_rgbd_image,
                    create_rgbd_image_from_color_and_depth,
                    read_pinhole_camera_intrinsic)

color_image = np.array(Image.open("{0}/rgb/{1}".format(data_root, rgb_name)))
depth_im = np.array(Image.open("{0}/depth/{1}".format(data_root,
                                                      dep_name))) / 1000.0
color_raw = IMG(color_image)
depth_raw = IMG(depth_im.astype(np.float32))
rgbd_image = create_rgbd_image_from_color_and_depth(
    color_raw, depth_raw, depth_scale=1.0, convert_rgb_to_intensity=False)
pinhole_camera_intrinsic = read_pinhole_camera_intrinsic("camera_redwood.json")
new_pc = create_point_cloud_from_rgbd_image(rgbd_image,
                                            pinhole_camera_intrinsic)

# you can get the next_pc in the same way

# (2) build them directly from .xyz data:
from open3d import PointCloud, Vector3dVector

new_pc = PointCloud()
new_pc.points = Vector3dVector(verts)
new_pc.normals = Vector3dVector(norms)
new_pc.colors = Vector3dVector(colors / 255.)

# you can get the next_pc in the same way
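
verts, norms and colors are not defined in the snippet; a hedged sketch of loading them from a plain-text .xyz file, assuming a 9-column "x y z nx ny nz r g b" layout with colors in 0-255:

import numpy as np

data = np.loadtxt("scan.xyz")  # hypothetical file and column layout
verts = data[:, 0:3]
norms = data[:, 3:6]
colors = data[:, 6:9]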
Example #8
from numpy import load
from open3d import PointCloud, Vector3dVector, write_point_cloud
from sys import argv

if len(argv) > 3:
    name_epc = argv[1]
    radius = float(argv[2])
    name_output = argv[3]
else:
    name_epc = input('input npy: ')
    radius = float(input('radius: '))
    name_output = input('output: ')

epc = load(name_epc)
# Filter by the per-point radius bounds in the last two columns, then drop them.
cloud = epc[:, :-2].compress((epc[:, -2] > radius) & (radius >= epc[:, -1]), 0)
pcd = PointCloud()
pcd.points = Vector3dVector(cloud[:, :3])
pcd.colors = Vector3dVector(cloud[:, 3:])
write_point_cloud(name_output, pcd)
print(pcd)
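
A hedged sketch of the input this script expects: an .npy array whose first three columns are xyz, followed by color columns, with the last two columns bounding the query radius:

import numpy as np

# Hypothetical 8-column layout: x y z r g b r_upper r_lower.
epc = np.hstack([np.random.rand(1000, 3),        # xyz
                 np.random.rand(1000, 3),        # colors, already in [0, 1]
                 np.random.rand(1000, 1) + 1.0,  # column -2: must exceed radius
                 np.random.rand(1000, 1)])       # column -1: must not exceed it
np.save("cloud.npy", epc)
# Then run (script name hypothetical): python filter_cloud.py cloud.npy 0.5 out.pcd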
Example #9
import numpy as np
import os.path as osp

from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud
from open3d import PointCloud, Vector3dVector, write_point_cloud
from pyquaternion import Quaternion
from tqdm import tqdm


def export_scene_pointcloud(nusc: NuScenes,
                            out_path: str,
                            scene_token: str,
                            scene_name,
                            trafo_in_origin_BOOL: bool,
                            write_ascii_BOOL: bool,
                            channel: str = 'LIDAR_TOP',
                            min_dist: float = 3.0,
                            max_dist: float = 30.0,
                            verbose: bool = True) -> None:
    """
    Export fused point clouds of a scene to a Wavefront OBJ file.
    This point-cloud can be viewed in your favorite 3D rendering tool, e.g. Meshlab or Maya.
    :param nusc: NuScenes instance.
    :param out_path: Output path to write the point-cloud to.
    :param scene_token: Unique identifier of scene to render.
    :param channel: Channel to render.
    :param min_dist: Minimum distance to ego vehicle below which points are dropped.
    :param max_dist: Maximum distance to ego vehicle above which points are dropped.
    :param verbose: Whether to print messages to stdout.
    """

    # Check inputs.
    valid_channels = [
        'LIDAR_TOP', 'RADAR_FRONT', 'RADAR_FRONT_RIGHT', 'RADAR_FRONT_LEFT',
        'RADAR_BACK_LEFT', 'RADAR_BACK_RIGHT'
    ]
    camera_channels = [
        'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT',
        'CAM_BACK', 'CAM_BACK_RIGHT'
    ]
    assert channel in valid_channels, 'Input channel {} not valid.'.format(
        channel)

    # Get records from DB.
    scene_rec = nusc.get('scene', scene_token)
    start_sample_rec = nusc.get('sample', scene_rec['first_sample_token'])
    sd_rec = nusc.get('sample_data', start_sample_rec['data'][channel])

    # Collect the sample_data tokens of the scene by walking the linked list.
    cur_sd_rec = sd_rec
    sd_tokens = []
    while cur_sd_rec['next'] != '':
        cur_sd_rec = nusc.get('sample_data', cur_sd_rec['next'])
        sd_tokens.append(cur_sd_rec['token'])

    ego_pose_info = []
    zeitstample = 0
    for sd_token in tqdm(sd_tokens):
        zeitstample = zeitstample + 1
        lidar_rec = nusc.get('sample_data', sd_token)
        if verbose:
            print('Processing {}'.format(lidar_rec['filename']))
        pc = LidarPointCloud.from_file(
            osp.join(nusc.dataroot, lidar_rec['filename']))

        # Points live in the sensor frame, so they need to be transformed.
        # First step: transform the point cloud into the ego vehicle frame
        # for the timestamp of the sweep.
        cs_record = nusc.get('calibrated_sensor',
                             lidar_rec['calibrated_sensor_token'])
        pc.rotate(Quaternion(cs_record['rotation']).rotation_matrix)
        pc.translate(np.array(cs_record['translation']))

        # Optional: filter by distance to remove points on the ego vehicle.
        dists_origin = np.sqrt(np.sum(pc.points[:3, :]**2, axis=0))
        keep = np.logical_and(min_dist <= dists_origin,
                              dists_origin <= max_dist)
        pc.points = pc.points[:, keep]
        if verbose:
            print('Distance filter: Keeping %d of %d points...' %
                  (keep.sum(), len(keep)))

        # Second step: optionally transform into the global frame.
        poserecord = nusc.get('ego_pose', lidar_rec['ego_pose_token'])
        if trafo_in_origin_BOOL:
            pc.rotate(Quaternion(poserecord['rotation']).rotation_matrix)
            pc.translate(np.array(poserecord['translation']))

        ego_pose_info.append(
            "Ego-Pose-Info for timestamp %i sd_token(%s) :\n" %
            (zeitstample, sd_token))
        ego_pose_info.append("   Rotation : %s \n" %
                             np.array2string(np.array(poserecord['rotation'])))
        ego_pose_info.append(
            "   Translation : %s \n" %
            np.array2string(np.array(poserecord['translation'])))

        pc_nparray = np.asarray(pc.points)
        xyzi = pc_nparray.T  # (N, 4): x, y, z, intensity

        xyz = xyzi[:, :3]

        # Replicate the normalized intensity into all three color channels so
        # that each point is rendered as a gray value.
        intensity_from_velodyne = xyzi[:, 3] / 255.
        intensity = np.repeat(intensity_from_velodyne[:, None], 3, axis=1)

        PointCloud_open3d = PointCloud()
        PointCloud_open3d.points = Vector3dVector(xyz)
        PointCloud_open3d.colors = Vector3dVector(intensity)
        write_point_cloud("%s-%i-%s.pcd" % (out_path, zeitstample, sd_token),
                          PointCloud_open3d,
                          write_ascii=write_ascii_BOOL)

    # Write the collected ego-pose info next to the point clouds.
    with open(osp.join(osp.dirname(out_path), "ego_pose_info.txt"), 'w+') as f:
        f.write("ego_pose_info for scene %s \n (with token %s) \n" %
                (scene_name, scene_token))
        f.write(' '.join(ego_pose_info))
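
A hedged driver sketch, assuming the nuscenes-devkit package and a local copy of the v1.0-mini split under a hypothetical dataroot; the scene token and name are taken from the first scene record:

import os
from nuscenes.nuscenes import NuScenes

nusc = NuScenes(version='v1.0-mini', dataroot='/data/nuscenes', verbose=True)
scene = nusc.scene[0]
os.makedirs('out', exist_ok=True)  # PCD files and ego_pose_info.txt land here
export_scene_pointcloud(nusc,
                        out_path='out/' + scene['name'],
                        scene_token=scene['token'],
                        scene_name=scene['name'],
                        trafo_in_origin_BOOL=True,
                        write_ascii_BOOL=False)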