#!/usr/bin/env python
# -*- coding:utf-8 -*-
#@Time  : 2019/7/17 2:32
#@Author: 黄怀宇
#@File  : codeRepetition.py
# from sklearn.metrics import accuracy_score #works
# print(accuracy_score([1, 1, 0], [1, 0, 1]))

from nuscenes.nuscenes import NuScenes
import numpy as np

nusc = NuScenes(version='v1.0-mini', dataroot='D:\\v1.0-mini', verbose=True)
print('all scenes')
nusc.list_scenes()  # list every scene in the split

my_scene = nusc.scene[0]
print('\nmy_scene\n', my_scene)  # print the full record of the first scene (all of its tokens)

first_sample_token = my_scene['first_sample_token']  # first_sample_token of the first record in scene.json

# sample.json holds, for this first frame, all of its fields (token, timestamp,
# prev, next, scene_token) plus the data token of every sensor on the vehicle

my_sample = nusc.get('sample', first_sample_token)
print('my_sample\n', my_sample)
#       To keep only the per-sensor data: print('my_sample\n', my_sample['data'])
#       To fetch only CAM_FRONT:
# sensor = 'CAM_FRONT'
# cam_front_data = nusc.get('sample_data', my_sample['data'][sensor])
# print(cam_front_data)
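# A small sketch (not part of the original snippet): walk every sample in this scene
# by following the 'next' token chain; an empty string marks the end of the scene.
# Only fields already used above (first_sample_token, 'data', 'next') are needed.
sample_token = my_scene['first_sample_token']
while sample_token != '':
    sample = nusc.get('sample', sample_token)
    print(sample['timestamp'], list(sample['data'].keys()))
    sample_token = sample['next']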
Example n. 2
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box
from PIL import Image
from pyquaternion import Quaternion
import numpy as np
import os
import os.path as osp
from nuscenes.utils.geometry_utils import view_points, box_in_image, BoxVisibility, transform_matrix

nusc = NuScenes(version='v1.0-mini', dataroot='/share/nuscenes', verbose=True)
nusc_scenes = nusc.list_scenes()  # note: list_scenes() only prints a summary and returns None

current_scene = nusc.scene[0]
sensor = 'CAM_FRONT'
first_token = current_scene['first_sample_token']
first_token_meta = nusc.get('sample', first_token)
all_sensor_data = first_token_meta['data']
CAM_SAMPLE = nusc.get('sample_data', first_token_meta['data'][sensor])
CAM_EGO = CAM_SAMPLE['ego_pose_token']
EGO = nusc.get('ego_pose', token=CAM_EGO)

# map point cloud into image


def map_pointcloud_to_image(nusc,
                            pointsensor_token: str,
                            camera_token: str,
                            min_dist: float = 1.0,
                            render_intensity: bool = False,
                            show_lidarseg: bool = False):
    """
Example n. 3
# Imports assumed for this excerpt (they are not shown in the source). The alias
# MyQuaternion is taken to be pyquaternion's Quaternion; Roi, resize_img, Entry,
# Object, CLASS_MAP, wrap_angle, calc_cuboid_from_3d, bbox_from_cuboid and the
# local map_pointcloud_to_image are project-specific helpers that are not shown.
import math
import os

import cv2
import matplotlib.pyplot as plt
import numpy as np
from pymongo import MongoClient
from pyquaternion import Quaternion as MyQuaternion
from tqdm import tqdm

from nuscenes.nuscenes import NuScenes


def main(args):
    args.path = "/home/jo/training_data/nuscenes/nuscenes-v1.0"
    args.resize = [640, 256, 0]

    client = MongoClient(args.conn)
    collection = client[args.db][args.collection]

    nusc = NuScenes(version="v1.0-mini", dataroot=args.path, verbose=True)
    nusc.list_scenes()

    for scene in tqdm(nusc.scene):
        next_sample_token = scene["first_sample_token"]

        while True:
            sample = nusc.get('sample', next_sample_token)
            next_sample_token = sample["next"]
            
            sample_data_token = sample["data"]["CAM_FRONT"]
            sample_data = nusc.get_sample_data(sample_data_token)
            cam_front_data = nusc.get('sample_data', sample_data_token)

            # Create image data
            img_path = sample_data[0]
            if os.path.exists(img_path):
                img = cv2.imread(img_path)
                # nusc.render_pointcloud_in_image  # bare attribute access in the original; it calls nothing, so it is commented out
                depth_map = map_pointcloud_to_image(nusc, sample['token'], pointsensor_channel='LIDAR_TOP', camera_channel='CAM_FRONT')
                # Not sure about the radar data; it often does not align well with the lidar data, so it is left out for now
                # depth_map = map_pointcloud_to_image(nusc, sample['token'], pointsensor_channel='RADAR_FRONT', camera_channel='CAM_FRONT', depth_map=depth_map)
                roi = Roi()
                if args.resize:
                    img, roi = resize_img(img, args.resize[0], args.resize[1], args.resize[2])
                    depth_map, _ = resize_img(depth_map, args.resize[0], args.resize[1], args.resize[2], cv2.INTER_NEAREST)
                img_bytes = cv2.imencode('.jpeg', img)[1].tobytes()
                depth_bytes = cv2.imencode(".png", depth_map)[1].tobytes()
                content_type = "image/jpeg"
            else:
                print("WARNING: file not found: " + img_path + ", continuing with next image")
                if next_sample_token == "":  # avoid looping forever if the last sample's image is missing
                    break
                continue

            # Get sensor extrinsics. Not sure why roll and yaw seem to be PI/2 off compared to the nuImages calibrated sensor
            sensor = nusc.get('calibrated_sensor', cam_front_data['calibrated_sensor_token'])
            q = MyQuaternion(sensor["rotation"][0], sensor["rotation"][1], sensor["rotation"][2], sensor["rotation"][3])
            roll  = math.atan2(2.0 * (q.z * q.y + q.w * q.x) , 1.0 - 2.0 * (q.x * q.x + q.y * q.y)) + math.pi * 0.5
            pitch = math.asin(2.0 * (q.y * q.w - q.z * q.x)) 
            yaw   = math.atan2(2.0 * (q.z * q.w + q.x * q.y) , - 1.0 + 2.0 * (q.w * q.w + q.x * q.x)) + math.pi * 0.5
            # print(sensor["translation"])
            # print(f"Pitch: {pitch*57.2} Yaw: {yaw*57.2} Roll: {roll*57.2}")

            # Sensor calibration is static, pose would be dynamic. TODO: Somehow also add some sort of cam to cam motion to be learned
            # ego_pose = nusc.get("ego_pose", cam_front_data["ego_pose_token"])
            # q = MyQuaternion(ego_pose["rotation"][0], ego_pose["rotation"][1], ego_pose["rotation"][2], ego_pose["rotation"][3])
            # roll  = math.atan2(2.0 * (q.z * q.y + q.w * q.x) , 1.0 - 2.0 * (q.x * q.x + q.y * q.y)) + math.pi * 0.5
            # pitch = math.asin(2.0 * (q.y * q.w - q.z * q.x)) 
            # yaw   = math.atan2(2.0 * (q.z * q.w + q.x * q.y) , - 1.0 + 2.0 * (q.w * q.w + q.x * q.x)) + math.pi * 0.5
            # print(ego_pose["translation"])
            # print(f"Pitch: {pitch*57.2} Yaw: {yaw*57.2} Roll: {roll*57.2}")

            entry = Entry(
                img=img_bytes,
                content_type=content_type,
                depth=depth_bytes,
                org_source="nuscenes",
                org_id=img_path,
                objects=[],
                ignore=[],
                has_3D_info=True,
                has_track_info=True,
                sensor_valid=True,
                yaw=wrap_angle(yaw),
                roll=wrap_angle(roll),
                pitch=wrap_angle(pitch),
                translation=sensor["translation"],
                scene_token=sample["scene_token"],
                timestamp=sample["timestamp"]
            )

            labels = sample_data[1]
            idx_counter = 0
            for box in labels:
                if box.name in CLASS_MAP.keys():
                    box.translate(np.array([0, box.wlh[2] / 2, 0]))  # translate the box center to its bottom center
                    pos_3d = np.array([*box.center, 1.0])
                    cam_mat = np.hstack((sample_data[2], np.zeros((3, 1))))
                    cam_mat[0][2] += roi.offset_left
                    cam_mat[1][2] += roi.offset_top
                    cam_mat *= roi.scale
                    cam_mat[2][2] = 1.0
                    # Create the rotation matrix. Using the rotation matrix from nuScenes directly does not work here,
                    # so we compute the yaw the same way KITTI does and reuse that code.
                    v = np.dot(box.rotation_matrix, np.array([1, 0, 0]))
                    yaw = -np.arctan2(v[2], v[0])
                    rot_angle = wrap_angle(float(yaw) + math.pi * 0.5) # because parallel to optical view of camera = 90 deg
                    rot_mat = np.array([
                        [math.cos(rot_angle), 0.0, math.sin(rot_angle), 0.0],
                        [0.0, 1.0, 0.0, 0.0],
                        [-math.sin(rot_angle), 0.0, math.cos(rot_angle), 0.0],
                        [0.0, 0.0, 0.0, 1.0],
                    ])
                    pos_2d = np.matmul(cam_mat, pos_3d)
                    pos_2d /= pos_2d[2]
                    box3d = calc_cuboid_from_3d(pos_3d, cam_mat, rot_mat, box.wlh[0], box.wlh[2], box.wlh[1])
                    box2d = bbox_from_cuboid(box3d)

                    annotation = nusc.get("sample_annotation", box.token)
                    instance_token = annotation["instance_token"]

                    entry.objects.append(Object(
                        obj_class=CLASS_MAP[box.name],
                        box2d=box2d,
                        box3d=box3d,
                        box3d_valid=True,
                        instance_token=instance_token,
                        truncated=None,
                        occluded=None,
                        width=box.wlh[0],
                        length=box.wlh[1],
                        height=box.wlh[2],
                        orientation=rot_angle,
                        # converted to autosar coordinate system
                        x=pos_3d[2],
                        y=-pos_3d[0],
                        z=-pos_3d[1]
                    ))

                    # For debugging show the data in the image
                    box2d = list(map(int, box2d))
                    cv2.circle(img, (int(pos_2d[0]), int(pos_2d[1])), 3, (255, 0, 0))
                    cv2.rectangle(img, (box2d[0], box2d[1]), (box2d[0] + box2d[2], box2d[1] + box2d[3]), (255, 255, 0), 1)
                    cv2.putText(img,f"{idx_counter}", (box2d[0], box2d[1] + int(box2d[3])), 0, 0.4, (255, 255, 255))
                    idx_counter += 1
                    print(f"{idx_counter}: {math.degrees(rot_angle)}")

            f, (ax1, ax2) = plt.subplots(2, 1)
            ax1.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
            max_val = np.amax(depth_map)  # maximum depth in the map; an alternative to the fixed vmax below
            ax2.imshow(depth_map, cmap="gray", vmin=1, vmax=10000)
            plt.show()

            # collection.insert_one(entry.get_dict())

            if next_sample_token == "":
                break
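
# The original entry point is not part of this excerpt. Below is a minimal sketch of
# one, inferred only from the attributes main() reads (args.conn, args.db,
# args.collection, args.path, args.resize); all default values are illustrative.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser(description="Export nuScenes CAM_FRONT frames with depth maps to MongoDB")
    parser.add_argument("--conn", default="mongodb://localhost:27017", help="MongoDB connection string")
    parser.add_argument("--db", default="semseg", help="database name (illustrative default)")
    parser.add_argument("--collection", default="nuscenes", help="collection name (illustrative default)")
    parser.add_argument("--path", default="/data/nuscenes", help="nuScenes dataroot (overwritten inside main)")
    parser.add_argument("--resize", nargs=3, type=int, default=[640, 256, 0], help="target width, height and top offset (overwritten inside main)")
    main(parser.parse_args())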
Example n. 4
import matplotlib.pyplot as plotter
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import RadarPointCloud

nusc = NuScenes(
    version='v1.0-mini',
    dataroot='C:/Users/chase/OneDrive - Clemson University/Perception/Q4',
    verbose=True)

nusc.list_scenes()
my_scene = nusc.scene[0]
print(my_scene)
first_sample_token = my_scene['first_sample_token']

# The rendering command below is commented out because it tends to crash in notebooks
# nusc.render_sample(first_sample_token)
my_sample = nusc.get('sample', first_sample_token)
print(my_sample)
nusc.list_sample(my_sample['token'])

print(my_sample['data'])

sensor = 'CAM_FRONT'
cam_front_data = nusc.get('sample_data', my_sample['data'][sensor])
print(cam_front_data)

nusc.render_sample_data(cam_front_data['token'])

my_annotation_token = my_sample['anns'][18]
my_annotation_metadata = nusc.get('sample_annotation', my_annotation_token)
print(my_annotation_metadata)
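
# A possible next step, as in the nuScenes devkit tutorial (not part of this snippet):
# render just this annotation. Commented out like the render calls above, since it
# opens a matplotlib window.
# nusc.render_annotation(my_annotation_token)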