import os
import re

import numpy as np

# Imports assumed from the RobotCar dataset SDK's python/ directory.
from build_pointcloud import build_pointcloud
from camera_model import CameraModel
from image import load_image
from transform import build_se3_transform


def get_uvd(image_dir, laser_dir, poses_file, models_dir, extrinsics_dir, image_idx):

    model = CameraModel(models_dir, image_dir)

    extrinsics_path = os.path.join(extrinsics_dir, model.camera + '.txt')
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]

    G_camera_vehicle = build_se3_transform(extrinsics)
    G_camera_posesource = None

    poses_type = re.search(r'(vo|ins|rtk)\.csv', poses_file).group(1)
    if poses_type in ['ins', 'rtk']:
        with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
            extrinsics = next(extrinsics_file)
            G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
    else:
        # VO frame and vehicle frame are the same
        G_camera_posesource = G_camera_vehicle

    timestamps_path = os.path.join(image_dir, os.pardir, model.camera + '.timestamps')
    if not os.path.isfile(timestamps_path):
        timestamps_path = os.path.join(image_dir, os.pardir, os.pardir, model.camera + '.timestamps')

    timestamp = 0
    with open(timestamps_path) as timestamps_file:
        for i, line in enumerate(timestamps_file):
            if i == image_idx:
                timestamp = int(line.split(' ')[0])
                break

    # Accumulate laser scans within +/- 10 s of the image timestamp.
    pointcloud, reflectance = build_pointcloud(laser_dir, poses_file, extrinsics_dir,
                                               timestamp - 1e7, timestamp + 1e7, timestamp)

    # Move the point cloud from the pose-source frame into the camera frame.
    pointcloud = np.dot(G_camera_posesource, pointcloud)

    image_path = os.path.join(image_dir, str(timestamp) + '.png')
    image = load_image(image_path, model)

    uv, depth = model.project(pointcloud, image.shape)

    return uv, depth, timestamp, model
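
A minimal usage sketch for get_uvd; every path below is a hypothetical placeholder for a local RobotCar dataset layout.

# Hypothetical paths; adjust to your own dataset checkout.
uv, depth, timestamp, model = get_uvd(
    image_dir='data/sample/stereo/centre',
    laser_dir='data/sample/ldmrs',
    poses_file='data/sample/vo/vo.csv',
    models_dir='robotcar-dataset-sdk/models',
    extrinsics_dir='robotcar-dataset-sdk/extrinsics',
    image_idx=100)
print('Projected {} points at timestamp {}'.format(uv.shape[1], timestamp))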
from types import SimpleNamespace


def get_next_split(i_start, max_trajectory_size, trajectory_ned, lidar_dir,
                   extrinsics_dir='robotcar-dataset-sdk/extrinsics'):
    # A mutable namespace replaces the original namedtuple, which was never
    # instantiated (attributes were being set on the class itself).
    split = SimpleNamespace(i_end=None, end_of_route=False,
                            pcl_ned=None, reflectance=None)
    state_first_frame = trajectory_ned[:, i_start]
    i_max = min(i_start + max_trajectory_size, trajectory_ned.shape[1])

    prev_pos = state_first_frame[:3]
    dist_to_20 = 20  # metres of trajectory per submap
    i_pcl_end = None
    n_submaps_20 = 0
    for i in range(i_start + 1, i_max):
        curr_pos = trajectory_ned[[0, 1, 2], i]
        dist_to_20 -= np.linalg.norm(prev_pos - curr_pos)
        prev_pos = curr_pos

        if dist_to_20 < 0:
            if i_pcl_end is not None:
                split.i_end = i_pcl_end
                n_submaps_20 += 1

            i_pcl_end = i
            dist_to_20 = 20

    print('Get next split: i_start {} i_end {} i_pcl_end {} n_submaps_20 {}'.
          format(i_start, split.i_end, i_pcl_end, n_submaps_20))

    if split.i_end is None or i_pcl_end is None:
        split.end_of_route = True
    else:
        split.end_of_route = False
        start_time = state_first_frame[6]
        end_time = trajectory_ned[6, i_pcl_end]

        # Build pointcloud in the vehicle reference frame
        # (ins_data_file is assumed to be a module-level path to the INS csv).
        print('Build PCL...')
        pcl_split_veh, split.reflectance = sdk_pcl.build_pointcloud(lidar_dir,
                                            ins_data_file, extrinsics_dir,
                                            start_time, end_time)
        print('Done!')

        # Transform pointcloud to the NED system
        state_first_frame = state_first_frame.flatten()
        split.pcl_ned = pcl_trafo(pcl_split_veh, trans_newref=state_first_frame[:3],
                                  rot=state_first_frame[3:])

    return split
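
A hedged driver sketch for get_next_split, assuming trajectory_ned is a 7xN array (rows 0-2 NED position, rows 3-5 attitude, row 6 timestamp), which is what the indexing above implies.

# Walk the route in ~20 m splits until the end is reached (sketch).
i_start = 0
while True:
    split = get_next_split(i_start, 1000, trajectory_ned, 'data/sample/ldmrs')
    if split.end_of_route:
        break
    print('Split ends at trajectory index', split.i_end)
    i_start = split.i_end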
poses_type = re.search(r'(vo|ins|rtk)\.csv', args.poses_file).group(1)
if poses_type in ['ins', 'rtk']:
    with open(os.path.join(args.extrinsics_dir, 'ins.txt')) as extrinsics_file:
        extrinsics = next(extrinsics_file)
        G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
else:
    # VO frame and vehicle frame are the same
    G_camera_posesource = G_camera_vehicle


timestamps_path = os.path.join(args.image_dir, os.pardir, model.camera + '.timestamps')
if not os.path.isfile(timestamps_path):
    timestamps_path = os.path.join(args.image_dir, os.pardir, os.pardir, model.camera + '.timestamps')

timestamp = 0
with open(timestamps_path) as timestamps_file:
    for i, line in enumerate(timestamps_file):
        if i == args.image_idx:
            timestamp = int(line.split(' ')[0])
            break

pointcloud, reflectance = build_pointcloud(args.laser_dir, args.poses_file, args.extrinsics_dir,
                                           timestamp - 1e7, timestamp + 1e7, timestamp)

pointcloud = np.dot(G_camera_posesource, pointcloud)

image_path = os.path.join(args.image_dir, str(timestamp) + '.png')
image = load_image(image_path, model)

uv, depth = model.project(pointcloud, image.shape)

plt.imshow(image)
plt.scatter(np.ravel(uv[0, :]), np.ravel(uv[1, :]), s=2, c=depth, edgecolors='none', cmap='jet')
plt.xlim(0, image.shape[1])
plt.ylim(image.shape[0], 0)
plt.xticks([])
plt.yticks([])
plt.show()
Example 4
def generate_vo_submap(args):
    # Get trajectory and submap metadata (ins_data_file and lidar_timestamp_file
    # are assumed to be module-level paths).
    trajectory_ned = import_trajectory_ned(ins_data_file, lidar_timestamp_file)
    pcl_dir = 'data/reference/submaps_{}m'.format(args.length)
    metadata_filename = pcl_dir + '/metadata.csv'
    pcl_metadata = get_pcl_metadata(metadata_filename, seg_idx=args.index)
    assert pcl_metadata is not None
    
    # Get start and end time for VO trajectory generation (begin 30m from submap 
    # start, end where submap ends)
    i_start = np.argwhere(trajectory_ned[6,:] == pcl_metadata['timestamp_start'])[0,0]
    
    dist_to_vo_start = 30
    prev_pos = trajectory_ned[:3,i_start]
    for i_vo_start in range(i_start-1,0,-1):
        curr_pos = trajectory_ned[[0,1,2],i_vo_start]
        dist_to_vo_start -= np.linalg.norm(prev_pos - curr_pos)
        prev_pos = curr_pos
        if dist_to_vo_start < 0:
            break
    
    dist_to_vo_end = args.length
    prev_pos = trajectory_ned[:3,i_start]
    for i_vo_end in range(i_start+1,trajectory_ned.shape[1]):
        curr_pos = trajectory_ned[[0,1,2],i_vo_end]
        dist_to_vo_end -= np.linalg.norm(prev_pos - curr_pos)
        prev_pos = curr_pos
        if dist_to_vo_end < 0:
            break
        
    # Get timestamp of image considering offset
    i_img = i_start
    if args.camera == 'center' and args.offset > 0:
        dist_to_img = args.offset
        prev_pos = trajectory_ned[:3,i_start]
        for i_img in range(i_start-1,0,-1):
            curr_pos = trajectory_ned[[0,1,2],i_img]
            dist_to_img -= np.linalg.norm(prev_pos - curr_pos)
            prev_pos = curr_pos
            if dist_to_img < 0:
                break
    elif args.camera in ['left', 'right']:
        dist_to_img = args.length / 2
        prev_pos = trajectory_ned[:3,i_start]
        for i_img in range(i_start+1,trajectory_ned.shape[1]):
            curr_pos = trajectory_ned[[0,1,2],i_img]
            dist_to_img -= np.linalg.norm(prev_pos - curr_pos)
            prev_pos = curr_pos
            if dist_to_img < 0:
                break

    # Build pointcloud based on visual odometry
    timestamp_start = trajectory_ned[6,i_vo_start]
    timestamp_img = trajectory_ned[6,i_img]
    timestamp_end = trajectory_ned[6,i_vo_end]
    print('Build PCL from VO with {} measurements...'.format(i_vo_end-i_vo_start))
    pointcloud, refl = sdk_pcl.build_pointcloud(lidar_dir, vo_data_file, 
                                             extrinsics_dir, timestamp_start, 
                                             timestamp_end, timestamp_img)
    print('Done!')

    # Transform pointcloud to NED and generate submap
    reference_state = trajectory_ned[:,i_img].flatten()
    pointcloud_ned = pcl_trafo(pointcloud, trans_newref=reference_state[:3], 
                               rot=reference_state[3:])
    
    i_center = np.argwhere(trajectory_ned[6,:] == pcl_metadata['timestamp_center'])[0,0]
    submap_ned, _ = get_pcl_segment(trajectory_ned, pointcloud_ned, i_center, 
                                    float(args.length), alignment='trajectory', 
                                    width=40)
    
    submap = pcl_trafo(submap_ned, trans_oldref=-reference_state[:3], 
                       rot=-reference_state[3:])
    
    return i_img, submap
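
A hedged call sketch for generate_vo_submap; the Namespace stands in for parsed command-line arguments, and the module-level inputs (ins_data_file, vo_data_file, lidar_dir, extrinsics_dir, lidar_timestamp_file) are assumed to be defined elsewhere in the script.

from argparse import Namespace

# Hypothetical arguments: 20 m submap, segment index 5, centre camera, 10 m offset.
args = Namespace(length=20, index=5, camera='center', offset=10)
i_img, submap = generate_vo_submap(args)
print('Submap anchored at trajectory index', i_img)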
        timestamp = int(line.split(' ')[0])
        Ltimestamps.append(timestamp)

# Get timestamps for camera interpolation, interpolate poses, and build lidar point clouds
stamp = []
stamp.append(Ctimestamps[0])
camera_poses = []
pointc = []
subset = np.asarray(Ltimestamps[1000:1100])

for i in range(len(subset) - 1):
    stamp.append(stamp[i] + subset[i + 1] - subset[i])
    camera_poses.append(interpolate_vo_poses(vopath, [stamp[i + 1]], stamp[i]))
    # start_time == end_time here selects only the single scan taken at subset[i]
    pointc.append(
        np.asarray(
            build_pointcloud(lmsFrontDir, vopath, extrinsics, subset[i],
                             subset[i])[0]).transpose())
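
In the SDK, interpolate_vo_poses returns 4x4 poses of the requested timestamps relative to the origin timestamp, so each camera_poses entry above holds a single relative step. A hedged sketch of chaining those steps into poses in the first frame (the direction convention is an assumption):

# Chain per-step relative poses into cumulative poses (sketch).
global_poses = [np.eye(4)]
for rel in camera_poses:
    global_poses.append(global_poses[-1] @ np.asarray(rel[0]))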

# Get transformations between lidar scans, and the per-iteration error
LidarPoses, c = icpLidar(subset)

# Plot error against iterations (disabled)
# y = []
# for i in range(len(c)):
#     y.append(i + 1)
# plt.scatter(y, c)
# plt.title("Reduction in squared error deviation by iterative closest point")
# print(len(c))
# plt.xticks(y)
# plt.xlabel("iterations")
# plt.ylabel("Mean error")
Example 6
            image = load_image(image_path, model)

            with open(lidar_timestamps_path) as lidar_timestamps_file:
                for j, row in enumerate(lidar_timestamps_file):
                    lidar_timestamps = int(row.split(' ')[0])

                    if (lidar_timestamps > image_timestamp):
                        start_time = image_timestamp - 2e6
                        end_time = image_timestamp + 2e6
                        break

            pointcloud, reflectance = build_pointcloud(my_params.laser_dir,
                                                       poses_file,
                                                       extrinsics_dir,
                                                       start_time, end_time,
                                                       lidar_timestamps)
            pointcloud = np.dot(G_camera_posesource, pointcloud)

            uv, depth = model.project(pointcloud, image.shape)

            frame_path = os.path.join(my_params.reprocess_image_dir,
                                      str(image_timestamp) + '.png')
            frame = cv2.imread(frame_path)  # must be the preprocessed image
            pointcloud = np.zeros_like(frame)  # reused as an image-sized buffer

            for k in range(uv.shape[1]):
                x_p = int(np.ravel(uv[:, k])[0])
                y_p = int(np.ravel(uv[:, k])[1])
Example 7
                               '.timestamps')

timestamp = 0
with open(timestamps_path) as timestamps_file:
    for i, line in enumerate(timestamps_file):
        if i == image_idx:
            timestamp = int(line.split(' ')[0])
            break

pointcloud, reflectance = build_pointcloud(laser_dir, poses_file,
                                           extrinsics_dir, timestamp - 5e7,
                                           timestamp + 5e7, timestamp)

pointcloud = np.dot(G_camera_posesource, pointcloud)

image_path = os.path.join(image_dir, str(timestamp) + '.png')
image = load_image(image_path, model)

uv, depth = model.project(pointcloud, image.shape)

plt.figure(1)
plt.imshow(image)
plt.scatter(np.ravel(uv[0, :]),
            np.ravel(uv[1, :]),
            s=2,
            c=depth,
            edgecolors='none',
            cmap='jet')
plt.show()

def generate_pointcloud():

    WRITE_IMAGE = 1
    DRAW_MAP = 1

    # Data input
    image_dir = my_params.image_dir
    ldrms_dir = my_params.laser_dir
    lmsfront_dir = my_params.lmsfront_dir
    lmsrear_dir  = my_params.lmsrear_dir
    poses_file = my_params.poses_file
    models_dir = my_params.model_dir
    extrinsics_dir = my_params.extrinsics_dir

    output_img_dir   = my_params.output_dir2 + 'pl_img_'   + my_params.dataset_no + '//'
    output_ldrms_dir = my_params.output_dir2 + 'pl_ldrms_' + my_params.dataset_no + '//'
    output_front_dir = my_params.output_dir2 + 'pl_front_' + my_params.dataset_no + '//'
    output_rear_dir  = my_params.output_dir2 + 'pl_rear_'  + my_params.dataset_no + '//'
    
    map_ldrms_dir = my_params.output_dir2 + 'map_ldrms_' + my_params.dataset_no + '//'
    map_front_dir = my_params.output_dir2 + 'map_front_' + my_params.dataset_no + '//'
    map_rear_dir  = my_params.output_dir2 + 'map_rear_'  + my_params.dataset_no + '//'


    model = CameraModel(models_dir, image_dir)
    extrinsics_path = os.path.join(extrinsics_dir, model.camera + '.txt')
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_camera_vehicle = build_se3_transform(extrinsics)
    G_camera_posesource = None

    poses_type = re.search(r'(vo|ins|rtk)\.csv', poses_file).group(1)
    if poses_type in ['ins', 'rtk']:
        with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
            extrinsics = next(extrinsics_file)
            G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
    else:
        # VO frame and vehicle frame are the same
        G_camera_posesource = G_camera_vehicle

    image_size = (960,1280,3)

    timestamps_path = os.path.join(my_params.dataset_patch, model.camera + '.timestamps')
    timestamp = 0
    plt.figure()
    with open(timestamps_path) as timestamps_file:
        for i, line in enumerate(timestamps_file):
            
            if i < 799:
                # skip the first 799 frames of the route
                continue

            timestamp = int(line.split(' ')[0])
            start_time = timestamp - 1e6
            
            frame_path = os.path.join(my_params.reprocess_image_dir + '//' + str(timestamp) + '.png')
            frame = cv2.imread(frame_path) 
            
            print('process image ',i,'-',timestamp)

            # Depth buffers for the (disabled) CSV export below.
            pl_ldrms = np.zeros((960, 1280), dtype=int)
            pl_front = np.zeros((960, 1280), dtype=int)
            # LDRMS
            ldrms_pointcloud, _ = build_pointcloud(ldrms_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 2e6, timestamp)
            ldrms_pointcloud = np.dot(G_camera_posesource, ldrms_pointcloud)
            uv, depth = model.project(ldrms_pointcloud,image_size)

            x_p = np.ravel(uv[0,:])
            y_p = np.ravel(uv[1,:])
            z_p = np.ravel(depth)

            map_ldrms = (np.array(ldrms_pointcloud[0:3,:])).transpose()
            map_ldrms_filename = map_ldrms_dir + str(timestamp) + '.csv'
            np.savetxt(map_ldrms_filename, map_ldrms, delimiter=",")

            if (DRAW_MAP):
                # Top-down map view: north (x) plotted against east (y).
                map_x = np.asarray(ldrms_pointcloud[0, :]).ravel()
                map_y = np.asarray(ldrms_pointcloud[1, :]).ravel()

                plt.scatter(map_y, map_x, c='b', marker='.', zorder=1)

            # LDRMS pointcloud to CSV
            # for k in range(uv.shape[1]):

            #     pl_ldrms[int(y_p[k]),int(x_p[k])] = int(100*depth[k])

            # ldrms_filename = output_ldrms_dir + str(timestamp) + '.csv'
            # np.savetxt(ldrms_filename, pl_ldrms, delimiter=",")
        

            # LMS-FRONT
            front_pointcloud, _ = build_pointcloud(lmsfront_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 1e6, timestamp)
            front_pointcloud = np.dot(G_camera_posesource, front_pointcloud)
            wh,xrange = model.project(front_pointcloud,image_size)

            x_f = np.ravel(wh[0,:])
            y_f = np.ravel(wh[1,:])
            z_f = np.ravel(xrange)

            map_front = (np.array(front_pointcloud[0:3,:])).transpose()
            map_front_filename = map_front_dir + str(timestamp) + '.csv'
            np.savetxt(map_front_filename, map_front, delimiter=",")

            if (DRAW_MAP):
                # Side view: height (-z, up) plotted against east (y).
                map_y = np.asarray(front_pointcloud[1, :]).ravel()
                map_z = -np.asarray(front_pointcloud[2, :]).ravel()

                plt.scatter(map_y, map_z, c='r', marker='.', zorder=1)

            # LMS-FRONT pointcloud to CSV
            # for k in range(wh.shape[1]):

            #     pl_ldrms[int(y_f[k]),int(x_f[k])] = int(100*xrange[k])

            # front_filename = output_front_dir + str(timestamp) + '.csv'
            # np.savetxt(front_filename, pl_ldrms, delimiter=",")

            # LMS-REAR

            rear_pointcloud, _ = build_pointcloud(lmsrear_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 2e6, timestamp)
            rear_pointcloud = np.dot(G_camera_posesource, rear_pointcloud)

            map_rear = (np.array(rear_pointcloud[0:3,:])).transpose()
            map_rear_filename = map_rear_dir + str(timestamp) + '.csv'
            np.savetxt(map_rear_filename, map_rear, delimiter=",")

            if (DRAW_MAP):
                map_y = -np.asarray(rear_pointcloud[1, :]).ravel()
                map_z = np.asarray(rear_pointcloud[2, :]).ravel()

                plt.scatter(map_y, map_z, c='g', marker='.', zorder=1)


            if (WRITE_IMAGE):
                # Colour-code projected points by depth; OpenCV wants plain int BGR values.
                for k in range(uv.shape[1]):
                    color = (int(255 - 8 * depth[k]), int(255 - 3 * depth[k]), int(50 + 3 * depth[k]))
                    frame = cv2.circle(frame, (int(x_p[k]), int(y_p[k])), 1, color, 1)

                for k in range(wh.shape[1]):
                    color = (int(255 - 8 * xrange[k]), int(255 - 3 * xrange[k]), int(50 + 3 * xrange[k]))
                    frame = cv2.circle(frame, (int(x_f[k]), int(y_f[k])), 1, color, 1)

                cv2.imshow('frame',frame)
                image_filename = output_img_dir   + str(timestamp) + '.png'
                cv2.imwrite(image_filename, frame)
                cv2.waitKey(1)
            plt.pause(0.1)
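
A hedged entry-point sketch for running the export loop; my_params is assumed to be the project's parameter module, as used throughout the function.

if __name__ == '__main__':
    generate_pointcloud()
    cv2.destroyAllWindows()  # close the cv2 preview window when finished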