def get_uvd(image_dir, laser_dir, poses_file, models_dir, extrinsics_dir, image_idx):
    model = CameraModel(models_dir, image_dir)

    extrinsics_path = os.path.join(extrinsics_dir, model.camera + '.txt')
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]

    G_camera_vehicle = build_se3_transform(extrinsics)
    G_camera_posesource = None

    poses_type = re.search(r'(vo|ins|rtk)\.csv', poses_file).group(1)
    if poses_type in ['ins', 'rtk']:
        with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
            extrinsics = next(extrinsics_file)
            G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
    else:
        # VO frame and vehicle frame are the same
        G_camera_posesource = G_camera_vehicle

    timestamps_path = os.path.join(image_dir, os.pardir, model.camera + '.timestamps')
    if not os.path.isfile(timestamps_path):
        timestamps_path = os.path.join(image_dir, os.pardir, os.pardir, model.camera + '.timestamps')

    timestamp = 0
    with open(timestamps_path) as timestamps_file:
        for i, line in enumerate(timestamps_file):
            if i == image_idx:
                timestamp = int(line.split(' ')[0])
                break

    # Accumulate scans over a +/- 10 second window around the image timestamp
    pointcloud, reflectance = build_pointcloud(laser_dir, poses_file, extrinsics_dir,
                                               timestamp - 1e7, timestamp + 1e7, timestamp)

    # Move the pointcloud from the pose-source frame into the camera frame
    pointcloud = np.dot(G_camera_posesource, pointcloud)

    image_path = os.path.join(image_dir, str(timestamp) + '.png')
    image = load_image(image_path, model)

    uv, depth = model.project(pointcloud, image.shape)

    return uv, depth, timestamp, model
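
# Hypothetical usage sketch (not from the original): drive get_uvd against the
# standard RobotCar dataset layout and preview the projected depth. All paths
# and the image index below are placeholders.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt

    uv, depth, timestamp, model = get_uvd(
        'data/2014-05-06-12-54-54/stereo/centre',
        'data/2014-05-06-12-54-54/lms_front',
        'data/2014-05-06-12-54-54/vo/vo.csv',
        'robotcar-dataset-sdk/models',
        'robotcar-dataset-sdk/extrinsics',
        image_idx=100)
    plt.scatter(np.ravel(uv[0, :]), np.ravel(uv[1, :]), s=2, c=depth,
                edgecolors='none', cmap='jet')
    plt.gca().invert_yaxis()  # image coordinates: origin at top-left
    plt.show()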
Example 2
def makeCroppedGraySequence(seq_id, num_threads=8):
    src_folder = os.path.join(symlink('robotcar'), seq_id)
    dst_folder = os.path.join(symlink('robotcar_cropped_gray'), seq_id)
    if not os.path.exists(dst_folder):
        os.makedirs(dst_folder)

    # Convert images.
    stereo_dir = os.path.join(src_folder, 'stereo')

    for leftright in ['left', 'right']:
        in_dir = os.path.join(stereo_dir, leftright)
        images = utils_path.imagesFromDir(in_dir, extension='.png')
        im_names = [os.path.basename(i) for i in images]
        cam_model = camera_model.CameraModel(cam_model_dir, in_dir)
        out_dir = os.path.join(dst_folder, 'rect', leftright)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

        p = multiprocessing.Pool(num_threads)
        print('Converting %s...' % leftright)
        p.map(ParallelImageConverter(cam_model, in_dir, out_dir, im_names),
              range(len(images)))
        p.close()
        p.join()

        # K files
        # Image downscaling affects camera intrinsics
        f = [0.5 * x for x in cam_model.focal_length]
        c = [0.5 * x for x in cam_model.principal_point]
        K = np.array([[f[0], 0, c[0]], [0, f[1], c[1]], [0, 0, 1]])
        np.savetxt(os.path.join(dst_folder, '%s_K.txt' % leftright), K)

    # Timestamps are taken from the 16-digit image file names (left and right share them)
    times = [int(i[:16]) for i in im_names]

    # Get poses everywhere.
    T_W_C_file = os.path.join(dst_folder, 'T_W_C.txt')
    if not os.path.exists(T_W_C_file):
        print('Interpolating poses...')
        insext = np.loadtxt(os.path.join(sdk_dir, 'extrinsics',
                                         'ins.txt')).tolist()
        mtx = transform.build_se3_transform(insext)
        # Cx: Oxford style camera, where x looks into the image plane.
        T_I_Cx = pose.fromMatrix(np.asarray(mtx)).inverse()
        T_Cx_C = pose.yRotationDeg(90) * pose.zRotationDeg(90)
        T_I_C = T_I_Cx * T_Cx_C

        # T_W_I0: Because in Oxford, z looks down.
        T_W_I0 = pose.xRotationDeg(180)

        ins_path = os.path.join(src_folder, 'gps', 'ins.csv')
        T_I0_I = interpolate_poses.interpolate_ins_poses(
            ins_path, times, times[0])

        T_I0_I = [pose.fromMatrix(np.asarray(i)) for i in T_I0_I]

        T_W_C = [T_W_I0 * i * T_I_C for i in T_I0_I]
        T_W_C_serialized = np.array([i.asArray().ravel() for i in T_W_C])
        np.savetxt(T_W_C_file, T_W_C_serialized)
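
# Hypothetical round-trip check (not from the original): each row of
# T_W_C.txt above is one flattened pose, so the file reloads as an (N, 4, 4)
# stack. Assumes pose.asArray() yields a 4x4 matrix, as the ravel() implies.
def loadPoses(T_W_C_file):
    flat = np.loadtxt(T_W_C_file)    # shape (N, 16)
    return flat.reshape((-1, 4, 4))  # one T_W_C matrix per image timestamp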
Example 3
def compute_vo_poses(vo_path, pose_timestamps, origin_timestamp):
    """Accumulate absolute poses from relative visual odometry estimates.

    Args:
        vo_path (str): path to file containing relative poses from visual odometry.
        pose_timestamps (list[int]): UNIX timestamps bounding the window of interest.
        origin_timestamp (int): UNIX timestamp of origin frame. Poses will be reported relative to this frame.

    Returns:
        list[numpy.matrixlib.defmatrix.matrix]: SE3 matrix for each VO row inside the requested window.

    """
    with open(vo_path) as vo_file:
        vo_reader = csv.reader(vo_file)
        headers = next(vo_file)

        vo_timestamps = [0]

        # Axis permutation into the DSO coordinate convention
        changetoDSOcoord = np.matrix('0 1 0 0; 0 0 -1 0; 1 0 0 0; 0 0 0 1')
        abs_poses = [ml.identity(4)]
        abs_poses[0] = np.dot(changetoDSOcoord, abs_poses[0])

        lower_timestamp = min(min(pose_timestamps), origin_timestamp)
        upper_timestamp = max(max(pose_timestamps), origin_timestamp)
        print("lower timestamp, ", lower_timestamp)
        print("upper timestamp, ", upper_timestamp)
        for row in vo_reader:
            timestamp = int(row[0])
            if timestamp < lower_timestamp:
                vo_timestamps[0] = timestamp
                continue

            vo_timestamps.append(timestamp)

            xyzrpy = [float(v) for v in row[2:8]]
            rel_pose = build_se3_transform(xyzrpy)
            abs_pose = abs_poses[-1] * rel_pose
            abs_poses.append(abs_pose)

            if timestamp >= upper_timestamp:
                break

    return abs_poses
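
# Hypothetical usage sketch (not from the original): chain the VO increments
# and plot the resulting track. Path and timestamps below are placeholders.
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    poses = compute_vo_poses('vo/vo.csv', [1403774284447583], 1403774284447583)
    xs = [float(p[0, 3]) for p in poses]
    ys = [float(p[1, 3]) for p in poses]
    plt.plot(xs, ys, '.')
    plt.axis('equal')
    plt.show()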
def init_camera_model_posture():
    global CAMERA_MODEL
    global G_CAMERA_POSESOURCE
    models_dir = "/data/lyh/lab/robotcar-dataset-sdk/models/"
    CAMERA_MODEL = CameraModel(models_dir, "stereo_centre")
    # Read the camera and INS extrinsics
    extrinsics_path = "/data/lyh/lab/robotcar-dataset-sdk/extrinsics/stereo.txt"
    print(extrinsics_path)
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_camera_vehicle = build_se3_transform(extrinsics)
    print(G_camera_vehicle)

    extrinsics_path = "/data/lyh/lab/robotcar-dataset-sdk/extrinsics/ins.txt"
    print(extrinsics_path)
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_ins_vehicle = build_se3_transform(extrinsics)
    print(G_ins_vehicle)
    G_CAMERA_POSESOURCE = G_camera_vehicle * G_ins_vehicle
Example 5
def compute_ins_poses(ins_path, pose_timestamps, origin_timestamp):
    """Load absolute poses from an INS file.

    Args:
        ins_path (str): path to file containing poses from INS.
        pose_timestamps (list[int]): UNIX timestamps bounding the window of interest.
        origin_timestamp (int): UNIX timestamp of origin frame. Poses will be reported relative to this frame.

    Returns:
        list[numpy.matrixlib.defmatrix.matrix]: SE3 matrix for each INS row in the requested window.
        list[int]: the corresponding INS timestamps.

    """
    with open(ins_path) as ins_file:
        ins_reader = csv.reader(ins_file)
        headers = next(ins_file)

        ins_timestamps = [0]
        abs_poses = [ml.identity(4)]

        lower_timestamp = min(min(pose_timestamps), origin_timestamp)
        upper_timestamp = max(max(pose_timestamps), origin_timestamp)

        for row in ins_reader:
            timestamp = int(row[0])
            if timestamp < lower_timestamp:
                ins_timestamps[0] = timestamp
                continue
            ins_timestamps.append(timestamp)

            xyzrpy = [float(v)
                      for v in row[5:8]] + [float(v) for v in row[-3:]]
            abs_pose = build_se3_transform(xyzrpy)
            abs_poses.append(abs_pose)

            if timestamp >= upper_timestamp:
                break

    ins_timestamps = ins_timestamps[1:]
    abs_poses = abs_poses[1:]
    print("requested timestamps:", len(pose_timestamps))
    print("ins timestamps:", len(ins_timestamps))

    return abs_poses, ins_timestamps
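
# Hypothetical usage sketch (not from the original): pull a position track out
# of the INS poses. Path and timestamps below are placeholders.
if __name__ == '__main__':
    poses, stamps = compute_ins_poses('gps/ins.csv',
                                      [1403774284447583], 1403774284447583)
    track = np.hstack([p[0:3, 3] for p in poses])  # 3xN positions, one per INS row
    print(track.shape, len(stamps))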
def ProcessFrame(params, Tprev, frame_nr, stamp):
    """Apply the relative pose in params to Tprev and return the updated pose
    together with a stamped /world -> /navtech transform message."""
    Tinc = build_se3_transform(params)
    Tupd = Tprev * Tinc

    t = geometry_msgs.msg.TransformStamped()
    t.header.frame_id = "/world"
    t.header.stamp = stamp
    t.child_frame_id = "/navtech"
    t.transform.translation.x = Tupd[0, 3]
    t.transform.translation.y = Tupd[1, 3]
    t.transform.translation.z = 0.0  # z is fixed to zero (planar assumption)

    qupd = so3_to_quaternion(Tupd[0:3, 0:3])
    t.transform.rotation.x = qupd[1]
    t.transform.rotation.y = qupd[2]
    t.transform.rotation.z = qupd[3]
    t.transform.rotation.w = qupd[0]

    return Tupd, t
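
# Hypothetical usage sketch (not from the original): broadcast the stamped
# transforms returned by ProcessFrame over tf2. Assumes a running ROS master
# and an iterable of (params, stamp) rows prepared by the caller.
import rospy
import tf2_ros

def broadcast_frames(rows):
    rospy.init_node('navtech_tf_broadcaster')
    broadcaster = tf2_ros.TransformBroadcaster()
    Tprev = build_se3_transform([0, 0, 0, 0, 0, 0])  # start from the identity pose
    for frame_nr, (params, stamp) in enumerate(rows):
        Tprev, t = ProcessFrame(params, Tprev, frame_nr, stamp)
        broadcaster.sendTransform(t)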
Example 7
    else:
        raise ValueError('Wrong camera type', args.camera)
    
    
    if not use_processed:
        i_img, submap = generate_vo_submap(args)

    # Get camera transformation
    with open(camera_extrinsics) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]

    G_camera_vehicle = build_se3_transform(extrinsics)
    submap = np.dot(G_camera_vehicle, submap)
    
    
    # Load image
    img_data = import_camera_trajectory(camera_timestamp_file, ins_data_file, 
                                        camera_dir)
    camera_trajectory, camera_model = img_data[0], img_data[1]
    camera_state = camera_trajectory[:, i_img]  # index of the current image (not i_start)
    image, pil_image = load_image(camera_dir, camera_state[6], camera_model)
    
    # Project points into image
    uv, depth = camera_model.project(submap, image.shape)
    
    # Get ratio of points in image to total points
    points_in_image_ratio = uv.shape[1] / submap.shape[1]
Example 8
def build_pointcloud(lidar_dir,
                     poses_file,
                     extrinsics_dir,
                     start_time,
                     end_time,
                     origin_time=-1):
    """Builds a pointcloud by combining multiple LIDAR scans with odometry information.

    Args:
        lidar_dir (str): Directory containing LIDAR scans.
        poses_file (str): Path to a file containing pose information. Can be VO or INS data.
        extrinsics_dir (str): Directory containing extrinsic calibrations.
        start_time (int): UNIX timestamp of the start of the window over which to build the pointcloud.
        end_time (int): UNIX timestamp of the end of the window over which to build the pointcloud.
        origin_time (int): UNIX timestamp of origin frame. Pointcloud coordinates are relative to this frame.

    Returns:
        numpy.ndarray: 4xn array of homogeneous (x, y, z, 1) coordinates of pointcloud
        numpy.array: array of n reflectance values or None if no reflectance values are recorded (LDMRS)

    Raises:
        ValueError: if specified window doesn't contain any laser scans.
        IOError: if scan files are not found.

    """
    if origin_time < 0:
        origin_time = start_time

    lidar = re.search(
        r'(lms_front|lms_rear|ldmrs|velodyne_left|velodyne_right)',
        lidar_dir).group(0)
    timestamps_path = os.path.join(lidar_dir, os.pardir, lidar + '.timestamps')

    timestamps = []
    with open(timestamps_path) as timestamps_file:
        for line in timestamps_file:
            timestamp = int(line.split(' ')[0])
            if start_time <= timestamp <= end_time:
                timestamps.append(timestamp)

    if len(timestamps) == 0:
        raise ValueError("No LIDAR data in the given time bracket.")

    with open(os.path.join(extrinsics_dir, lidar + '.txt')) as extrinsics_file:
        extrinsics = next(extrinsics_file)
    G_posesource_laser = build_se3_transform(
        [float(x) for x in extrinsics.split(' ')])

    poses_type = re.search(r'(vo|ins|rtk)\.csv', poses_file).group(1)

    if poses_type in ['ins', 'rtk']:
        with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
            extrinsics = next(extrinsics_file)
            G_posesource_laser = np.linalg.solve(
                build_se3_transform([float(x) for x in extrinsics.split(' ')]),
                G_posesource_laser)

        poses = interpolate_ins_poses(poses_file,
                                      timestamps,
                                      origin_time,
                                      use_rtk=(poses_type == 'rtk'))
    else:
        # sensor is VO, which is located at the main vehicle frame
        poses = interpolate_vo_poses(poses_file, timestamps, origin_time)

    pointcloud = np.array([[0], [0], [0], [0]])
    if lidar == 'ldmrs':
        reflectance = None
    else:
        reflectance = np.empty((0))

    for i in range(0, len(poses)):
        scan_path = os.path.join(lidar_dir, str(timestamps[i]) + '.bin')
        if "velodyne" not in lidar:
            if not os.path.isfile(scan_path):
                continue

            scan = np.fromfile(scan_path, np.double)

            scan = scan.reshape((len(scan) // 3, 3)).transpose()

            if lidar != 'ldmrs':
                # LMS scans are tuples of (x, y, reflectance)
                reflectance = np.concatenate(
                    (reflectance, np.ravel(scan[2, :])))
                scan[2, :] = np.zeros((1, scan.shape[1]))
        else:
            if os.path.isfile(scan_path):
                ptcld = load_velodyne_binary(scan_path)
            else:
                scan_path = os.path.join(lidar_dir,
                                         str(timestamps[i]) + '.png')
                if not os.path.isfile(scan_path):
                    continue
                ranges, intensities, angles, approximate_timestamps = load_velodyne_raw(
                    scan_path)
                ptcld = velodyne_raw_to_pointcloud(ranges, intensities, angles)

            reflectance = np.concatenate((reflectance, ptcld[3]))
            scan = ptcld[:3]

        scan = np.dot(np.dot(poses[i], G_posesource_laser),
                      np.vstack([scan, np.ones((1, scan.shape[1]))]))
        pointcloud = np.hstack([pointcloud, scan])

    pointcloud = pointcloud[:, 1:]
    if pointcloud.shape[1] == 0:
        raise IOError(
            "Could not find scan files for given time range in directory " +
            lidar_dir)

    return pointcloud, reflectance
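
# Hypothetical usage sketch (not from the original): build a +/- 10 second
# pointcloud around a chosen origin timestamp. Paths and the timestamp are
# placeholders.
if __name__ == '__main__':
    origin = 1403774284447583
    pointcloud, reflectance = build_pointcloud(
        'data/2014-05-06-12-54-54/lms_front',
        'data/2014-05-06-12-54-54/vo/vo.csv',
        'robotcar-dataset-sdk/extrinsics',
        origin - 1e7, origin + 1e7, origin)
    print(pointcloud.shape)  # (4, n): homogeneous points in the origin frame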
Example 9
    if reflectance is not None:
        colours = (reflectance - reflectance.min()) / (reflectance.max() -
                                                       reflectance.min())
        # Sigmoid contrast stretch centred on the mean reflectance
        colours = 1 / (1 + np.exp(-10 * (colours - colours.mean())))
    else:
        colours = 'gray'

    # Pointcloud visualisation using the legacy (pre-0.8) Open3D API
    vis = open3d.Visualizer()
    vis.create_window(window_name=os.path.basename(__file__))
    render_option = vis.get_render_option()
    render_option.background_color = np.array([0.1529, 0.1569, 0.1333],
                                              np.float32)
    render_option.point_color_option = open3d.PointColorOption.ZCoordinate
    coordinate_frame = open3d.geometry.create_mesh_coordinate_frame()
    vis.add_geometry(coordinate_frame)
    pcd = open3d.geometry.PointCloud()
    pcd.points = open3d.utility.Vector3dVector(-np.ascontiguousarray(
        pointcloud[[1, 0, 2]].transpose().astype(np.float64)))
    pcd.colors = open3d.utility.Vector3dVector(
        np.tile(colours[:, np.newaxis], (1, 3)).astype(np.float64))
    # Rotate pointcloud to align displayed coordinate frame colouring
    pcd.transform(build_se3_transform([0, 0, 0, np.pi, 0, -np.pi / 2]))
    vis.add_geometry(pcd)
    view_control = vis.get_view_control()
    params = view_control.convert_to_pinhole_camera_parameters()
    params.extrinsic = build_se3_transform(
        [0, 3, 10, 0, -np.pi * 0.42, -np.pi / 2])
    view_control.convert_from_pinhole_camera_parameters(params)
    vis.run()
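
# Note: the block above uses the legacy Open3D API. A hypothetical port (not
# from the original) to post-0.10 releases, where the visualiser lives in the
# open3d.visualization namespace; the random points are dummy stand-ins.
import numpy as np
import open3d

vis = open3d.visualization.Visualizer()
vis.create_window(window_name='pointcloud')
render_option = vis.get_render_option()
render_option.point_color_option = open3d.visualization.PointColorOption.ZCoordinate
vis.add_geometry(open3d.geometry.TriangleMesh.create_coordinate_frame())
pcd = open3d.geometry.PointCloud()
pcd.points = open3d.utility.Vector3dVector(np.random.rand(1000, 3))  # dummy points
vis.add_geometry(pcd)
vis.run()
vis.destroy_window()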
Example 10
pub_polar = rospy.Publisher('/Navtech/Polar', Image, queue_size=10)


radar_timestamps = np.loadtxt(timestamps_path, delimiter=' ', usecols=[0], dtype=np.int64)
GtPoseStamps=[]
stamp = rospy.Time.now()
with open(gt_path, newline='') as csv_file:  # read the ground-truth pose CSV
    csv_reader = csv.reader(csv_file, delimiter=',')
    for row in csv_reader:
        GtPoseStamps.append(row)


current_frame = 0
pose_init = [0, 0, 0, 0, 0, 0]
Tpose = build_se3_transform(pose_init)
curr_row = GtPoseStamps[1]  # skip the CSV header row
for radar_timestamp in radar_timestamps:

    if current_frame == len(GtPoseStamps):
        break

    filename = os.path.join(args.dir, str(radar_timestamp) + '.png')
    if not os.path.isfile(filename):
        raise FileNotFoundError("Could not find radar example: {}".format(filename))
Example 11
def main():
    args = get_arguments()

    camera = re.search(r'(stereo|mono_(left|right|rear))', args.dir).group(0)

    timestamps_path = os.path.join(args.dir, os.pardir, camera + '.timestamps')
    if not os.path.isfile(timestamps_path):
        timestamps_path = os.path.join(args.dir, os.pardir, os.pardir,
                                       camera + '.timestamps')
        if not os.path.isfile(timestamps_path):
            raise IOError("Could not find timestamps file")

    model = None
    if args.models_dir:
        model = CameraModel(args.models_dir, args.dir)

    output_dir = os.curdir
    if args.output_dir:
        output_dir = args.output_dir
    if not os.path.isdir(output_dir):
        raise IOError(output_dir + " is not an existing folder")

    result_list = []
    count = 0
    dictionary = {}
    t_records = []
    p_records = []
    angles_records = []
    intrinsic_matrix = None

    with open(args.poses_file) as vo_file:
        vo_reader = csv.reader(vo_file)
        headers = next(vo_file)
        for row in vo_reader:
            src_image_name = row[0]
            dst_image_name = row[1]
            src_image_filename = os.path.join(args.dir,
                                              src_image_name + '.png')
            dst_image_filename = os.path.join(args.dir,
                                              dst_image_name + '.png')
            if not os.path.isfile(src_image_filename) or not os.path.isfile(
                    dst_image_filename):
                continue
            if dst_image_name not in dictionary:
                img, orig_resolution = process_image(
                    load_image(dst_image_filename, model), args.crop,
                    args.scale)
                dictionary[dst_image_name] = count
                count = count + 1
                result_list.append(list(img))
            if src_image_name not in dictionary:
                img, orig_resolution = process_image(
                    load_image(src_image_filename, model), args.crop,
                    args.scale)
                dictionary[src_image_name] = count
                count = count + 1
                result_list.append(list(img))

            focal_length, principal_point = get_intrinsics_parameters(
                model.get_focal_length(), model.get_principal_point(),
                orig_resolution, args.crop, args.scale)
            src_image_idx = dictionary[src_image_name]
            dst_image_idx = dictionary[dst_image_name]
            xyzrpy = [float(v) for v in row[2:8]]
            rel_pose = build_se3_transform(xyzrpy)
            t_matrix = rel_pose[0:3]  # 3x4 matrix
            intrinsic_matrix = build_intrinsic_matrix(focal_length,
                                                      principal_point)
            p_matrix = intrinsic_matrix * t_matrix
            t_records.append((t_matrix, src_image_idx, dst_image_idx))
            p_records.append((p_matrix, src_image_idx, dst_image_idx))
            angles_records.append((xyzrpy, src_image_idx, dst_image_idx))

    transf = np.array(t_records,
                      dtype=[('T', ('float64', (3, 4))), ('src_idx', 'int32'),
                             ('dst_idx', 'int32')])
    proy = np.array(p_records,
                    dtype=[('P', ('float64', (3, 4))), ('src_idx', 'int32'),
                           ('dst_idx', 'int32')])
    angles = np.array(angles_records,
                      dtype=[('ang', ('float64', 6)), ('src_idx', 'int32'),
                             ('dst_idx', 'int32')])
    # Saved only once because it is constant for the whole dataset (or it should be)
    if intrinsic_matrix is not None:
        save_txt(os.path.join(output_dir, "intrinsic_matrix"),
                 intrinsic_matrix)
        save_txt(os.path.join(output_dir, "intrinsic_parameters"),
                 [focal_length, principal_point])
    result = list_to_array(result_list)
    save_txt(os.path.join(output_dir, 'images_shape'), result.shape, fmt='%i')
    print(result.shape)
    compressed_file_path = os.path.join(output_dir, 'images')
    savez_compressed(compressed_file_path, result)
    savez_compressed(os.path.join(output_dir, 't'), transf)
    savez_compressed(os.path.join(output_dir, 'p'), proy)
    savez_compressed(os.path.join(output_dir, 'angles'), angles)
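
# Hypothetical consumer (not from the original): the structured arrays saved
# above keep their field names, so relative poses can be read back pair by pair.
def load_pairs(output_dir):
    transf = np.load(os.path.join(output_dir, 't.npz'))['arr_0']
    for rec in transf:
        # rec['T'] is the 3x4 relative pose between the two image indices
        yield int(rec['src_idx']), int(rec['dst_idx']), rec['T']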
from transform import build_se3_transform

import my_params

np.set_printoptions(suppress=True)

DRAW_ON_IMAGE = True

output_image_dir = my_params.output_dir+'\\'+ my_params.dataset_no + '\\'
output_points_patch = my_params.output_dir+'\\'+ my_params.dataset_no + '_vo_points.csv'

if __name__ == '__main__':
    # Initial pose for the 2015 dataset (x, y, z, roll, pitch, yaw)
    xyzrpy = [620021.4778138875, 0, 5734795.685425566, 0.0128231, -0.0674645, -0.923368707]
    H = build_se3_transform(xyzrpy)
    print(H)
    poses = []              # list of absolute poses
    final_points = []       # list of (x, z) positions

    # First point
    poses.append(H)
    final_points.append((620021.4778138875, 5734795.685425566))
 
    x_0 = (H[0,3])
    z_0 = (H[2,3])
    print(x_0,z_0)
    plt.plot(x_0,z_0,'o',color='red')
    plt.show()
    # Get image    
from camera_model import CameraModel

# Load data
vo_directory = my_params.dataset_patch + 'vo//vo.csv'
lidar_folder_path = my_params.laser_dir
lidar_timestamps_path = my_params.dataset_patch + 'ldmrs.timestamps'

# Making Video
# fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
# vw = cv2.VideoWriter('laser_scan.mp4', fourcc, 30, (600, 800))

C_origin = np.zeros((3, 1))
R_origin = np.identity(3)
xyzrpy = [0, 0, 0, -0.083514, -0.009355, 1.087759]

abs_pose = build_se3_transform(xyzrpy)
abs_poses = [abs_pose]
H_new = abs_pose
vo_timestamps = []

# Blank image
reso = [600, 800]
scale = 0.5
vo_map = np.zeros((reso[0], reso[1], 3), np.uint8)  # 800*600 --> scale=1
start_map = (300, 150)

# vo_map = cv2.circle(vo_map,start_map,5,(0,0,255),thickness=3)
index = 0

with open(vo_directory) as vo_file:
    vo_reader = csv.reader(vo_file)
Example 14
# vw = cv2.VideoWriter('laser_scan.mp4', fourcc, 30, (600, 800))


# Initial data
# xyzrpy = [0, 0, 0, -0.090749, -0.000226, 4.211563]
# abs_poses = [abs_pose]


vo_on_map = []
obj_on_map = []
vo_timestamps = [0]
abs_poses = [ml.identity(4)]

scale = 1

abs_pose = build_se3_transform(my_params.xyzrpy)
abs_poses.append(abs_pose)
# Reading vo
print('Loading visual odometry...')
with open(vo_directory) as vo_file:
    vo_reader = csv.reader(vo_file)
    headers = next(vo_file)

    index = 0
    for row in vo_reader:
        timestamp = int(row[0])
        vo_timestamps.append(timestamp)
        # datetime = dt.utcfromtimestamp(timestamp/1000000)
       
        index += 1
        # if index > 3000 : 
Example 15
def main():
    # Read the pointcloud and make it homogeneous
    pointcloud_filename = "/data/lyh/lab/pcaifeat_RobotCar_v3_1/1400505794141322.txt"
    pointcloud = np.loadtxt(pointcloud_filename, delimiter=' ')
    pointcloud = np.hstack([pointcloud, np.ones((pointcloud.shape[0], 1))])

    # Load the camera model
    models_dir = "/data/lyh/lab/robotcar-dataset-sdk/models/"
    model = CameraModel(models_dir, "mono_left")

    # Load the camera global pose
    imgpos_path = "/data/lyh/lab/pcaifeat_RobotCar_v3_1/1400505794141322_imgpos.txt"
    print(imgpos_path)
    imgpos = {}
    with open(imgpos_path) as imgpos_file:
        for line in imgpos_file:
            pos = line.split(' ')
            # fields 1..16 hold the row-major 4x4 camera pose; drop the trailing token
            imgpos[pos[0]] = np.reshape(
                np.array([float(x) for x in pos[1:-1]]), [4, 4])

    # Read the camera and INS extrinsics
    extrinsics_path = "/data/lyh/lab/robotcar-dataset-sdk/extrinsics/mono_left.txt"
    print(extrinsics_path)
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_camera_vehicle = build_se3_transform(extrinsics)
    print(G_camera_vehicle)

    extrinsics_path = "/data/lyh/lab/robotcar-dataset-sdk/extrinsics/ins.txt"
    print(extrinsics_path)
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_ins_vehicle = build_se3_transform(extrinsics)
    print(G_ins_vehicle)
    G_camera_posesource = G_camera_vehicle * G_ins_vehicle

    # Transform the pointcloud into the camera frame
    pointcloud = np.dot(np.linalg.inv(imgpos["mono_left"]), pointcloud.T)
    pointcloud = np.dot(G_camera_posesource, pointcloud)

    # Project the pointcloud into the image
    image_path = "/data/lyh/lab/pcaifeat_RobotCar_v3_1/1400505794141322_mono_left.png"
    image = load_image(image_path)

    uv, depth = model.project(pointcloud, [1024, 1024])

    lut = model.bilinear_lut[:, 1::-1].T.reshape((2, 1024, 1024))
    u = map_coordinates(lut[0, :, :], uv, order=1)
    v = map_coordinates(lut[1, :, :], uv, order=1)
    uv = np.array([u, v])
    print(uv.shape)
    transform_matrix = np.zeros([64, 4096])
    for i in range(uv.shape[1]):
        if uv[0, i] == 0 and uv[1, i] == 0:
            continue
        cur_uv = np.rint(uv[:, i] / 128)
        row = cur_uv[1] * 8 + cur_uv[0]
        transform_matrix[int(row), i] = 1

    aa = np.sum(transform_matrix, 1).reshape([8, 8])
    print(np.sum(aa))
    plt.figure(1)
    plt.imshow(aa)

    plt.figure(2)
    plt.scatter(np.ravel(uv[0, :]),
                np.ravel(uv[1, :]),
                s=2,
                edgecolors='none',
                cmap='jet')
    plt.xlim(0, 1024)
    plt.ylim(1024, 0)
    plt.xticks([])
    plt.yticks([])
    plt.show()
def build_pointcloud(lidar_dir, poses_file, extrinsics_dir, start_time, end_time, origin_time=-1):
    """Builds a pointcloud by combining multiple LIDAR scans with odometry information.

    Args:
        lidar_dir (str): Directory containing LIDAR scans.
        poses_file (str): Path to a file containing pose information. Can be VO or INS data.
        extrinsics_dir (str): Directory containing extrinsic calibrations.
        start_time (int): UNIX timestamp of the start of the window over which to build the pointcloud.
        end_time (int): UNIX timestamp of the end of the window over which to build the pointcloud.
        origin_time (int): UNIX timestamp of origin frame. Pointcloud coordinates are relative to this frame.

    Returns:
        numpy.ndarray: 4xn array of homogeneous (x, y, z, 1) coordinates of pointcloud
        numpy.array: array of n reflectance values or None if no reflectance values are recorded (LDMRS)

    Raises:
        ValueError: if specified window doesn't contain any laser scans.
        IOError: if scan files are not found.

    """
    if origin_time < 0:
        origin_time = start_time

    lidar = re.search(r'(lms_front|lms_rear|ldmrs)', lidar_dir).group(0)
    timestamps_path = os.path.join(lidar_dir, os.pardir, lidar + '.timestamps')

    timestamps = []
    with open(timestamps_path) as timestamps_file:
        for line in timestamps_file:
            timestamp = int(line.split(' ')[0])
            if start_time <= timestamp <= end_time:
                timestamps.append(timestamp)

    if len(timestamps) == 0:
        raise ValueError("No LIDAR data in the given time bracket.")

    with open(os.path.join(extrinsics_dir, lidar + '.txt')) as extrinsics_file:
        extrinsics = next(extrinsics_file)
    G_posesource_laser = build_se3_transform([float(x) for x in extrinsics.split(' ')])

    poses_type = re.search(r'(vo|ins)\.csv', poses_file).group(1)

    if poses_type == 'ins':
        with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
            extrinsics = next(extrinsics_file)
            G_posesource_laser = np.linalg.solve(build_se3_transform([float(x) for x in extrinsics.split(' ')]),
                                                 G_posesource_laser)

        poses = interpolate_ins_poses(poses_file, timestamps, origin_time)
    else:
        # sensor is VO, which is located at the main vehicle frame
        poses = interpolate_vo_poses(poses_file, timestamps, origin_time)

    pointcloud = np.array([[0], [0], [0], [0]])
    if lidar == 'ldmrs':
        reflectance = None
    else:
        reflectance = np.empty((0))

    for i in range(0, len(poses)):
        scan_path = os.path.join(lidar_dir, str(timestamps[i]) + '.bin')
        if not os.path.isfile(scan_path):
            continue

        scan = np.fromfile(scan_path, np.double)

        scan = scan.reshape((len(scan) // 3, 3)).transpose()

        if lidar != 'ldmrs':
            # LMS scans are tuples of (x, y, reflectance)
            reflectance = np.concatenate((reflectance, np.ravel(scan[2, :])))
            scan[2, :] = np.zeros((1, scan.shape[1]))

        scan = np.dot(np.dot(poses[i], G_posesource_laser), np.vstack([scan, np.ones((1, scan.shape[1]))]))
        pointcloud = np.hstack([pointcloud, scan])

    pointcloud = pointcloud[:, 1:]
    if pointcloud.shape[1] == 0:
        raise IOError("Could not find scan files for given time range in directory " + lidar_dir)

    return pointcloud, reflectance
def generate_pointcloud():

    WRITE_IMAGE = 1
    DRAW_MAP = 1

    # Data input
    image_dir = my_params.image_dir
    ldrms_dir = my_params.laser_dir
    lmsfront_dir = my_params.lmsfront_dir
    lmsrear_dir  = my_params.lmsrear_dir
    poses_file = my_params.poses_file
    models_dir = my_params.model_dir
    extrinsics_dir = my_params.extrinsics_dir

    output_img_dir   = my_params.output_dir2 + 'pl_img_'   + my_params.dataset_no + '//'
    output_ldrms_dir = my_params.output_dir2 + 'pl_ldrms_' + my_params.dataset_no + '//'
    output_front_dir = my_params.output_dir2 + 'pl_front_' + my_params.dataset_no + '//'
    output_rear_dir  = my_params.output_dir2 + 'pl_rear_'  + my_params.dataset_no + '//'
    
    map_ldrms_dir = my_params.output_dir2 + 'map_ldrms_' + my_params.dataset_no + '//'
    map_front_dir = my_params.output_dir2 + 'map_front_' + my_params.dataset_no + '//'
    map_rear_dir  = my_params.output_dir2 + 'map_rear_'  + my_params.dataset_no + '//'


    model = CameraModel(models_dir, image_dir)
    extrinsics_path = os.path.join(extrinsics_dir, model.camera + '.txt')
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_camera_vehicle = build_se3_transform(extrinsics)
    G_camera_posesource = None

    poses_type = re.search(r'(vo|ins|rtk)\.csv', poses_file).group(1)
    if poses_type in ['ins', 'rtk']:
        with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
            extrinsics = next(extrinsics_file)
            G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
    else:
        # VO frame and vehicle frame are the same
        G_camera_posesource = G_camera_vehicle

    image_size = (960,1280,3)

    timestamps_path = my_params.dataset_patch + model.camera + '.timestamps'
    timestamp = 0
    plt.figure()
    with open(timestamps_path) as timestamps_file:
        for i, line in enumerate(timestamps_file):
            
            if i < 799:
                # skip frames before index 799
                continue

            timestamp = int(line.split(' ')[0])
            start_time = timestamp - 1e6
            
            frame_path = my_params.reprocess_image_dir + '//' + str(timestamp) + '.png'
            frame = cv2.imread(frame_path) 
            
            print('process image ',i,'-',timestamp)


            pl_ldrms = np.zeros((960, 1280), dtype=int)
            pl_front = np.zeros((960, 1280), dtype=int)

            # LDRMS
            ldrms_pointcloud, _ = build_pointcloud(ldrms_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 2e6, timestamp)
            ldrms_pointcloud = np.dot(G_camera_posesource, ldrms_pointcloud)
            uv, depth = model.project(ldrms_pointcloud,image_size)

            x_p = np.ravel(uv[0,:])
            y_p = np.ravel(uv[1,:])
            z_p = np.ravel(depth)

            map_ldrms = (np.array(ldrms_pointcloud[0:3,:])).transpose()
            map_ldrms_filename = map_ldrms_dir + str(timestamp) + '.csv'
            np.savetxt(map_ldrms_filename, map_ldrms, delimiter=",")

            if DRAW_MAP:
                map_x = np.ravel(np.array(ldrms_pointcloud[0, :]))
                map_y = np.ravel(np.array(ldrms_pointcloud[1, :]))
                plt.scatter(map_y, map_x, c='b', marker='.', zorder=1)

            # LDRMS pointcloud to CSV
            # for k in range(uv.shape[1]):

            #     pl_ldrms[int(y_p[k]),int(x_p[k])] = int(100*depth[k])

            # ldrms_filename = output_ldrms_dir + str(timestamp) + '.csv'
            # np.savetxt(ldrms_filename, pl_ldrms, delimiter=",")
        

            # LMS-FRONT
            front_pointcloud, _ = build_pointcloud(lmsfront_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 1e6, timestamp)
            front_pointcloud = np.dot(G_camera_posesource, front_pointcloud)
            wh,xrange = model.project(front_pointcloud,image_size)

            x_f = np.ravel(wh[0,:])
            y_f = np.ravel(wh[1,:])
            z_f = np.ravel(xrange)

            map_front = (np.array(front_pointcloud[0:3,:])).transpose()
            map_front_filename = map_front_dir + str(timestamp) + '.csv'
            np.savetxt(map_front_filename, map_front, delimiter=",")

            if DRAW_MAP:
                map_y = np.ravel(np.array(front_pointcloud[1, :]))
                map_z = -np.ravel(np.array(front_pointcloud[2, :]))
                plt.scatter(map_y, map_z, c='r', marker='.', zorder=1)

            # LMS-FRONT pointcloud to CSV
            # for k in range(wh.shape[1]):

            #     pl_ldrms[int(y_f[k]),int(x_f[k])] = int(100*xrange[k])

            # front_filename = output_front_dir + str(timestamp) + '.csv'
            # np.savetxt(front_filename, pl_ldrms, delimiter=",")

            # LMS-REAR

            rear_pointcloud, _ = build_pointcloud(lmsrear_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 2e6, timestamp)
            rear_pointcloud = np.dot(G_camera_posesource, rear_pointcloud)

            map_rear = (np.array(rear_pointcloud[0:3,:])).transpose()
            map_rear_filename = map_rear_dir + str(timestamp) + '.csv'
            np.savetxt(map_rear_filename, map_rear, delimiter=",")

            if DRAW_MAP:
                map_y = -np.ravel(np.array(rear_pointcloud[1, :]))
                map_z = np.ravel(np.array(rear_pointcloud[2, :]))
                plt.scatter(map_y, map_z, c='g', marker='.', zorder=1)


            if (WRITE_IMAGE):
                for k in range(uv.shape[1]):

                    color = (int(255-8*depth[k]),255-3*depth[k],50+3*depth[k])
                    frame= cv2.circle(frame, (int(x_p[k]), int(y_p[k])), 1, color, 1)

                for k in range(wh.shape[1]):

                    color = (int(255-8*xrange[k]),255-3*xrange[k],50+3*xrange[k])
                    frame= cv2.circle(frame, (int(x_f[k]), int(y_f[k])), 1, color, 1)

                cv2.imshow('frame',frame)
                image_filename = output_img_dir   + str(timestamp) + '.png'
                cv2.imwrite(image_filename, frame)
                cv2.waitKey(1)
            # plt.show()
            plt.pause(0.1)
Example 18
def main():
    velodyne_dir = args.dir
    if velodyne_dir[-1] == '/':
        velodyne_dir = velodyne_dir[:-1]

    velodyne_sensor = os.path.basename(velodyne_dir)
    if velodyne_sensor not in ["velodyne_left", "velodyne_right"]:
        raise ValueError(
            "Velodyne directory not valid: {}".format(velodyne_dir))

    timestamps_path = velodyne_dir + '.timestamps'
    if not os.path.isfile(timestamps_path):
        raise IOError(
            "Could not find timestamps file: {}".format(timestamps_path))

    title = "Velodyne Visualisation Example"
    extension = ".bin" if args.mode == "bin_ptcld" else ".png"
    velodyne_timestamps = np.loadtxt(timestamps_path,
                                     delimiter=' ',
                                     usecols=[0],
                                     dtype=np.int64)
    colourmap = (get_cmap("viridis")(np.linspace(0, 1, 255))[:, :3] *
                 255).astype(np.uint8)[:, ::-1]
    interp_angles = np.mod(np.linspace(np.pi, 3 * np.pi, 720), 2 * np.pi)
    vis = None

    for velodyne_timestamp in velodyne_timestamps:

        filename = os.path.join(args.dir, str(velodyne_timestamp) + extension)

        if args.mode == "bin_ptcld":
            ptcld = load_velodyne_binary(filename)
        else:
            ranges, intensities, angles, approximate_timestamps = load_velodyne_raw(
                filename)

            if args.mode == "raw_ptcld":
                ptcld = velodyne_raw_to_pointcloud(ranges, intensities, angles)
            elif args.mode == "raw_interp":
                intensities = interpolate.interp1d(
                    angles[0], intensities, bounds_error=False)(interp_angles)
                ranges = interpolate.interp1d(
                    angles[0], ranges, bounds_error=False)(interp_angles)
                intensities[np.isnan(intensities)] = 0
                ranges[np.isnan(ranges)] = 0

        if '_ptcld' in args.mode:
            # Pointcloud Visualisation using Open3D
            if vis is None:
                # Legacy (pre-0.8) Open3D API
                vis = open3d.Visualizer()
                vis.create_window(window_name=title)
                pcd = open3d.geometry.PointCloud()
                # initialise the geometry pre loop
                pcd.points = open3d.utility.Vector3dVector(
                    ptcld[:3].transpose().astype(np.float64))
                pcd.colors = open3d.utility.Vector3dVector(
                    np.tile(ptcld[3:].transpose(), (1, 3)).astype(np.float64))
                # Rotate pointcloud to align displayed coordinate frame colouring
                pcd.transform(
                    build_se3_transform([0, 0, 0, np.pi, 0, -np.pi / 2]))
                vis.add_geometry(pcd)
                render_option = vis.get_render_option()
                render_option.background_color = np.array(
                    [0.1529, 0.1569, 0.1333], np.float32)
                render_option.point_color_option = open3d.PointColorOption.ZCoordinate
                coordinate_frame = open3d.geometry.create_mesh_coordinate_frame(
                )
                vis.add_geometry(coordinate_frame)
                view_control = vis.get_view_control()
                params = view_control.convert_to_pinhole_camera_parameters()
                params.extrinsic = build_se3_transform(
                    [0, 3, 10, 0, -np.pi * 0.42, -np.pi / 2])
                view_control.convert_from_pinhole_camera_parameters(params)

            pcd.points = open3d.utility.Vector3dVector(
                ptcld[:3].transpose().astype(np.float64))
            pcd.colors = open3d.utility.Vector3dVector(
                np.tile(ptcld[3:].transpose(), (1, 3)).astype(np.float64) / 40)
            vis.update_geometry()
            vis.poll_events()
            vis.update_renderer()

        else:
            # Ranges and Intensities visualisation using OpenCV
            intensities_vis = colourmap[np.clip(
                (intensities * 4).astype(int), 0, colourmap.shape[0] - 1)]
            ranges_vis = colourmap[np.clip((ranges * 4).astype(int), 0,
                                           colourmap.shape[0] - 1)]
            visualisation = np.concatenate((intensities_vis, ranges_vis), 0)
            visualisation = cv2.resize(visualisation,
                                       None,
                                       fy=6 * args.scale,
                                       fx=args.scale,
                                       interpolation=cv2.INTER_NEAREST)
            cv2.imshow(title, visualisation)
            cv2.waitKey(1)
Example 19
parser = argparse.ArgumentParser(description='Project LIDAR data into camera image')
parser.add_argument('--image_dir', type=str, help='Directory containing images')
parser.add_argument('--laser_dir', type=str, help='Directory containing LIDAR scans')
parser.add_argument('--poses_file', type=str, help='File containing either INS or VO poses')
parser.add_argument('--models_dir', type=str, help='Directory containing camera models')
parser.add_argument('--extrinsics_dir', type=str, help='Directory containing sensor extrinsics')
parser.add_argument('--image_idx', type=int, help='Index of image to display')

args = parser.parse_args()

model = CameraModel(args.models_dir, args.image_dir)

extrinsics_path = os.path.join(args.extrinsics_dir, model.camera + '.txt')
with open(extrinsics_path) as extrinsics_file:
    extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]

G_camera_vehicle = build_se3_transform(extrinsics)
G_camera_posesource = None

poses_type = re.search(r'(vo|ins)\.csv', args.poses_file).group(1)
if poses_type == 'ins':
    with open(os.path.join(args.extrinsics_dir, 'ins.txt')) as extrinsics_file:
        extrinsics = next(extrinsics_file)
        G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
else:
    # VO frame and vehicle frame are the same
    G_camera_posesource = G_camera_vehicle


timestamps_path = os.path.join(args.image_dir, os.pardir, model.camera + '.timestamps')
if not os.path.isfile(timestamps_path):
    timestamps_path = os.path.join(args.image_dir, os.pardir, os.pardir, model.camera + '.timestamps')
Example 20
def play_vo():
    import numpy.matlib as ml
    import csv
    from interpolate_poses import interpolate_poses
    from transform import build_se3_transform

    # Read data
    vo_directory = my_params.dataset_patch + 'vo\\vo.csv'
    output_image_dir = my_params.output_dir + '\\' + my_params.dataset_no + '\\'
    output_points_patch = my_params.output_dir + '\\' + my_params.dataset_no + '_vo_admin.csv'

    # Fixed start point (instead of the identity pose)

    abs_pose = build_se3_transform(my_params.xyzrpy)
    abs_poses = [abs_pose]
    with open(vo_directory) as vo_file:
        vo_reader = csv.reader(vo_file)
        headers = next(vo_file)

        index = 0
        for row in vo_reader:
            timestamp = int(row[0])

            xyzrpy = [float(v) for v in row[2:8]]
            rel_pose = build_se3_transform(xyzrpy)
            abs_pose = abs_poses[-1] * rel_pose
            abs_poses.append(abs_pose)

            index += 1
            if index > 2000: break

    # abs_quaternions = np.zeros((4, len(abs_poses)))
    abs_positions = np.zeros((3, len(abs_poses)))

    print('num of point:', len(abs_poses))

    # plt.figure()
    # plt.gca().set_aspect('equal', adjustable='box')
    fig, ax = plt.subplots()

    # background image from openstreetmap.org
    ruh_m = plt.imread(my_params.project_patch + 'rtk\\' +
                       my_params.dataset_no + '.png')

    points = []
    for i, pose in enumerate(abs_poses):
        # abs_quaternions[:, i] = so3_to_quaternion(pose[0:3, 0:3])
        abs_positions[:, i] = np.ravel(pose[0:3, 3])
        point = (-abs_positions[0, i], abs_positions[1, i])  # -x, y of point
        ax.scatter(point[0], point[1], zorder=1, alpha=0.2, c='b', marker='.')
        points.append(point)

    points = np.array(points)
    # print(points.shape)

    # Save and quit
    np.savetxt(output_points_patch, points, delimiter=",")

    BBox = (np.min(points[:, 0]), np.max(points[:, 0]), np.min(points[:, 1]),
            np.max(points[:, 1]))

    ax.set_title('Plotting VO Map')
    ax.set_xlim(BBox[0], BBox[1])
    ax.set_ylim(BBox[2], BBox[3])
    ax.imshow(ruh_m, zorder=0, extent=BBox, aspect='equal')
    plt.show()