def init_camera_model_posture():
	global CAMERA_MODEL
	global G_CAMERA_POSESOURCE
	models_dir = "/data/lyh/lab/robotcar-dataset-sdk/models/"
	CAMERA_MODEL = CameraModel(models_dir, "stereo_centre")
	#read the camera and ins extrinsics
	extrinsics_path = "/data/lyh/lab/robotcar-dataset-sdk/extrinsics/stereo.txt"
	print(extrinsics_path)
	with open(extrinsics_path) as extrinsics_file:
		extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
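	# build_se3_transform expects [x, y, z, roll, pitch, yaw] and returns a 4x4 SE(3) homogeneous transform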
	G_camera_vehicle = build_se3_transform(extrinsics)
	print(G_camera_vehicle)
	
	extrinsics_path = "/data/lyh/lab/robotcar-dataset-sdk/extrinsics/ins.txt"
	print(extrinsics_path)
	with open(extrinsics_path) as extrinsics_file:
		extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
	G_ins_vehicle = build_se3_transform(extrinsics)
	print(G_ins_vehicle)
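	# Composed transform used to bring poses/points from the INS (pose source) frame into the camera frame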
	G_CAMERA_POSESOURCE = G_camera_vehicle * G_ins_vehicle
Example No. 2
import argparse
import os
import re

from build_pointcloud import build_pointcloud
from transform import build_se3_transform
from image import load_image
from camera_model import CameraModel

parser = argparse.ArgumentParser(description='Project LIDAR data into camera image')
parser.add_argument('--image_dir', type=str, help='Directory containing images')
parser.add_argument('--laser_dir', type=str, help='Directory containing LIDAR scans')
parser.add_argument('--poses_file', type=str, help='File containing either INS or VO poses')
parser.add_argument('--models_dir', type=str, help='Directory containing camera models')
parser.add_argument('--extrinsics_dir', type=str, help='Directory containing sensor extrinsics')
parser.add_argument('--image_idx', type=int, help='Index of image to display')

args = parser.parse_args()

model = CameraModel(args.models_dir, args.image_dir)

extrinsics_path = os.path.join(args.extrinsics_dir, model.camera + '.txt')
with open(extrinsics_path) as extrinsics_file:
    extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]

G_camera_vehicle = build_se3_transform(extrinsics)
G_camera_posesource = None

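# Determine from the filename whether the poses file contains VO or INS poses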
poses_type = re.search(r'(vo|ins)\.csv', args.poses_file).group(1)
if poses_type == 'ins':
    with open(os.path.join(args.extrinsics_dir, 'ins.txt')) as extrinsics_file:
        extrinsics = next(extrinsics_file)
        G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
else:
    # VO frame and vehicle frame are the same
    G_camera_posesource = G_camera_vehicle
args = parser.parse_args()

camera = re.search('(stereo|mono_(left|right|rear))', args.dir).group(0)

timestamps_path = os.path.join(
    os.path.join(args.dir, os.pardir, camera + '.timestamps'))
if not os.path.isfile(timestamps_path):
    timestamps_path = os.path.join(args.dir, os.pardir, os.pardir,
                                   camera + '.timestamps')
    if not os.path.isfile(timestamps_path):
        raise IOError("Could not find timestamps file")

model = None
if args.models_dir:
    model = CameraModel(args.models_dir, args.dir)

current_chunk = 0
timestamps_file = open(timestamps_path)
i = 0
for line in timestamps_file:
    tokens = line.split()
    datetime = dt.utcfromtimestamp(int(tokens[0]) / 1000000)
    chunk = int(tokens[1])

    filename = os.path.join(args.dir, tokens[0] + '.png')
    if not os.path.isfile(filename):
        if chunk != current_chunk:
            print("Chunk " + str(chunk) + " not found")
            current_chunk = chunk
        continue
def main(_config, seed):
    global EPOCH, weights
    if _config['weight'] is not None:
        weights = _config['weight']

    dataset_class = DatasetVisibilityKittiSingle
    img_shape = (384, 1280)

    split = 'test'
    if _config['random_initial_pose']:
        split = 'test_random'
    maps_folder = 'local_maps'
    if _config['maps_folder'] is not None:
        maps_folder = _config['maps_folder']
    if _config['test_sequence'] is None:
        raise TypeError('test_sequences cannot be None')
    else:
        if isinstance(_config['test_sequence'], int):
            _config['test_sequence'] = f"{_config['test_sequence']:02d}"
        dataset_val = dataset_class(_config['data_folder'],
                                    max_r=_config['max_r'],
                                    max_t=_config['max_t'],
                                    split=split,
                                    use_reflectance=_config['use_reflectance'],
                                    maps_folder=maps_folder,
                                    test_sequence=_config['test_sequence'])

    np.random.seed(seed)
    torch.random.manual_seed(seed)

    def init_fn(x):
        return _init_fn(x, seed)

    num_worker = 6
    batch_size = 1

    TestImgLoader = torch.utils.data.DataLoader(dataset=dataset_val,
                                                shuffle=False,
                                                batch_size=batch_size,
                                                num_workers=num_worker,
                                                worker_init_fn=init_fn,
                                                collate_fn=merge_inputs,
                                                drop_last=False,
                                                pin_memory=False)

    print(len(TestImgLoader))

    models = []
    for i in range(len(weights)):
        if _config['network'].startswith('PWC'):
            feat = 1
            md = 4
            split = _config['network'].split('_')
            for item in split[1:]:
                if item.startswith('f'):
                    feat = int(item[-1])
                elif item.startswith('md'):
                    md = int(item[2:])
            assert 0 < feat < 7, "Feature Number from PWC has to be between 1 and 6"
            assert 0 < md, "md must be positive"
            model = CMRNet(img_shape,
                           use_feat_from=feat,
                           md=md,
                           use_reflectance=_config['use_reflectance'])
        else:
            raise TypeError("Network unknown")
        checkpoint = torch.load(weights[i], map_location='cpu')
        saved_state_dict = checkpoint['state_dict']
        model.load_state_dict(saved_state_dict)
        model = model.to(device)
        model.eval()
        models.append(model)
        if i == 0:
            _config['occlusion_threshold'] = checkpoint['config'][
                'occlusion_threshold']
            _config['occlusion_kernel'] = checkpoint['config'][
                'occlusion_kernel']
        else:
            assert _config['occlusion_threshold'] == checkpoint['config'][
                'occlusion_threshold']
            assert _config['occlusion_kernel'] == checkpoint['config'][
                'occlusion_kernel']

    if _config['save_log']:
        log_file = f'./results_for_paper/log_seq{_config["test_sequence"]}.csv'
        log_file = open(log_file, 'w')
        log_file = csv.writer(log_file)
        header = ['frame']
        for i in range(len(weights) + 1):
            header += [
                f'iter{i}_error_t', f'iter{i}_error_r', f'iter{i}_error_x',
                f'iter{i}_error_y', f'iter{i}_error_z', f'iter{i}_error_r',
                f'iter{i}_error_p', f'iter{i}_error_y'
            ]
        log_file.writerow(header)

    show = _config['show']
    show = True
    errors_r = []
    errors_t = []
    errors_t2 = []
    errors_rpy = []
    all_RTs = []

    prev_tr_error = None
    prev_rot_error = None

    for i in range(len(weights) + 1):
        errors_r.append([])
        errors_t.append([])
        errors_t2.append([])
        errors_rpy.append([])

    for batch_idx, sample in enumerate(TestImgLoader):

        log_string = [str(batch_idx)]

        lidar_input = []
        rgb_input = []
        shape_pad = [0, 0, 0, 0]

        if batch_idx == 0 or not _config['use_prev_output']:
            # Here, set the input pose of the current frame relative to the ground truth
            sample['tr_error'] = sample['tr_error'].cuda()
            sample['rot_error'] = sample['rot_error'].cuda()
        else:
            sample['tr_error'] = prev_tr_error
            sample['rot_error'] = prev_rot_error

        for idx in range(len(sample['rgb'])):

            real_shape = [
                sample['rgb'][idx].shape[1], sample['rgb'][idx].shape[2],
                sample['rgb'][idx].shape[0]
            ]

            # ProjectPointCloud in RT-pose
            sample['point_cloud'][idx] = sample['point_cloud'][idx].cuda()
            pc_rotated = sample['point_cloud'][idx].clone()
            reflectance = None
            if _config['use_reflectance']:
                reflectance = sample['reflectance'][idx].cuda()

            R = mathutils.Quaternion(sample['rot_error'][idx])
            T = mathutils.Vector(sample['tr_error'][idx])

            pc_rotated = rotate_back(pc_rotated, R, T)
            cam_params = sample['calib'][idx].cuda()
            cam_model = CameraModel()
            cam_model.focal_length = cam_params[:2]
            cam_model.principal_point = cam_params[2:]
            uv, depth, points, refl = cam_model.project_pytorch(
                pc_rotated, real_shape, reflectance)
            uv = uv.t().int()
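            # Start from a large sentinel depth (1000 m); pixels that receive no LiDAR point are reset to 0 below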
            depth_img = torch.zeros(real_shape[:2],
                                    device='cuda',
                                    dtype=torch.float)
            depth_img += 1000.
            depth_img = visibility.depth_image(uv, depth, depth_img,
                                               uv.shape[0], real_shape[1],
                                               real_shape[0])
            depth_img[depth_img == 1000.] = 0.

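            # Occlusion filtering: keep only points visible from the camera (not hidden behind closer surfaces)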
            projected_points = torch.zeros_like(depth_img, device='cuda')
            projected_points = visibility.visibility2(
                depth_img, cam_params, projected_points, depth_img.shape[1],
                depth_img.shape[0], _config['occlusion_threshold'],
                _config['occlusion_kernel'])

            if _config['use_reflectance']:
                uv = uv.long()
                indexes = projected_points[uv[:, 1], uv[:, 0]] == depth
                refl_img = torch.zeros(real_shape[:2],
                                       device='cuda',
                                       dtype=torch.float)
                refl_img[uv[indexes, 1], uv[indexes, 0]] = refl[0, indexes]

            projected_points /= 100.
            if not _config['use_reflectance']:
                projected_points = projected_points.unsqueeze(0)
            else:
                projected_points = torch.stack((projected_points, refl_img))

            rgb = sample['rgb'][idx].cuda()

            shape_pad[3] = (img_shape[0] - rgb.shape[1])
            shape_pad[1] = (img_shape[1] - rgb.shape[2])

            rgb = F.pad(rgb, shape_pad)
            projected_points = F.pad(projected_points, shape_pad)

            rgb_input.append(rgb)
            lidar_input.append(projected_points)

        lidar_input = torch.stack(lidar_input)
        rgb_input = torch.stack(rgb_input)
        if show:
            out0 = overlay_imgs(rgb, lidar_input)

            cv2.imshow("INPUT", out0[:, :, [2, 1, 0]])
            cv2.waitKey(1)

            pc_GT = sample['point_cloud'][idx].clone()

            uv, depth, _, refl = cam_model.project_pytorch(pc_GT, real_shape)
            uv = uv.t().int()
            depth_img = torch.zeros(real_shape[:2],
                                    device='cuda',
                                    dtype=torch.float)
            depth_img += 1000.
            depth_img = visibility.depth_image(uv, depth, depth_img,
                                               uv.shape[0], real_shape[1],
                                               real_shape[0])
            depth_img[depth_img == 1000.] = 0.

            projected_points = torch.zeros_like(depth_img, device='cuda')
            projected_points = visibility.visibility2(
                depth_img, cam_params, projected_points, depth_img.shape[1],
                depth_img.shape[0], _config['occlusion_threshold'],
                _config['occlusion_kernel'])
            projected_points /= 100.

            projected_points = F.pad(projected_points, shape_pad)

            lidar_GT = projected_points.unsqueeze(0).unsqueeze(0)
            out1 = overlay_imgs(rgb_input[0], lidar_GT)
            cv2.imshow("GT", out1[:, :, [2, 1, 0]])
            # plt.figure()
            # plt.imshow(out1)
            # if batch_idx == 0:
            #     # import ipdb; ipdb.set_trace()
            #     out2 = overlay_imgs(sample['rgb'][0], lidar_input[:,:,:,1241])
            #     plt.figure()
            #     plt.imshow(out2)
            #     io.imshow(lidar_input[0][0].cpu().numpy(), cmap='jet')
            #     io.show()
        rgb = rgb_input.to(device)
        lidar = lidar_input.to(device)
        target_transl = sample['tr_error'].to(device)
        target_rot = sample['rot_error'].to(device)

        point_cloud = sample['point_cloud'][0].to(device)
        reflectance = None
        if _config['use_reflectance']:
            reflectance = sample['reflectance'][0].to(device)
        camera_model = cam_model

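        # Build the 4x4 ground-truth perturbation (translation composed with rotation); RT1 is its inverse, i.e. the correction the network should recover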
        R = quat2mat(target_rot[0])
        T = tvector2mat(target_transl[0])
        RT1_inv = torch.mm(T, R)
        RT1 = RT1_inv.clone().inverse()

        rotated_point_cloud = rotate_forward(point_cloud, RT1)
        RTs = [RT1]

        T_composed = RT1[:3, 3]
        R_composed = quaternion_from_matrix(RT1)
        errors_t[0].append(T_composed.norm().item())
        errors_t2[0].append(T_composed)
        errors_r[0].append(
            quaternion_distance(
                R_composed.unsqueeze(0),
                torch.tensor([1., 0., 0., 0.],
                             device=R_composed.device).unsqueeze(0),
                R_composed.device))
        # rpy_error = quaternion_to_tait_bryan(R_composed)
        rpy_error = mat2xyzrpy(RT1)[3:]

        rpy_error *= (180.0 / 3.141592)
        errors_rpy[0].append(rpy_error)
        log_string += [
            str(errors_t[0][-1]),
            str(errors_r[0][-1]),
            str(errors_t2[0][-1][0].item()),
            str(errors_t2[0][-1][1].item()),
            str(errors_t2[0][-1][2].item()),
            str(errors_rpy[0][-1][0].item()),
            str(errors_rpy[0][-1][1].item()),
            str(errors_rpy[0][-1][2].item())
        ]

        if batch_idx == 0:
            print(f'Initial T_error: {errors_t[0]}')
            print(f'Initial R_error: {errors_r[0]}')
        start = 0

        # Run model
        with torch.no_grad():
            for iteration in range(start, len(weights)):
                # Run the i-th network
                T_predicted, R_predicted = models[iteration](rgb, lidar)
                if _config['rot_transl_separated'] and iteration == 0:
                    T_predicted = torch.tensor([[0., 0., 0.]], device='cuda')
                if _config['rot_transl_separated'] and iteration == 1:
                    R_predicted = torch.tensor([[1., 0., 0., 0.]],
                                               device='cuda')

                # Project the points in the new pose predicted by the i-th network
                R_predicted = quat2mat(R_predicted[0])
                T_predicted = tvector2mat(T_predicted[0])
                RT_predicted = torch.mm(T_predicted, R_predicted)
                RTs.append(torch.mm(RTs[iteration], RT_predicted))

                rotated_point_cloud = rotate_forward(rotated_point_cloud,
                                                     RT_predicted)

                uv2, depth2, _, refl = camera_model.project_pytorch(
                    rotated_point_cloud, real_shape, reflectance)
                uv2 = uv2.t().int()
                depth_img2 = torch.zeros(real_shape[:2], device=device)
                depth_img2 += 1000.
                depth_img2 = visibility.depth_image(uv2, depth2, depth_img2,
                                                    uv2.shape[0],
                                                    real_shape[1],
                                                    real_shape[0])
                depth_img2[depth_img2 == 1000.] = 0.

                out_cuda2 = torch.zeros_like(depth_img2, device=device)
                out_cuda2 = visibility.visibility2(
                    depth_img2, cam_params, out_cuda2, depth_img2.shape[1],
                    depth_img2.shape[0], _config['occlusion_threshold'],
                    _config['occlusion_kernel'])

                if _config['use_reflectance']:
                    uv = uv.long()
                    indexes = projected_points[uv[:, 1], uv[:, 0]] == depth
                    refl_img = torch.zeros(real_shape[:2],
                                           device='cuda',
                                           dtype=torch.float)
                    refl_img[uv[indexes, 1], uv[indexes, 0]] = refl[0, indexes]
                    refl_img = F.pad(refl_img, shape_pad)

                out_cuda2 = F.pad(out_cuda2, shape_pad)

                lidar = out_cuda2.clone()
                lidar /= 100.
                if not _config['use_reflectance']:
                    lidar = lidar.unsqueeze(0)
                else:
                    lidar = torch.stack((lidar, refl_img))
                lidar = lidar.unsqueeze(0)
                if show:
                    out3 = overlay_imgs(rgb[0], lidar, idx=batch_idx)
                    cv2.imshow(f'Iter_{iteration}', out3[:, :, [2, 1, 0]])
                    cv2.waitKey(1)
                    # if iter == 1:
                    # plt.figure()
                    # plt.imshow(out3)
                    # io.imshow(lidar.cpu().numpy()[0,0], cmap='jet')
                    # io.show()

                T_composed = RTs[iteration + 1][:3, 3]
                R_composed = quaternion_from_matrix(RTs[iteration + 1])
                errors_t[iteration + 1].append(T_composed.norm().item())
                errors_t2[iteration + 1].append(T_composed)
                errors_r[iteration + 1].append(
                    quaternion_distance(
                        R_composed.unsqueeze(0),
                        torch.tensor([1., 0., 0., 0.],
                                     device=R_composed.device).unsqueeze(0),
                        R_composed.device))

                # rpy_error = quaternion_to_tait_bryan(R_composed)
                rpy_error = mat2xyzrpy(RTs[iteration + 1])[3:]
                rpy_error *= (180.0 / 3.141592)
                errors_rpy[iteration + 1].append(rpy_error)
                log_string += [
                    str(errors_t[iteration + 1][-1]),
                    str(errors_r[iteration + 1][-1]),
                    str(errors_t2[iteration + 1][-1][0].item()),
                    str(errors_t2[iteration + 1][-1][1].item()),
                    str(errors_t2[iteration + 1][-1][2].item()),
                    str(errors_rpy[iteration + 1][-1][0].item()),
                    str(errors_rpy[iteration + 1][-1][1].item()),
                    str(errors_rpy[iteration + 1][-1][2].item())
                ]

        all_RTs.append(RTs[-1])
        prev_RT = RTs[-1].inverse()
        prev_tr_error = prev_RT[:3, 3].unsqueeze(0)
        prev_rot_error = quaternion_from_matrix(prev_RT).unsqueeze(0)
        # Here prev_RT is how far the network output deviates from the ground truth

        if _config['save_log']:
            log_file.writerow(log_string)

    if _config['save_log']:
        log_file.close()
    print("Iterative refinement: ")
    for i in range(len(weights) + 1):
        errors_r[i] = torch.tensor(errors_r[i]) * (180.0 / 3.141592)
        errors_t[i] = torch.tensor(errors_t[i]) * 100
        print(
            f"Iteration {i}: \tMean Translation Error: {errors_t[i].mean():.4f} cm "
            f"     Mean Rotation Error: {errors_r[i].mean():.4f} °")
        print(
            f"Iteration {i}: \tMedian Translation Error: {errors_t[i].median():.4f} cm "
            f"     Median Rotation Error: {errors_r[i].median():.4f} °\n")
    print("-------------------------------------------------------")
    print("Timings:")
    for i in range(len(errors_t2)):
        errors_t2[i] = torch.stack(errors_t2[i])
        errors_rpy[i] = torch.stack(errors_rpy[i])
    plt.plot(errors_t2[-1][:, 0].cpu().numpy())
    plt.show()
    plt.plot(errors_t2[-1][:, 1].cpu().numpy())
    plt.show()
    plt.plot(errors_t2[-1][:, 2].cpu().numpy())
    plt.show()

    if _config["save_name"] is not None:
        torch.save(
            torch.stack(errors_t).cpu().numpy(),
            f'./results_for_paper/{_config["save_name"]}_errors_t')
        torch.save(
            torch.stack(errors_r).cpu().numpy(),
            f'./results_for_paper/{_config["save_name"]}_errors_r')
        torch.save(
            torch.stack(errors_t2).cpu().numpy(),
            f'./results_for_paper/{_config["save_name"]}_errors_t2')
        torch.save(
            torch.stack(errors_rpy).cpu().numpy(),
            f'./results_for_paper/{_config["save_name"]}_errors_rpy')

    print("End!")
Example No. 5
    # Making save
    # fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    # vw = cv2.VideoWriter('laserrange_image.mp4', fourcc, 30, (1280, 960))

    lidar = re.search(
        '(lms_front|lms_rear|ldmrs|velodyne_left|velodyne_right)',
        my_params.laser_dir).group(0)
    lidar_timestamps_path = os.path.join(my_params.dataset_patch,
                                         lidar + '.timestamps')

    camera = re.search('(stereo|mono_(left|right|rear))',
                       my_params.image_dir).group(0)
    camera_timestamps_path = os.path.join(
        os.path.join(my_params.dataset_patch, camera + '.timestamps'))

    model = CameraModel(my_params.model_dir, my_params.image_dir)
    poses_file = my_params.poses_file
    extrinsics_dir = my_params.extrinsics_dir

    with open(
            os.path.join(
                os.path.join(my_params.extrinsics_dir,
                             camera + '.txt'))) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_camera_vehicle = build_se3_transform(extrinsics)
    G_camera_posesource = None

    # VO frame and vehicle frame are the same
    G_camera_posesource = G_camera_vehicle

    timestamp = 0
Example No. 6
def main():
    args = get_arguments()

    camera = re.search('(stereo|mono_(left|right|rear))', args.dir).group(0)

    timestamps_path = os.path.join(
        os.path.join(args.dir, os.pardir, camera + '.timestamps'))
    if not os.path.isfile(timestamps_path):
        timestamps_path = os.path.join(args.dir, os.pardir, os.pardir,
                                       camera + '.timestamps')
        if not os.path.isfile(timestamps_path):
            raise IOError("Could not find timestamps file")

    model = None
    if args.models_dir:
        model = CameraModel(args.models_dir, args.dir)

    output_dir = os.curdir
    if args.output_dir:
        output_dir = args.output_dir
    if not os.path.isdir(output_dir):
        raise IOError(output_dir + " is not an existing folder")

    result_list = []
    count = 0
    dictionary = {}
    t_records = []
    p_records = []
    angles_records = []
    intrinsic_matrix = None

    with open(args.poses_file) as vo_file:
        vo_reader = csv.reader(vo_file)
        headers = next(vo_file)
        for row in vo_reader:
            src_image_name = row[0]
            dst_image_name = row[1]
            src_image_filename = os.path.join(args.dir,
                                              src_image_name + '.png')
            dst_image_filename = os.path.join(args.dir,
                                              dst_image_name + '.png')
            if not os.path.isfile(src_image_filename) or not os.path.isfile(
                    dst_image_filename):
                continue
            if dst_image_name not in dictionary:
                img, orig_resolution = process_image(
                    load_image(dst_image_filename, model), args.crop,
                    args.scale)
                dictionary[dst_image_name] = count
                count = count + 1
                result_list.append(list(img))
            if src_image_name not in dictionary:
                img, orig_resolution = process_image(
                    load_image(src_image_filename, model), args.crop,
                    args.scale)
                dictionary[src_image_name] = count
                count = count + 1
                result_list.append(list(img))

            focal_length, principal_point = get_intrinsics_parameters(
                model.get_focal_length(), model.get_principal_point(),
                orig_resolution, args.crop, args.scale)
            src_image_idx = dictionary[src_image_name]
            dst_image_idx = dictionary[dst_image_name]
            xyzrpy = [float(v) for v in row[2:8]]
            rel_pose = build_se3_transform(xyzrpy)
            t_matrix = rel_pose[0:3]  # 3x4 matrix
            intrinsic_matrix = build_intrinsic_matrix(focal_length,
                                                      principal_point)
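            # 3x4 projection matrix P = K [R|t] for this relative pose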
            p_matrix = intrinsic_matrix * t_matrix
            t_records.append((t_matrix, src_image_idx, dst_image_idx))
            p_records.append((p_matrix, src_image_idx, dst_image_idx))
            angles_records.append((xyzrpy, src_image_idx, dst_image_idx))

    transf = np.array(t_records,
                      dtype=[('T', ('float64', (3, 4))), ('src_idx', 'int32'),
                             ('dst_idx', 'int32')])
    proy = np.array(p_records,
                    dtype=[('P', ('float64', (3, 4))), ('src_idx', 'int32'),
                           ('dst_idx', 'int32')])
    angles = np.array(angles_records,
                      dtype=[('ang', ('float64', 6)), ('src_idx', 'int32'),
                             ('dst_idx', 'int32')])
    # Only save it once because it is constant for the whole dataset (or it should be)
    if intrinsic_matrix is not None:
        save_txt(os.path.join(output_dir, "intrinsic_matrix"),
                 intrinsic_matrix)
        save_txt(os.path.join(output_dir, "intrinsic_parameters"),
                 [focal_length, principal_point])
    #path = os.path.normpath(args.dir)
    #folders = path.split(os.sep)
    #compressed_file_path = os.path.join(output_dir, folders[-3])
    result = list_to_array(result_list)
    save_txt(os.path.join(output_dir, 'images_shape'), result.shape, fmt='%i')
    print(result.shape)
    compressed_file_path = os.path.join(output_dir, 'images')
    savez_compressed(compressed_file_path, result)
    savez_compressed(os.path.join(output_dir, 't'), transf)
    savez_compressed(os.path.join(output_dir, 'p'), proy)
    savez_compressed(os.path.join(output_dir, 'angles'), angles)
Example No. 7
def main(_config, _run, seed):
    global EPOCH
    print(_config['loss'])

    if _config['test_sequence'] is None:
        raise TypeError('test_sequences cannot be None')
    else:
        _config['test_sequence'] = f"{_config['test_sequence']:02d}"
        print("Test Sequence: ", _config['test_sequence'])
        dataset_class = DatasetVisibilityKittiSingle
    occlusion_threshold = _config['occlusion_threshold']
    img_shape = (384, 1280)
    _config["savemodel"] = os.path.join(_config["savemodel"], _config['dataset'])

    maps_folder = 'local_maps'
    if _config['maps_folder'] is not None:
        maps_folder = _config['maps_folder']
    dataset = dataset_class(_config['data_folder'], max_r=_config['max_r'], max_t=_config['max_t'],
                            split='train', use_reflectance=_config['use_reflectance'], maps_folder=maps_folder,
                            test_sequence=_config['test_sequence'])
    dataset_val = dataset_class(_config['data_folder'], max_r=_config['max_r'], max_t=_config['max_t'],
                                split='test', use_reflectance=_config['use_reflectance'], maps_folder=maps_folder,
                                test_sequence=_config['test_sequence'])
    _config["savemodel"] = os.path.join(_config["savemodel"], _config['test_sequence'])
    if not os.path.exists(_config["savemodel"]):
        os.mkdir(_config["savemodel"])

    np.random.seed(seed)
    torch.random.manual_seed(seed)

    def init_fn(x): return _init_fn(x, seed)

    dataset_size = len(dataset)

    # Training and test set creation
    num_worker = _config['num_worker']
    batch_size = _config['batch_size']
    TrainImgLoader = torch.utils.data.DataLoader(dataset=dataset,
                                                 shuffle=True,
                                                 batch_size=batch_size,
                                                 num_workers=num_worker,
                                                 worker_init_fn=init_fn,
                                                 collate_fn=merge_inputs,
                                                 drop_last=False,
                                                 pin_memory=True)

    TestImgLoader = torch.utils.data.DataLoader(dataset=dataset_val,
                                                shuffle=False,
                                                batch_size=batch_size,
                                                num_workers=num_worker,
                                                worker_init_fn=init_fn,
                                                collate_fn=merge_inputs,
                                                drop_last=False,
                                                pin_memory=True)

    print(len(TrainImgLoader))
    print(len(TestImgLoader))

    if _config['loss'] == 'simple':
        loss_fn = ProposedLoss(_config['rescale_transl'], _config['rescale_rot'])
    elif _config['loss'] == 'geometric':
        loss_fn = GeometricLoss()
        loss_fn = loss_fn.to(device)
    elif _config['loss'] == 'points_distance':
        loss_fn = DistancePoints3D()
    elif _config['loss'] == 'L1':
        loss_fn = L1Loss(_config['rescale_transl'], _config['rescale_rot'])
    else:
        raise ValueError("Unknown Loss Function")

    #runs = datetime.now().strftime('%b%d_%H-%M-%S') + "/"
    #train_writer = SummaryWriter('./logs/' + runs)
    #ex.info["tensorflow"] = {}
    #ex.info["tensorflow"]["logdirs"] = ['./logs/' + runs]

    if _config['network'].startswith('PWC'):
        feat = 1
        md = 4
        split = _config['network'].split('_')
        for item in split[1:]:
            if item.startswith('f'):
                feat = int(item[-1])
            elif item.startswith('md'):
                md = int(item[2:])
        assert 0 < feat < 7, "Feature Number from PWC has to be between 1 and 6"
        assert 0 < md, "md must be positive"
        model = CMRNet(img_shape, use_feat_from=feat, md=md,
                       use_reflectance=_config['use_reflectance'], dropout=_config['dropout'])
    else:
        raise TypeError("Network unknown")
    if _config['weights'] is not None:
        print(f"Loading weights from {_config['weights']}")
        checkpoint = torch.load(_config['weights'], map_location='cpu')
        saved_state_dict = checkpoint['state_dict']
        model.load_state_dict(saved_state_dict)
    model = model.to(device)

    print(dataset_size)
    print('Number of model parameters: {}'.format(sum([p.data.nelement() for p in model.parameters()])))

    parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
    if _config['loss'] == 'geometric':
        parameters += list(loss_fn.parameters())
    if _config['optimizer'] == 'adam':
        optimizer = optim.Adam(parameters, lr=_config['BASE_LEARNING_RATE'], weight_decay=5e-6)
        # Probably this scheduler is not used
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 50, 70], gamma=0.5)
    else:
        optimizer = optim.SGD(parameters, lr=_config['BASE_LEARNING_RATE'], momentum=0.9,
                              weight_decay=5e-6, nesterov=True)

    starting_epoch = 0
    if _config['weights'] is not None and _config['resume']:
        checkpoint = torch.load(_config['weights'], map_location='cpu')
        opt_state_dict = checkpoint['optimizer']
        optimizer.load_state_dict(opt_state_dict)
        starting_epoch = checkpoint['epoch']

    # Allow mixed-precision if needed
    # model, optimizer = apex.amp.initialize(model, optimizer, opt_level=_config["precision"])

    start_full_time = time.time()
    BEST_VAL_LOSS = 10000.
    old_save_filename = None

    total_iter = 0
    for epoch in range(starting_epoch, _config['epochs'] + 1):
        EPOCH = epoch
        print('This is the %d-th epoch' % epoch)
        epoch_start_time = time.time()
        total_train_loss = 0
        local_loss = 0.
        if _config['optimizer'] != 'adam':
            _run.log_scalar("LR", _config['BASE_LEARNING_RATE'] *
                            math.exp((1 - epoch) * 4e-2), epoch)
            for param_group in optimizer.param_groups:
                param_group['lr'] = _config['BASE_LEARNING_RATE'] * \
                                    math.exp((1 - epoch) * 4e-2)
        else:
            #scheduler.step(epoch%100)
            _run.log_scalar("LR", scheduler.get_lr()[0])


        ## Training ##
        time_for_50ep = time.time()
        for batch_idx, sample in enumerate(TrainImgLoader):

            #print(f'batch {batch_idx+1}/{len(TrainImgLoader)}', end='\r')
            start_time = time.time()
            lidar_input = []
            rgb_input = []

            sample['tr_error'] = sample['tr_error'].cuda()
            sample['rot_error'] = sample['rot_error'].cuda()

            start_preprocess = time.time()
            for idx in range(len(sample['rgb'])):
                # ProjectPointCloud in RT-pose

                real_shape = [sample['rgb'][idx].shape[1], sample['rgb'][idx].shape[2], sample['rgb'][idx].shape[0]]

                sample['point_cloud'][idx] = sample['point_cloud'][idx].cuda()
                pc_rotated = sample['point_cloud'][idx].clone()
                reflectance = None
                if _config['use_reflectance']:
                    reflectance = sample['reflectance'][idx].cuda()

                R = mathutils.Quaternion(sample['rot_error'][idx]).to_matrix()
                R.resize_4x4()
                T = mathutils.Matrix.Translation(sample['tr_error'][idx])
                RT = T * R

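                # Apply the sampled decalibration to the point cloud so the network sees a miscalibrated projection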
                pc_rotated = rotate_back(pc_rotated, RT)

                if _config['max_depth'] < 100.:
                    pc_rotated = pc_rotated[:, pc_rotated[0, :] < _config['max_depth']].clone()

                cam_params = sample['calib'][idx].cuda()
                cam_model = CameraModel()
                cam_model.focal_length = cam_params[:2]
                cam_model.principal_point = cam_params[2:]
                uv, depth, _, refl = cam_model.project_pytorch(pc_rotated, real_shape, reflectance)
                uv = uv.t().int()
                depth_img = torch.zeros(real_shape[:2], device='cuda', dtype=torch.float)
                depth_img += 1000.
                depth_img = visibility.depth_image(uv, depth, depth_img, uv.shape[0], real_shape[1], real_shape[0])
                depth_img[depth_img == 1000.] = 0.

                depth_img_no_occlusion = torch.zeros_like(depth_img, device='cuda')
                depth_img_no_occlusion = visibility.visibility2(depth_img, cam_params, depth_img_no_occlusion,
                                                                depth_img.shape[1], depth_img.shape[0],
                                                                occlusion_threshold, _config['occlusion_kernel'])

                if _config['use_reflectance']:
                    # This needs to be checked
                    uv = uv.long()
                    indexes = depth_img_no_occlusion[uv[:,1], uv[:,0]] == depth
                    refl_img = torch.zeros(real_shape[:2], device='cuda', dtype=torch.float)
                    refl_img[uv[indexes,1], uv[indexes,0]] = refl[0, indexes]

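                # Normalise depths to [0, 1] using the configured maximum depth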
                depth_img_no_occlusion /= _config['max_depth']
                if not _config['use_reflectance']:
                    depth_img_no_occlusion = depth_img_no_occlusion.unsqueeze(0)
                else:
                    depth_img_no_occlusion = torch.stack((depth_img_no_occlusion, refl_img))

                # PAD ONLY ON RIGHT AND BOTTOM SIDE
                rgb = sample['rgb'][idx].cuda()
                shape_pad = [0, 0, 0, 0]

                shape_pad[3] = (img_shape[0] - rgb.shape[1])  # // 2
                shape_pad[1] = (img_shape[1] - rgb.shape[2])  # // 2 + 1

                rgb = F.pad(rgb, shape_pad)
                depth_img_no_occlusion = F.pad(depth_img_no_occlusion, shape_pad)

                rgb_input.append(rgb)
                lidar_input.append(depth_img_no_occlusion)

            lidar_input = torch.stack(lidar_input)
            rgb_input = torch.stack(rgb_input)
            end_preprocess = time.time()
            loss = train(model, optimizer, rgb_input, lidar_input, sample['tr_error'],
                         sample['rot_error'], loss_fn, sample['point_cloud'])

            if loss != loss:
                raise ValueError("Loss is NaN")

            #train_writer.add_scalar("Loss", loss, total_iter)
            local_loss += loss

            if batch_idx % 50 == 0 and batch_idx != 0:

                print(f'Iter {batch_idx}/{len(TrainImgLoader)} training loss = {local_loss/50:.3f}, '
                      f'time = {(time.time() - start_time)/lidar_input.shape[0]:.4f}, '
                      #f'time_preprocess = {(end_preprocess-start_preprocess)/lidar_input.shape[0]:.4f}, '
                      f'time for 50 iter: {time.time()-time_for_50ep:.4f}')
                time_for_50ep = time.time()
                _run.log_scalar("Loss", local_loss/50, total_iter)
                local_loss = 0.
            total_train_loss += loss * len(sample['rgb'])
            total_iter += len(sample['rgb'])

        print("------------------------------------")
        print('epoch %d total training loss = %.3f' % (epoch, total_train_loss / len(dataset)))
        print('Total epoch time = %.2f' % (time.time() - epoch_start_time))
        print("------------------------------------")
        _run.log_scalar("Total training loss", total_train_loss / len(dataset), epoch)

        ## Test ##
        total_test_loss = 0.
        total_test_t = 0.
        total_test_r = 0.

        local_loss = 0.0
        for batch_idx, sample in enumerate(TestImgLoader):
            # print(f'batch {batch_idx + 1}/{len(TestImgLoader)}', end='\r')
            start_time = time.time()
            lidar_input = []
            rgb_input = []

            #sample['rgb'] = sample['rgb'].cuda()
            sample['tr_error'] = sample['tr_error'].cuda()
            sample['rot_error'] = sample['rot_error'].cuda()

            for idx in range(len(sample['rgb'])):
                # ProjectPointCloud in RT-pose
                real_shape = [sample['rgb'][idx].shape[1], sample['rgb'][idx].shape[2], sample['rgb'][idx].shape[0]]

                sample['point_cloud'][idx] = sample['point_cloud'][idx].cuda()
                pc_rotated = sample['point_cloud'][idx].clone()
                reflectance = None
                if _config['use_reflectance']:
                    reflectance = sample['reflectance'][idx].cuda()

                R = mathutils.Quaternion(sample['rot_error'][idx]).to_matrix()
                R.resize_4x4()
                T = mathutils.Matrix.Translation(sample['tr_error'][idx])
                RT = T * R

                pc_rotated = rotate_back(pc_rotated, RT)

                if _config['max_depth'] < 100.:
                    pc_rotated = pc_rotated[:, pc_rotated[0, :] < _config['max_depth']].clone()

                cam_params = sample['calib'][idx].cuda()
                cam_model = CameraModel()
                cam_model.focal_length = cam_params[:2]
                cam_model.principal_point = cam_params[2:]
                uv, depth, _, refl = cam_model.project_pytorch(pc_rotated, real_shape, reflectance)
                uv = uv.t().int()
                depth_img = torch.zeros(real_shape[:2], device='cuda', dtype=torch.float)
                depth_img += 1000.
                depth_img = visibility.depth_image(uv, depth, depth_img, uv.shape[0], real_shape[1], real_shape[0])
                depth_img[depth_img == 1000.] = 0.

                depth_img_no_occlusion = torch.zeros_like(depth_img, device='cuda')
                depth_img_no_occlusion = visibility.visibility2(depth_img,
                                                                cam_params, depth_img_no_occlusion,
                                                                depth_img.shape[1], depth_img.shape[0],
                                                                occlusion_threshold, _config['occlusion_kernel'])

                if _config['use_reflectance']:
                    uv = uv.long()
                    indexes = depth_img_no_occlusion[uv[:,1], uv[:,0]] == depth
                    refl_img = torch.zeros(real_shape[:2], device='cuda', dtype=torch.float)
                    refl_img[uv[indexes,1], uv[indexes,0]] = refl[0, indexes]

                depth_img_no_occlusion /= _config['max_depth']
                if not _config['use_reflectance']:
                    depth_img_no_occlusion = depth_img_no_occlusion.unsqueeze(0)
                else:
                    depth_img_no_occlusion = torch.stack((depth_img_no_occlusion, refl_img))

                rgb = sample['rgb'][idx].cuda()
                shape_pad = [0, 0, 0, 0]

                shape_pad[3] = (img_shape[0] - rgb.shape[1])
                shape_pad[1] = (img_shape[1] - rgb.shape[2])

                rgb = F.pad(rgb, shape_pad)
                depth_img_no_occlusion = F.pad(depth_img_no_occlusion, shape_pad)

                rgb_input.append(rgb)
                lidar_input.append(depth_img_no_occlusion)

            lidar_input = torch.stack(lidar_input)
            rgb_input = torch.stack(rgb_input)

            loss, trasl_e, rot_e = test(model, rgb_input, lidar_input, sample['tr_error'],
                                        sample['rot_error'], loss_fn, dataset_val.model, sample['point_cloud'])

            if loss != loss:
                raise ValueError("Loss is NaN")

            total_test_t += trasl_e
            total_test_r += rot_e
            local_loss += loss

            if batch_idx % 50 == 0 and batch_idx != 0:
                print('Iter %d test loss = %.3f , time = %.2f' % (batch_idx, local_loss/50.,
                                                                  (time.time() - start_time)/lidar_input.shape[0]))
                local_loss = 0.0
            total_test_loss += loss * len(sample['rgb'])

        print("------------------------------------")
        print('total test loss = %.3f' % (total_test_loss / len(dataset_val)))
        print(f'total translation error: {total_test_t / len(dataset_val)} cm')
        print(f'total rotation error: {total_test_r / len(dataset_val)} °')
        print("------------------------------------")

        #train_writer.add_scalar("Val_Loss", total_test_loss / len(dataset_val), epoch)
        #train_writer.add_scalar("Val_t_error", total_test_t / len(dataset_val), epoch)
        #train_writer.add_scalar("Val_r_error", total_test_r / len(dataset_val), epoch)
        _run.log_scalar("Val_Loss", total_test_loss / len(dataset_val), epoch)
        _run.log_scalar("Val_t_error", total_test_t / len(dataset_val), epoch)
        _run.log_scalar("Val_r_error", total_test_r / len(dataset_val), epoch)

        # SAVE
        val_loss = total_test_loss / len(dataset_val)
        if val_loss < BEST_VAL_LOSS:
            BEST_VAL_LOSS = val_loss
            #_run.result = BEST_VAL_LOSS
            if _config['rescale_transl'] > 0:
                _run.result = total_test_t / len(dataset_val)
            else:
                _run.result = total_test_r / len(dataset_val)
            savefilename = f'{_config["savemodel"]}/checkpoint_r{_config["max_r"]:.2f}_t{_config["max_t"]:.2f}_e{epoch}_{val_loss:.3f}.tar'
            torch.save({
                'config': _config,
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'train_loss': total_train_loss / len(dataset),
                'test_loss': total_test_loss / len(dataset_val),
            }, savefilename)
            print(f'Model saved as {savefilename}')
            if old_save_filename is not None:
                if os.path.exists(old_save_filename):
                    os.remove(old_save_filename)
            old_save_filename = savefilename

    print('full training time = %.2f HR' % ((time.time() - start_full_time) / 3600))
    return _run.result
Example No. 8
    def __init__(self,
                 dataset_dir,
                 transform=None,
                 augmentation=False,
                 maps_folder='local_maps',
                 use_reflectance=False,
                 max_t=2.,
                 max_r=10.,
                 split='test',
                 device='cpu',
                 test_sequence='00'):
        super(DatasetVisibilityKittiSingle, self).__init__()
        self.use_reflectance = use_reflectance
        self.maps_folder = maps_folder
        self.device = device
        self.max_r = max_r
        self.max_t = max_t
        self.augmentation = augmentation
        self.root_dir = dataset_dir
        self.transform = transform
        self.split = split
        self.GTs_R = {}
        self.GTs_T = {}

        self.all_files = []
        self.model = CameraModel()
        self.model.focal_length = [7.18856e+02, 7.18856e+02]
        self.model.principal_point = [6.071928e+02, 1.852157e+02]
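        # Pinhole intrinsics of the KITTI odometry camera: focal length [fx, fy] and principal point [cx, cy], in pixels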
        # for dir in ['00', '03', '05', '06', '07', '08', '09']:
        for dir in ['00']:
            self.GTs_R[dir] = []
            self.GTs_T[dir] = []
            df_locations = pd.read_csv(os.path.join(dataset_dir, dir,
                                                    'poses.csv'),
                                       sep=',',
                                       dtype={'timestamp': str})
            for index, row in df_locations.iterrows():
                if not os.path.exists(
                        os.path.join(dataset_dir, dir, maps_folder,
                                     str(row['timestamp']) + '.h5')):
                    continue
                if not os.path.exists(
                        os.path.join(dataset_dir, dir, 'image_2',
                                     str(row['timestamp']) + '.png')):
                    continue
                if dir == test_sequence and split.startswith('test'):
                    self.all_files.append(
                        os.path.join(dir, str(row['timestamp'])))
                elif (not dir == test_sequence) and split == 'train':
                    self.all_files.append(
                        os.path.join(dir, str(row['timestamp'])))
                GT_R = np.array([row['qw'], row['qx'], row['qy'], row['qz']])
                GT_T = np.array([row['x'], row['y'], row['z']])
                self.GTs_R[dir].append(GT_R)
                self.GTs_T[dir].append(GT_T)

        self.test_RT = []
        if split == 'test':
            test_RT_file = os.path.join(
                dataset_dir,
                f'test_RT_seq{test_sequence}_{max_r:.2f}_{max_t:.2f}.csv')
            if os.path.exists(test_RT_file):
                print(f'TEST SET: Using this file: {test_RT_file}')
                df_test_RT = pd.read_csv(test_RT_file, sep=',')
                for index, row in df_test_RT.iterrows():
                    self.test_RT.append(list(row))
            else:
                print(f'TEST SET - Not found: {test_RT_file}')
                print("Generating a new one")
                test_RT_file = open(test_RT_file, 'w')
                test_RT_file = csv.writer(test_RT_file, delimiter=',')
                test_RT_file.writerow(
                    ['id', 'tx', 'ty', 'tz', 'rx', 'ry', 'rz'])
                for i in range(len(self.all_files)):
                    rotz = np.random.uniform(-max_r,
                                             max_r) * (3.141592 / 180.0)
                    roty = np.random.uniform(-max_r,
                                             max_r) * (3.141592 / 180.0)
                    rotx = np.random.uniform(-max_r,
                                             max_r) * (3.141592 / 180.0)
                    transl_x = np.random.uniform(-max_t, max_t)
                    transl_y = np.random.uniform(-max_t, max_t)
                    transl_z = np.random.uniform(-max_t, min(max_t, 1.))
                    test_RT_file.writerow(
                        [i, transl_x, transl_y, transl_z, rotx, roty, rotz])
                    self.test_RT.append(
                        [i, transl_x, transl_y, transl_z, rotx, roty, rotz])

            assert len(self.test_RT) == len(
                self.all_files), "Something wrong with test RTs"
Example No. 9
def main():
    #read the pointcloud
    pointcloud_filename = "/data/lyh/lab/pcaifeat_RobotCar_v3_1/1400505794141322.txt"
    pointcloud = np.loadtxt(pointcloud_filename, delimiter=' ')
    pointcloud = np.hstack([pointcloud, np.ones((pointcloud.shape[0], 1))])
    '''
	for i in range(pointcloud.shape[0]):
		print(" %.3f %.3f %.3f %.3f"%(pointcloud[i,0],pointcloud[i,1],pointcloud[i,2],pointcloud[i,3]))
	exit()
	'''

    #load the camera model
    models_dir = "/data/lyh/lab/robotcar-dataset-sdk/models/"
    model = CameraModel(models_dir, "mono_left")

    #load the camera global pose
    imgpos_path = "/data/lyh/lab/pcaifeat_RobotCar_v3_1/1400505794141322_imgpos.txt"
    print(imgpos_path)
    imgpos = {}
    with open(imgpos_path) as imgpos_file:
        for line in imgpos_file:
            pos = [x for x in line.split(' ')]
            for i in range(len(pos) - 2):
                pos[i + 1] = float(pos[i + 1])
            imgpos[pos[0]] = np.reshape(np.array(pos[1:-1]), [4, 4])
    '''
	for key in imgpos.keys():
		print(key)
		print(imgpos[key])
	exit()
	'''

    #read the camera and ins extrinsics
    extrinsics_path = "/data/lyh/lab/robotcar-dataset-sdk/extrinsics/mono_left.txt"
    print(extrinsics_path)
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_camera_vehicle = build_se3_transform(extrinsics)
    print(G_camera_vehicle)

    extrinsics_path = "/data/lyh/lab/robotcar-dataset-sdk/extrinsics/ins.txt"
    print(extrinsics_path)
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_ins_vehicle = build_se3_transform(extrinsics)
    print(G_ins_vehicle)
    G_camera_posesource = G_camera_vehicle * G_ins_vehicle

    #translate pointcloud to image coordinate
    pointcloud = np.dot(np.linalg.inv(imgpos["mono_left"]), pointcloud.T)
    pointcloud = np.dot(G_camera_posesource, pointcloud)

    #project pointcloud to image
    image_path = "/data/lyh/lab/pcaifeat_RobotCar_v3_1/1400505794141322_mono_left.png"
    #image = load_image(image_path, model)
    image = load_image(image_path)

    uv = model.project(pointcloud, [1024, 1024])

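    # Sample the camera's bilinear look-up table to map the projected (undistorted) pixel coordinates into the raw, distorted image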
    lut = model.bilinear_lut[:, 1::-1].T.reshape((2, 1024, 1024))
    u = map_coordinates(lut[0, :, :], uv, order=1)
    v = map_coordinates(lut[1, :, :], uv, order=1)
    uv = np.array([u, v])
    print(uv.shape)
    transform_matrix = np.zeros([64, 4096])
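    # Bin each projected point into an 8x8 grid of 128-pixel cells over the 1024x1024 image; transform_matrix marks the cell of each point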
    for i in range(uv.shape[1]):
        if uv[0, i] == 0 and uv[1, i] == 0:
            continue
        cur_uv = np.rint(uv[:, i] / 128)
        row = cur_uv[1] * 8 + cur_uv[0]
        transform_matrix[int(row), i] = 1

    aa = np.sum(transform_matrix, 1).reshape([8, 8])
    print(np.sum(aa))
    plt.figure(1)
    plt.imshow(aa)
    #plt.show()

    #exit()
    plt.figure(2)
    #plt.imshow(image)
    #uv = np.rint(uv/32)
    plt.scatter(np.ravel(uv[0, :]),
                np.ravel(uv[1, :]),
                s=2,
                edgecolors='none',
                cmap='jet')
    plt.xlim(0, 1024)
    plt.ylim(1024, 0)
    plt.xticks([])
    plt.yticks([])
    plt.show()
Example No. 10
def iterate_images(root_dir, camera_description, models_dir, full_size=True):
    if 'stereo' in camera_description:
        timestamps_path = os.path.join(root_dir, 'stereo.timestamps')
    else:
        timestamps_path = os.path.join(root_dir,
                                       camera_description + '.timestamps')

    if not os.path.isfile(timestamps_path):
        raise IOError("Could not find timestamps file")

    images_dir = os.path.join(root_dir, camera_description)
    model = CameraModel(models_dir, images_dir)

    current_chunk = 0
    timestamps_file = open(timestamps_path)
    i = 0
    for line in timestamps_file:
        tokens = line.split()
        if len(tokens) < 2:
            break

        timestamp_us = int(tokens[0])
        timestamp_ns = timestamp_us * 1000

        if full_size:
            filename_processed = os.path.join(images_dir,
                                              tokens[0] + '_undistorted.png')
        else:
            filename_processed = os.path.join(
                images_dir, tokens[0] + '_ds2_undistorted.png')

        if os.path.exists(filename_processed):
            im = cv2.imread(filename_processed, cv2.IMREAD_GRAYSCALE)

        else:
            datetime = dt.utcfromtimestamp(int(tokens[0]) / 1000000)
            chunk = int(tokens[1])

            filename = os.path.join(images_dir, tokens[0] + '.png')
            if not os.path.isfile(filename):
                if chunk != current_chunk:
                    print("Chunk " + str(chunk) + " not found")
                    current_chunk = chunk
                continue

            current_chunk = chunk

            img = load_image(filename, model)

            im_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            filename_gray = os.path.join(images_dir,
                                         tokens[0] + '_undistorted.png')
            cv2.imwrite(filename_gray, im_gray)

            im_gray_downscaled = cv2.resize(
                im_gray, (im_gray.shape[1] // 2, im_gray.shape[0] // 2))
            cv2.imwrite(filename_processed, im_gray_downscaled)

            if full_size:
                im = im_gray
            else:
                im = im_gray_downscaled

        if i % 100 == 0:
            print "At i ", i

        i += 1

        yield timestamp_ns, im
Example No. 11
def generate_pointcloud():

    WRITE_IMAGE = 1
    DRAW_MAP = 1

    # Data input
    image_dir = my_params.image_dir
    ldrms_dir = my_params.laser_dir
    lmsfront_dir = my_params.lmsfront_dir
    lmsrear_dir  = my_params.lmsrear_dir
    poses_file = my_params.poses_file
    models_dir = my_params.model_dir
    extrinsics_dir = my_params.extrinsics_dir

    output_img_dir   = my_params.output_dir2 + 'pl_img_'   + my_params.dataset_no + '//'
    output_ldrms_dir = my_params.output_dir2 + 'pl_ldrms_' + my_params.dataset_no + '//'
    output_front_dir = my_params.output_dir2 + 'pl_front_' + my_params.dataset_no + '//'
    output_rear_dir  = my_params.output_dir2 + 'pl_rear_'  + my_params.dataset_no + '//'
    
    map_ldrms_dir = my_params.output_dir2 + 'map_ldrms_' + my_params.dataset_no + '//'
    map_front_dir = my_params.output_dir2 + 'map_front_' + my_params.dataset_no + '//'
    map_rear_dir  = my_params.output_dir2 + 'map_rear_'  + my_params.dataset_no + '//'


    model = CameraModel(models_dir, image_dir)
    extrinsics_path = os.path.join(extrinsics_dir, model.camera + '.txt')
    with open(extrinsics_path) as extrinsics_file:
        extrinsics = [float(x) for x in next(extrinsics_file).split(' ')]
    G_camera_vehicle = build_se3_transform(extrinsics)
    G_camera_posesource = None

    poses_type = re.search(r'(vo|ins|rtk)\.csv', poses_file).group(1)
    if poses_type in ['ins', 'rtk']:
        with open(os.path.join(extrinsics_dir, 'ins.txt')) as extrinsics_file:
            extrinsics = next(extrinsics_file)
            G_camera_posesource = G_camera_vehicle * build_se3_transform([float(x) for x in extrinsics.split(' ')])
    else:
        # VO frame and vehicle frame are the same
        G_camera_posesource = G_camera_vehicle

    image_size = (960,1280,3)

    timestamps_path = os.path.join(my_params.dataset_patch + model.camera + '.timestamps')
    timestamp = 0
    plt.figure()
    with open(timestamps_path) as timestamps_file:
        for i, line in enumerate(timestamps_file):
            
            if i < 799:
                # print('open image index', i)
                # timestamp = int(line.split(' ')[0])
                # break
                continue

            timestamp = int(line.split(' ')[0])
            start_time = timestamp - 1e6
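            # LiDAR scans are accumulated over a window around the image timestamp (timestamps are in microseconds)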
            
            frame_path = os.path.join(my_params.reprocess_image_dir + '//' + str(timestamp) + '.png')
            frame = cv2.imread(frame_path) 
            
            print('process image ',i,'-',timestamp)

            if i < 4: 
                if(WRITE_IMAGE):
                    savefile_name = output_dir + '\\lms_front_img_' + my_params.dataset_no + '\\' + str(timestamp) + '.png'
                    cv2.imwrite(savefile_name, frame)           
                continue

            pl_ldrms = np.zeros((960,1280),dtype=int)
            pl_front = np.zeros((960,1280),dtype=int)

            

            # LDRMS
            ldrms_pointcloud, _ = build_pointcloud(ldrms_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 2e6, timestamp)
            ldrms_pointcloud = np.dot(G_camera_posesource, ldrms_pointcloud)
            uv, depth = model.project(ldrms_pointcloud,image_size)

            x_p = np.ravel(uv[0,:])
            y_p = np.ravel(uv[1,:])
            z_p = np.ravel(depth)

            map_ldrms = (np.array(ldrms_pointcloud[0:3,:])).transpose()
            map_ldrms_filename = map_ldrms_dir + str(timestamp) + '.csv'
            np.savetxt(map_ldrms_filename, map_ldrms, delimiter=",")

            if (DRAW_MAP):
                map_x = [numeric_map_x for numeric_map_x in np.array(ldrms_pointcloud[0,:])]
                map_y = [numeric_map_y for numeric_map_y in np.array(ldrms_pointcloud[1,:])]
                map_z = np.array(ldrms_pointcloud[2,:])

                plt.scatter((map_y),(map_x),c='b',marker='.', zorder=1)

            # LDRMS pointcloud to CSV
            # for k in range(uv.shape[1]):

            #     pl_ldrms[int(y_p[k]),int(x_p[k])] = int(100*depth[k])

            # ldrms_filename = output_ldrms_dir + str(timestamp) + '.csv'
            # np.savetxt(ldrms_filename, pl_ldrms, delimiter=",")
        

            # LMS-FRONT
            front_pointcloud, _ = build_pointcloud(lmsfront_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 1e6, timestamp)
            front_pointcloud = np.dot(G_camera_posesource, front_pointcloud)
            wh,xrange = model.project(front_pointcloud,image_size)

            x_f = np.ravel(wh[0,:])
            y_f = np.ravel(wh[1,:])
            z_f = np.ravel(xrange)

            map_front = (np.array(front_pointcloud[0:3,:])).transpose()
            map_front_filename = map_front_dir + str(timestamp) + '.csv'
            np.savetxt(map_front_filename, map_front, delimiter=",")

            if (DRAW_MAP):
                map_x = [numeric_map_x for numeric_map_x in np.array(front_pointcloud[0,:])]
                map_y = [numeric_map_y for numeric_map_y in np.array(front_pointcloud[1,:])]
                map_z = [-numeric_map_z for numeric_map_z in np.array(front_pointcloud[2,:])]

                plt.scatter((map_y),(map_z),c='r',marker='.', zorder=1)

            # LMS-FRONT pointcloud to CSV
            # for k in range(wh.shape[1]):

            #     pl_ldrms[int(y_f[k]),int(x_f[k])] = int(100*xrange[k])

            # front_filename = output_front_dir + str(timestamp) + '.csv'
            # np.savetxt(front_filename, pl_ldrms, delimiter=",")

            # LMS-REAR

            rear_pointcloud, _ = build_pointcloud(lmsrear_dir, poses_file, extrinsics_dir,
                                            start_time, timestamp + 2e6, timestamp)
            rear_pointcloud = np.dot(G_camera_posesource, rear_pointcloud)

            map_rear = (np.array(rear_pointcloud[0:3,:])).transpose()
            map_rear_filename = map_rear_dir + str(timestamp) + '.csv'
            np.savetxt(map_rear_filename, map_rear, delimiter=",")

            if (DRAW_MAP):
                map_x = [numeric_map_x for numeric_map_x in np.array(rear_pointcloud[0,:])]
                map_y = [-numeric_map_y for numeric_map_y in np.array(rear_pointcloud[1,:])]
                map_z = [numeric_map_z for numeric_map_z in np.array(rear_pointcloud[2,:])]

                plt.scatter((map_y),(map_z),c='g',marker='.', zorder=1)


            if (WRITE_IMAGE):
                for k in range(uv.shape[1]):

                    color = (int(255-8*depth[k]),255-3*depth[k],50+3*depth[k])
                    frame= cv2.circle(frame, (int(x_p[k]), int(y_p[k])), 1, color, 1)

                for k in range(wh.shape[1]):

                    color = (int(255-8*xrange[k]),255-3*xrange[k],50+3*xrange[k])
                    frame= cv2.circle(frame, (int(x_f[k]), int(y_f[k])), 1, color, 1)

                cv2.imshow('frame',frame)
                image_filename = output_img_dir   + str(timestamp) + '.png'
                cv2.imwrite(image_filename, frame)
                cv2.waitKey(1)
            # plt.show()
            plt.pause(0.1)