Code Example #1
    def set_active_cam_intrinsics(self,
                                  calibration_np_mat,
                                  width,
                                  height,
                                  max_clipping_range=sys.float_info.max):
        active_vtk_camera = self.vtk_renderer.GetActiveCamera()
        focal_length = calibration_np_mat[0][0]
        # A single VTK view angle can only represent cameras with square
        # pixels, i.e. identical focal lengths in x and y
        if calibration_np_mat[0][0] != calibration_np_mat[1][1]:
            logger.vinfo('calibration_mat', calibration_np_mat)
            assert False
        view_angle = Camera.compute_view_angle(focal_length, width, height)
        active_vtk_camera.SetViewAngle(view_angle)
        active_vtk_camera.SetClippingRange(0.0, max_clipping_range)
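
The snippet relies on Camera.compute_view_angle, which is not shown here. A minimal sketch of what such a helper might compute, assuming VTK's SetViewAngle() expects the full vertical field of view in degrees and the focal length is given in pixels:

import math

def compute_view_angle(focal_length, width, height):
    # Hypothetical sketch: full vertical opening angle (in degrees) of a
    # pinhole camera with the given focal length and image height in pixels.
    # width is unused here, but kept to match the call site's signature.
    return 2.0 * math.degrees(math.atan2(height / 2.0, focal_length))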
Code Example #2
def get_calibration_mat(blender_camera):
    scene = bpy.context.scene
    render_resolution_width = scene.render.resolution_x
    render_resolution_height = scene.render.resolution_y
    max_extent = max(render_resolution_width, render_resolution_height)

    # Convert the focal length from millimeters to pixels using the sensor
    # width and the larger render dimension
    focal_length_in_mm = float(blender_camera.data.lens)
    sensor_width_in_mm = float(blender_camera.data.sensor_width)
    focal_length_in_pixel = \
        float(max_extent) * focal_length_in_mm / sensor_width_in_mm

    # Blender's shift_x / shift_y values are expressed relative to the larger
    # render dimension and move the principal point away from the image center
    p_x = render_resolution_width / 2.0 - blender_camera.data.shift_x * max_extent
    p_y = render_resolution_height / 2.0 - blender_camera.data.shift_y * max_extent

    calibration_mat = Camera.compute_calibration_mat(
        focal_length_in_pixel, cx=p_x, cy=p_y)
    return calibration_mat
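
Camera.compute_calibration_mat is not shown above. A plausible minimal implementation, assuming a pinhole model with square pixels and zero skew:

import numpy as np

def compute_calibration_mat(focal_length, cx, cy):
    # Hypothetical sketch: build a 3x3 pinhole calibration matrix with a
    # single focal length (square pixels) and principal point (cx, cy)
    return np.array([[focal_length, 0, cx],
                     [0, focal_length, cy],
                     [0, 0, 1]], dtype=float)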
Code Example #3
    def _readCameras(self, cameraCount):
        cameras = []
        for index in range(cameraCount):

            # Each camera entry: focal length with two radial distortion
            # coefficients, a 3x3 rotation matrix and a 3-vector translation
            focal_length, k1, k2 = self._readFloatItems()
            rotation = np.array([
                self._readFloatItems(),
                self._readFloatItems(),
                self._readFloatItems()
            ])
            translation = np.array(self._readFloatItems())

            # A focal length of 0 indicates a camera that was not reconstructed
            if focal_length > 0.0:
                camera = Camera()
                camera.set_rotation_mat(rotation)
                camera._translation_vec = translation
                camera.calibration_mat = np.array([[focal_length, 0, 0],
                                                   [0, focal_length, 0],
                                                   [0, 0, 1]])

                cameras.append(camera)  # Only add valid cameras
        return cameras
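
The reader above depends on self._readFloatItems(), which is not shown. A minimal sketch, assuming the parser keeps an open file handle in an attribute such as self._file (the attribute name is an assumption):

def _readFloatItems(self):
    # Hypothetical sketch: read the next line from the parser's file handle
    # and split it into a list of floats
    return [float(item) for item in self._file.readline().split()]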
Code Example #4
def collect_camera_object_trajectory_information(
        virtual_camera_name, render_stereo_camera, stereo_camera_baseline,
        car_body_name, car_body_matrix_world_after_loading):
    logger.info('collect_camera_object_trajectory_information: ...')
    scene = bpy.context.scene

    blender_camera = bpy.data.objects[virtual_camera_name]
    if not scene.camera:
        scene.camera = blender_camera

    camera_object_trajectory = CameraObjectTrajectory()

    # Gather the blender_camera and object transformations at each frame
    for _frm_idx in range(scene.frame_end):

        # Blender indexes frames from 1 to n, so the animation starts at
        # frame 1. Setting the current frame index is required to access
        # the correct values with blender_camera.matrix_world
        corrected_frame_index = _frm_idx + 1
        bpy.context.scene.frame_set(corrected_frame_index)

        current_frame_stem = 'frame' + str(corrected_frame_index).zfill(5)
        current_frame_name = current_frame_stem + '.jpg'

        # The 3x3 part of matrix_world contains a pure rotation only if the
        # object has a scale of (1, 1, 1);
        # otherwise it also contains scale or shear information
        if tuple(blender_camera.scale) != (1, 1, 1):
            logger.vinfo('blender_camera.scale', blender_camera.scale)
            assert False

        calibration_mat = get_calibration_mat(blender_camera)
        rotated_camera_matrix_around_x_by_180 = get_computer_vision_camera_matrix(
            blender_camera)

        cam = Camera()
        cam.set_4x4_cam_to_world_mat(rotated_camera_matrix_around_x_by_180)
        cam.set_calibration(calibration_mat, 0)

        if render_stereo_camera:
            stereo_cam = StereoCamera(left_camera=cam,
                                      baseline=stereo_camera_baseline)
            stereo_cam.left_camera.file_name = current_frame_stem + '_left.jpg'
            stereo_cam.right_camera.file_name = current_frame_stem + '_right.jpg'
            camera_object_trajectory.set_camera(current_frame_name, stereo_cam)
        else:
            cam.file_name = current_frame_name
            camera_object_trajectory.set_camera(current_frame_name, cam)

        # It is important to access the car_body for each frame
        # to get the most recent animated position
        car_body = bpy.data.objects[car_body_name]
        if tuple(car_body.scale) != (1, 1, 1):
            logger.vinfo('car_body_name', car_body_name)
            logger.vinfo('car_body.scale', car_body.scale)
            assert False

        # Make sure matrix_world is up to date
        bpy.context.scene.update()
        object_matrix_world_at_trajectory = np.array(car_body.matrix_world)
        matrix_world_relative_to_initial_pose = object_matrix_world_at_trajectory.dot(
            car_body_matrix_world_after_loading.inverted())

        camera_object_trajectory.set_object_matrix_world(
            current_frame_name, matrix_world_relative_to_initial_pose)

    bpy.context.scene.frame_set(1)
    logger.info('collect_camera_object_trajectory_information: Done')
    return camera_object_trajectory
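
get_computer_vision_camera_matrix is referenced but not shown. A minimal sketch, assuming it converts Blender's computer graphics camera convention (looking along -z, y up) to the computer vision convention (looking along +z, y down) by rotating the camera frame around its x axis by 180 degrees:

import numpy as np

def get_computer_vision_camera_matrix(blender_camera):
    # Hypothetical sketch: negate the y and z axis columns of the
    # camera-to-world matrix, which corresponds to a rotation of the
    # camera frame around the x axis by 180 degrees
    camera_matrix_world = np.array(blender_camera.matrix_world)
    camera_matrix_world[:3, 1] *= -1.0  # flip the camera's y axis
    camera_matrix_world[:3, 2] *= -1.0  # flip the camera's z axis
    return camera_matrix_world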
Code Example #5
    def parse_bundler_file(input_visual_fsm_bundle_out_file_name):

        # =======
        # REMARK: The camera coordinate frame is transformed to
        #         the same camera coordinate frame used by VisualSfM,
        #         i.e. a rotation around the x axis by 180 degrees
        # =======

        input_file = open(input_visual_fsm_bundle_out_file_name, 'r')
        # The first line is just a comment, throw it away
        input_file.readline()

        # Read the number of cameras and points
        current_line = (input_file.readline()).rstrip()
        amount_cams, amount_points = map(int, current_line.split(' '))
        print(amount_cams)
        print(amount_points)

        cameras = []

        for camera_index in range(amount_cams):

            # Each camera entry <cameraI> contains the estimated camera intrinsics and extrinsics, and has the form:
            # <f> <k1> <k2>   [the focal length, followed by two radial distortion coeffs]
            # <R>             [a 3x3 matrix representing the camera rotation]
            # <t>             [a 3-vector describing the camera translation]

            current_line = (input_file.readline()).rstrip()     # read internal camera parameters
            # TODO Handle Radial Distortion
            focal_length = float(current_line.split(' ')[0])
            camera_calibration_matrix = np.asarray(
                [[focal_length, 0, 0], [0, focal_length, 0], [0, 0, 1]],
                dtype=float)

            # use map to convert all string numbers to floats;
            # Python 3 requires an explicit cast from 'map object' to list
            current_line = (input_file.readline()).rstrip()
            current_line = (input_file.readline()).rstrip()
            first_row = list(map(float, current_line.split(' ')))
            current_line = (input_file.readline()).rstrip()
            second_row = list(map(float, current_line.split(' ')))
            current_line = (input_file.readline()).rstrip()
            third_row = list(map(float, current_line.split(' ')))

            cam_rotation_matrix = np.asarray(
                [first_row, second_row, third_row],
                dtype=float)

            # this line contains the translation vector and not the camera center
            current_line = (input_file.readline()).rstrip()     # read translation vector
            translation_vector = np.asarray(
                list(map(float, current_line.split(' '))),
                dtype=float)

            # Transform the camera coordinate system from the computer graphics
            # camera coordinate frame (used by Bundler) to the computer vision
            # camera coordinate frame, i.e. rotate the camera matrix around the
            # x axis by 180 degrees. Inverting the y and z axis transforms the
            # points into the camera coordinate frame used by VisualSfM
            cam_rotation_matrix = TransformationCollection.invert_y_and_z_axis(cam_rotation_matrix)
            translation_vector = TransformationCollection.invert_y_and_z_axis(translation_vector)

            current_camera = Camera()
            current_camera.set_rotation_mat(cam_rotation_matrix)
            # set camera center AFTER rotation (since setting the center sets also the translation vector)
            #current_camera.set_camera_center_after_rotation(center_vec)
            current_camera.set_camera_translation_vector_after_rotation(translation_vector)

            # from cam coordinates to world coordinates
            cam_rotation_matrix_inv = np.linalg.inv(cam_rotation_matrix)
            current_camera.rotation_inv_mat = cam_rotation_matrix_inv
            current_camera.calibration_mat = camera_calibration_matrix
            current_camera.id = camera_index

            cameras.append(current_camera)

        points = []
        for point_index in range(0, amount_points):

            current_point = Point()

            # Each point entry has the form:
            # <position>      [a 3-vector describing the 3D position of the point]
            # <color>         [a 3-vector describing the RGB color of the point]
            # <view list>     [a list of views the point is visible in]

            current_line = (input_file.readline()).rstrip()         # read the 3d position
            point_center = np.array(list(map(float, current_line.split(' '))))
            current_point._coord = point_center

            current_line = (input_file.readline()).rstrip()         # read the color
            point_color = np.array(list(map(int, current_line.split(' '))))
            current_point._color = point_color

            current_line = (input_file.readline()).rstrip()         # read the view list
            measurement_values = current_line.split(' ')
            measurements = [measurement_values[1+i*4:1+i*4+4] for i in range(int(measurement_values[0]))]
            # Remark: The y coordinate of each measurement is negated, so that
            # the y axis points downwards (as in VisualSfM)
            current_point.measurements = [
                (int(measurement[0]), int(measurement[1]), float(measurement[2]), -float(measurement[3]))
                for measurement in measurements]
            current_point.id = point_index

            points.append(current_point)

        input_file.close()
        return cameras, points
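
TransformationCollection.invert_y_and_z_axis is applied to both the 3x3 rotation matrix and the 3-vector translation. A minimal sketch, assuming it negates the y and z components, which is equivalent to left-multiplying by diag(1, -1, -1), i.e. a rotation around the x axis by 180 degrees:

import numpy as np

class TransformationCollection:
    @staticmethod
    def invert_y_and_z_axis(matrix_or_vector):
        # Hypothetical sketch: negate index 1 and 2 along the first axis.
        # For a 3x3 matrix this negates the second and third rows, for a
        # 3-vector the second and third entries
        result = np.array(matrix_or_vector, dtype=float)
        result[1] = -result[1]
        result[2] = -result[2]
        return result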
Code Example #6
    def parse_camera_and_object_trajectory_file(cam_and_obj_trajectory_fp):
        """
        Access the returned data structure with
        # is a numpy 4x4 array (homogeneous transformation)
        image_name_to_cam_obj_transf[index]['camera_pose']
        # is a numpy 4x4 array (homogeneous transformation)
        image_name_to_cam_obj_transf[index]['object_matrix_world']
        """
        logger.info('parse_camera_and_object_trajectory_file: ...')
        logger.info('cam_and_obj_trajectory_fp: ' + cam_and_obj_trajectory_fp)

        with open(cam_and_obj_trajectory_fp) as gt_file:
            gt_content = gt_file.readlines()

        lines_per_frame, num_lines_per_frame = TrajectoryFileHandler.compute_gt_lines_per_frame(
            gt_content)

        camera_object_trajectory = CameraObjectTrajectory()

        # We cannot use enumerate here, since we do not know
        # if the first camera is actually part of the trajectory
        for content in lines_per_frame:

            logger.info('content: ' + str(content))
            image_name = content[0]

            cam_left = Camera()

            current_line_idx = 2
            if num_lines_per_frame > 11:
                camera_calibration_matrix = TrajectoryFileHandler._parse_matrix_3x3(
                    content, start_index=current_line_idx)
                cam_left.set_calibration(camera_calibration_matrix,
                                         radial_distortion=0)
                current_line_idx += 3

            camera_matrix_left_world = TrajectoryFileHandler._parse_matrix_4x4(
                content, start_index=current_line_idx)
            cam_left.set_4x4_cam_to_world_mat(camera_matrix_left_world)
            current_line_idx += 4

            # current_line_idx == 9 if a calibration matrix was present
            baseline_or_object_trans_str = content[current_line_idx]
            current_line_idx += 1

            # Check for the stereo case (legacy file structure handling)
            if baseline_or_object_trans_str != 'Object to World transformation':

                if baseline_or_object_trans_str == 'None':
                    baseline = None
                else:
                    baseline = float(baseline_or_object_trans_str)

                camera_matrix_right_world = TrajectoryFileHandler._parse_matrix_4x4(
                    content, start_index=current_line_idx)

                # Check if stereo camera is defined using the baseline
                if baseline is not None and baseline > 0:
                    stereo_cam = StereoCamera(cam_left, baseline=baseline)
                    camera_object_trajectory.set_camera(image_name, stereo_cam)

                # Check if the stereo camera is defined using the second transformation matrix
                elif not np.allclose(camera_matrix_right_world,
                                     np.zeros((4, 4), dtype=float)):

                    cam_right = Camera()
                    cam_right.set_calibration(camera_calibration_matrix,
                                              radial_distortion=0)
                    cam_right.set_4x4_cam_to_world_mat(
                        camera_matrix_right_world)

                    stereo_cam = StereoCamera(left_camera=cam_left,
                                              right_camera=cam_right)
                    camera_object_trajectory.set_camera(image_name, stereo_cam)

                current_line_idx += 5  # 1 line for the stereo baseline + 4 lines for the matrix

            else:
                # No stereo information present, treat the camera as monocular
                camera_object_trajectory.set_camera(image_name, cam_left)

            object_matrix_world = TrajectoryFileHandler._parse_matrix_4x4(
                content, start_index=current_line_idx)

            # logger.vinfo('object_matrix_world', object_matrix_world)

            # TODO THIS IS NOT READY FOR USAGE
            # TODO SCALE IS ATM NOT HANDLED BY THE COORDINATE_FRAME_SYSTEM class
            #obj_coord_sys = CoordinateFrameSystem()
            #obj_coord_sys.set_4x4_cam_to_world_mat(object_matrix_world)
            #camera_object_trajectory.add_object_single_coord_system(image_name, obj_coord_sys)

            # FIXME Temporary solution
            camera_object_trajectory.set_object_matrix_world(
                image_name, object_matrix_world)

        logger.info('parse_camera_and_object_trajectory_file: Done')

        return camera_object_trajectory
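
TrajectoryFileHandler._parse_matrix_3x3 and _parse_matrix_4x4 are not shown. A minimal sketch of the 4x4 variant, written here as a standalone function and assuming each matrix row is stored as one whitespace-separated line in content:

import numpy as np

def _parse_matrix_4x4(content, start_index):
    # Hypothetical sketch: parse four consecutive lines, each holding four
    # whitespace-separated floats, into a 4x4 numpy array
    rows = [list(map(float, content[start_index + i].split()))
            for i in range(4)]
    return np.asarray(rows, dtype=float)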
Code Example #7
    def _parse_cameras(input_file, num_cameras, camera_calibration_matrix):

        # VisualSfM's CAMERA coordinate system is the standard
        # CAMERA coordinate system in computer vision
        # (not the one used in computer graphics, e.g. by Bundler or Blender)
        # That means
        #       the y axis in the image is pointing downwards (not upwards)
        #       the camera is looking along the positive z axis
        #       (points in front of the camera have a positive z value)
        #
        # Compared to the computer graphics convention, the CAMERA MATRICES
        # are rotated around the x axis by 180 degrees,
        # i.e. their y and z axis are inverted; therefore, the y and z axis
        # of the TRANSLATION VECTOR are inverted as well

        cameras = []

        for camera_index in range(num_cameras):
            line = input_file.readline()

            # <Camera> = <File name> <focal length> <quaternion WXYZ> <camera center> <radial distortion> 0
            line_values = line.split()

            file_name = os.path.basename(line_values[0])

            focal_length = float(line_values[1])

            if camera_calibration_matrix is None:
                camera_calibration_matrix = np.array([[focal_length, 0, 0],
                                                      [0, focal_length, 0],
                                                      [0, 0, 1]])

            quaternion_w = float(line_values[2])
            quaternion_x = float(line_values[3])
            quaternion_y = float(line_values[4])
            quaternion_z = float(line_values[5])
            quaternion = np.array(
                [quaternion_w, quaternion_x, quaternion_y, quaternion_z],
                dtype=float)

            camera_center_x = float(line_values[6])
            camera_center_y = float(line_values[7])
            camera_center_z = float(line_values[8])
            center_vec = np.array(
                [camera_center_x, camera_center_y, camera_center_z])

            radial_distortion = float(line_values[9])
            zero_value = float(line_values[10])
            assert zero_value == 0

            current_camera = Camera()
            # Setting the quaternion also sets the rotation matrix
            current_camera.set_quaternion(quaternion)

            # Set the camera center after rotation
            # COMMENT FROM PBA CODE:
            #   older format for compatibility
            #   camera_data[i].SetQuaternionRotation(q); // quaternion from the file
            #   camera_data[i].SetCameraCenterAfterRotation(c); // camera center from the file
            current_camera._center = center_vec

            # Set the camera view direction as normal w.r.t. world coordinates
            cam_view_vec_cam_coord = np.array([0, 0, 1])
            cam_rotation_matrix_inv = np.linalg.inv(
                current_camera.get_rotation_mat())
            cam_view_vec_world_coord = cam_rotation_matrix_inv.dot(
                cam_view_vec_cam_coord)
            current_camera.normal = cam_view_vec_world_coord

            translation_vec = compute_camera_coordinate_system_translation_vector(
                center_vec, current_camera.get_rotation_mat())
            current_camera._translation_vec = translation_vec

            current_camera.set_calibration(camera_calibration_matrix,
                                           radial_distortion)
            current_camera.file_name = file_name
            current_camera.camera_index = camera_index

            cameras.append(current_camera)

        return cameras
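
compute_camera_coordinate_system_translation_vector is not shown. Assuming the extrinsics map a world point X to camera coordinates via R X + t, the camera center c satisfies R c + t = 0, hence t = -R c; a minimal sketch under that assumption:

import numpy as np

def compute_camera_coordinate_system_translation_vector(center_vec, rotation_mat):
    # Hypothetical sketch: the camera center projects to the origin of the
    # camera coordinate system, so t = -R @ c
    return -np.asarray(rotation_mat).dot(np.asarray(center_vec))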
Code Example #8
    def parse_cameras(json_data):

        views = json_data['views']
        intrinsics = json_data['intrinsics']
        extrinsics = json_data['extrinsics']

        # Remark: extrinsics may contain only a subset of the views!
        #         (not all views are part of the reconstruction)
        #         Matching entries are determined by view['key'] == extrinsic['key']

        cams = []

        # Mapping from input images to reconstructed cameras
        image_index_to_camera_index = {}

        # IMPORTANT: 'views' contains an entry for every input image
        #            (this may be more than the number of reconstructed poses)
        for rec_index, extrinsic in enumerate(extrinsics):    # Iterate over extrinsics, not views!

            camera = Camera()

            # The key is defined w.r.t. view indices (NOT reconstructed camera indices)
            view_index = int(extrinsic['key'])

            image_index_to_camera_index[view_index] = rec_index

            view = views[view_index]

            camera.file_name = view['value']['ptr_wrapper']['data']['filename']
            id_intrinsic = view['value']['ptr_wrapper']['data']['id_intrinsic']

            # handle intrinsic params
            intrinsic_params = intrinsics[int(id_intrinsic)]['value']['ptr_wrapper']['data']
            focal_length = intrinsic_params['focal_length']
            principal_point = intrinsic_params['principal_point']

            if 'disto_k3' in intrinsic_params:
                logger.info('3 Radial Distortion Parameters are not supported')
                assert False

            # For radial distortion there are several options:
            # "None", disto_k1, disto_k3
            if 'disto_k1' in intrinsic_params:
                radial_distortion = float(intrinsic_params['disto_k1'][0])
            else:  # No radial distortion, i.e. pinhole camera model
                radial_distortion = 0

            camera.set_calibration(
                np.asarray([[focal_length, 0, principal_point[0]],
                            [0, focal_length, principal_point[1]],
                            [0, 0, 1]], dtype=float),
                radial_distortion)

            # handle extrinsic params (pose = extrinsic)

            extrinsic_params = extrinsic['value']
            cam_rotation_list = extrinsic_params['rotation']
            camera.set_rotation_mat(np.array(cam_rotation_list, dtype=float))
            camera._center = np.array(extrinsic_params['center'], dtype=float).T

            # The camera looks in the positive z direction
            cam_view_vec_cam_coordinates = np.array([0, 0, 1])

            # transform view direction from cam coordinates to world coordinates
            # for rotations the inverse is equal to the transpose
            rotation_inv_mat = camera.get_rotation_mat().T
            camera.normal = rotation_inv_mat.dot(cam_view_vec_cam_coordinates)

            camera.view_index = view_index

            cams.append(camera)
        return cams, image_index_to_camera_index
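
A minimal usage sketch for the parser above, assuming an OpenMVG reconstruction exported as JSON (the file name sfm_data.json is an assumption):

import json

with open('sfm_data.json', 'r') as json_file:
    json_data = json.load(json_file)

cams, image_index_to_camera_index = parse_cameras(json_data)
for cam in cams:
    print(cam.file_name, cam.view_index)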
Code Example #9
    def convert_colmap_cams_to_cams(id_to_col_cameras, id_to_col_images,
                                    image_dp):
        # From photogrammetry_importer\ext\read_write_model.py
        #   CameraModel = collections.namedtuple(
        #       "CameraModel", ["model_id", "model_name", "num_params"])
        #   Camera = collections.namedtuple(
        #       "Camera", ["id", "model", "width", "height", "params"])
        #   BaseImage = collections.namedtuple(
        #       "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])

        cameras = []
        for col_image in id_to_col_images.values():
            current_camera = Camera()
            current_camera.id = col_image.id
            # COLMAP stores the world-to-camera transformation as a
            # quaternion (qvec) and a translation vector (tvec)
            current_camera.set_quaternion(col_image.qvec)
            current_camera.set_camera_translation_vector_after_rotation(
                col_image.tvec)

            current_camera.image_dp = image_dp
            current_camera.file_name = col_image.name

            camera_model = id_to_col_cameras[col_image.camera_id]

            current_camera.width = camera_model.width
            current_camera.height = camera_model.height

            fx, fy, cx, cy, skew = parse_camera_param_list(camera_model)
            camera_calibration_matrix = np.array([[fx, skew, cx], [0, fy, cy],
                                                  [0, 0, 1]])
            current_camera.set_calibration(camera_calibration_matrix,
                                           radial_distortion=0)

            cameras.append(current_camera)

        return cameras
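
parse_camera_param_list is not shown. A minimal sketch covering the two most common COLMAP camera models (SIMPLE_PINHOLE stores [f, cx, cy], PINHOLE stores [fx, fy, cx, cy]; other models would need additional branches):

def parse_camera_param_list(camera_model):
    # Hypothetical sketch: map the COLMAP parameter list of the given
    # camera model to (fx, fy, cx, cy, skew)
    params = camera_model.params
    if camera_model.model == 'SIMPLE_PINHOLE':   # params: f, cx, cy
        f, cx, cy = params
        return f, f, cx, cy, 0
    if camera_model.model == 'PINHOLE':          # params: fx, fy, cx, cy
        fx, fy, cx, cy = params
        return fx, fy, cx, cy, 0
    raise NotImplementedError(
        'Camera model not handled in this sketch: ' + str(camera_model.model))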