Example #1
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    pix_per_deg = 2
    om_pix = sim.get_pix_coords()
    plr_degs = om_pix / pix_per_deg
    plr_rads = plr_degs * math.pi / 180
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        depth, rgb = sim.omcam.cap()
        plr = ivy.concatenate([plr_rads, depth], -1)
        xyz_wrt_cam = ivy_mech.polar_to_cartesian_coords(plr)
        xyz_wrt_cam = ivy.reshape(xyz_wrt_cam, (-1, 3))
        xyz_wrt_cam_homo = ivy_mech.make_coordinates_homogeneous(xyz_wrt_cam)
        inv_ext_mat_trans = ivy.transpose(sim.omcam.get_inv_ext_mat(), (1, 0))
        xyz_wrt_world = ivy.matmul(xyz_wrt_cam_homo, inv_ext_mat_trans)[..., 0:3]
        with ivy.numpy.use:
            omni_cam_inv_ext_mat = ivy_mech.make_transformation_homogeneous(
                ivy.to_numpy(sim.omcam.get_inv_ext_mat()))
        vis.show_point_cloud(xyz_wrt_world, rgb, interactive,
                             sphere_inv_ext_mats=[omni_cam_inv_ext_mat], sphere_radii=[0.025])
        if not interactive:
            sim.omcam.set_pos(sim.omcam.get_pos()
                               + ivy.array([-0.01, 0.01, 0.]))
    sim.close()
    unset_framework()
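
The core of this demo is the chain polar → Cartesian → homogeneous → world frame. A minimal NumPy sketch of that chain (the angle convention here is an assumption for illustration; check ivy_mech.polar_to_cartesian_coords for the exact layout):

import numpy as np

def polar_to_cartesian(plr):
    # assumed (azimuth, elevation, radius) layout -- illustrative,
    # not necessarily the exact convention used by ivy_mech
    az, el, r = plr[..., 0], plr[..., 1], plr[..., 2]
    return np.stack([r * np.cos(el) * np.sin(az),
                     r * np.cos(el) * np.cos(az),
                     r * np.sin(el)], -1)

xyz_wrt_cam = polar_to_cartesian(np.array([[0., 0., 2.]]))      # N x 3
xyz_homo = np.concatenate([xyz_wrt_cam, np.ones((1, 1))], -1)   # N x 4
inv_ext_mat = np.hstack([np.eye(3), np.ones((3, 1))])           # 3 x 4, camera-to-world
xyz_wrt_world = xyz_homo @ inv_ext_mat.T                        # N x 3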
Example #2
    def setup_primitive_scene(self):

        # shape matrices
        shape_matrices = ivy.concatenate([ivy.reshape(ivy.array(obj.get_matrix(), 'float32'), (1, 3, 4))
                                          for obj in self._objects], 0)

        # shape dims
        x_dims = ivy.concatenate([ivy.reshape(ivy.array(
            obj.get_bounding_box()[1] - obj.get_bounding_box()[0], 'float32'), (1, 1)) for obj in self._objects], 0)
        y_dims = ivy.concatenate([ivy.reshape(ivy.array(
            obj.get_bounding_box()[3] - obj.get_bounding_box()[2], 'float32'), (1, 1)) for obj in self._objects], 0)
        z_dims = ivy.concatenate([ivy.reshape(ivy.array(
            obj.get_bounding_box()[5] - obj.get_bounding_box()[4], 'float32'), (1, 1)) for obj in self._objects], 0)
        shape_dims = ivy.concatenate((x_dims, y_dims, z_dims), -1)

        # primitive scene visualization
        if self._with_primitive_scene_vis:
            scene_vis = [Shape.create(PrimitiveShape.CUBOID, ivy.to_numpy(shape_dim).tolist())
                         for shape_dim in shape_dims]
            for shape_mat, obj in zip(shape_matrices, scene_vis):
                obj.set_matrix(ivy.to_numpy(shape_mat).reshape(-1).tolist())
                obj.set_transparency(0.5)

        # sdf
        primitive_scene = PrimitiveScene(cuboid_ext_mats=ivy.inv(ivy_mech.make_transformation_homogeneous(
            shape_matrices))[..., 0:3, :], cuboid_dims=shape_dims)
        self.sdf = primitive_scene.sdf
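
The pattern ivy.inv(ivy_mech.make_transformation_homogeneous(...))[..., 0:3, :] recurs throughout these examples. A plain-NumPy sketch of what the batched homogenize-then-invert step presumably amounts to (appending a [0, 0, 0, 1] row to every 3 x 4 matrix):

import numpy as np

def make_homogeneous(mats):
    # append [0, 0, 0, 1] to every (..., 3, 4) matrix in the batch
    bottom = np.zeros(mats.shape[:-2] + (1, 4), mats.dtype)
    bottom[..., 0, 3] = 1.
    return np.concatenate([mats, bottom], -2)

inv_ext_mats = np.tile(np.hstack([np.eye(3), np.ones((3, 1))]), (5, 1, 1))  # 5 x 3 x 4
ext_mats = np.linalg.inv(make_homogeneous(inv_ext_mats))[..., 0:3, :]       # 5 x 3 x 4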
Example #3
    def measure_incremental_mat(self):
        inv_ext_mat = ivy.reshape(ivy.array(self._handle.get_matrix()), (3, 4))
        inv_ext_mat_homo = ivy_mech.make_transformation_homogeneous(inv_ext_mat)
        ext_mat_homo = ivy.inv(inv_ext_mat_homo)
        ext_mat = ext_mat_homo[0:3, :]
        rel_mat = ivy.matmul(ext_mat, self._inv_ext_mat_homo)
        self._inv_ext_mat_homo = inv_ext_mat_homo
        return rel_mat
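
In effect this returns the 3 x 4 motion between the previous and current poses: with the current extrinsic E_t and the cached previous camera-to-world matrix T_{t-1} in homogeneous form, rel = E_t @ T_{t-1}. A small NumPy illustration with made-up poses:

import numpy as np

prev_inv_ext_homo = np.eye(4)                                     # previous camera-to-world pose
curr_ext = np.hstack([np.eye(3), np.array([[0.], [0.], [0.1]])])  # current 3 x 4 extrinsic
rel_mat = curr_ext @ prev_inv_ext_homo                            # 3 x 4 incremental motion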
Example #4
def get_fundamental_matrix(full_mat1,
                           full_mat2,
                           camera_center1=None,
                           pinv_full_mat1=None,
                           batch_shape=None,
                           dev_str=None):
    """
    Compute fundamental matrix :math:`\mathbf{F}\in\mathbb{R}^{3\times 3}` between two cameras, given their full
    projection matrices :math:`\mathbf{P}_1\in\mathbb{R}^{3\times 4}` and :math:`\mathbf{P}_2\in\mathbb{R}^{3\times 4}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=262>`_
    bottom of page 244, section 9.2.2, equation 9.1

    :param full_mat1: Frame 1 full projection matrix *[batch_shape,3,4]*
    :type full_mat1: array
    :param full_mat2: Frame 2 full projection matrix *[batch_shape,3,4]*
    :type full_mat2: array
    :param camera_center1: Frame 1 camera center, inferred from full_mat1 if None *[batch_shape,3,1]*
    :type camera_center1: array, optional
    :param pinv_full_mat1: Frame 1 full projection matrix pseudo-inverse, inferred from full_mat1 if None *[batch_shape,4,3]*
    :type pinv_full_mat1: array, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as full_mat1 if None.
    :type dev_str: str, optional
    :return: Fundamental matrix connecting frames 1 and 2 *[batch_shape,3,3]*
    """

    if batch_shape is None:
        batch_shape = full_mat1.shape[:-2]

    if dev_str is None:
        dev_str = _ivy.dev_str(full_mat1)

    # shapes as list
    batch_shape = list(batch_shape)

    if camera_center1 is None:
        inv_full_mat1 = _ivy.inv(
            _ivy_mech.make_transformation_homogeneous(full_mat1, batch_shape,
                                                      dev_str))[..., 0:3, :]
        camera_center1 = _ivy_svg.inv_ext_mat_to_camera_center(inv_full_mat1)

    if pinv_full_mat1 is None:
        pinv_full_mat1 = _ivy.pinv(full_mat1)

    # BS x 4 x 1
    camera_center1_homo = _ivy.concatenate(
        (camera_center1, _ivy.ones(batch_shape + [1, 1], dev_str=dev_str)), -2)

    # BS x 3
    e2 = _ivy.matmul(full_mat2, camera_center1_homo)[..., -1]

    # BS x 3 x 3
    e2_skew_symmetric = _ivy.linalg.vector_to_skew_symmetric_matrix(e2)

    # BS x 3 x 3
    return _ivy.matmul(e2_skew_symmetric, _ivy.matmul(full_mat2,
                                                      pinv_full_mat1))
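
As a sanity check of equation 9.1, F = [e2]_x @ full_mat2 @ pinv(full_mat1), here is a small self-contained NumPy version with toy projection matrices; the epipolar constraint x2^T F x1 = 0 should hold for any projected point:

import numpy as np

def skew(v):
    # [v]_x, so that skew(v) @ u == np.cross(v, u)
    return np.array([[0., -v[2], v[1]],
                     [v[2], 0., -v[0]],
                     [-v[1], v[0], 0.]])

P1 = np.hstack([np.eye(3), np.zeros((3, 1))])               # toy frame-1 projection matrix
P2 = np.hstack([np.eye(3), np.array([[-1.], [0.], [0.]])])  # frame 2, shifted along x

_, _, vt = np.linalg.svd(P1)
C1 = vt[-1] / vt[-1][-1]                # camera-1 centre: right null-space of P1

e2 = P2 @ C1                            # epipole in image 2
F = skew(e2) @ P2 @ np.linalg.pinv(P1)  # equation 9.1

X = np.array([0.3, -0.2, 2.0, 1.0])     # an arbitrary homogeneous 3D point
x1, x2 = P1 @ X, P2 @ X
print(abs(x2 @ F @ x1) < 1e-10)         # True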
Example #5
def triangulate_depth(ds_pixel_coords,
                      full_mats,
                      inv_full_mats=None,
                      camera_centers=None,
                      method='cmp',
                      batch_shape=None,
                      image_dims=None):
    """
    Triangulate depth in frame 1, returning the depth-scaled homogeneous pixel co-ordinate image
    :math:`\mathbf{X}\in\mathbb{R}^{h\times w\times 3}` in frame 1.\n

    :param ds_pixel_coords: Homogeneous pixel co-ordinate images for frames 1 and 2 *[batch_shape,2,h,w,3]*
    :type ds_pixel_coords: array
    :param full_mats: Full projection matrices *[batch_shape,2,3,4]*
    :type full_mats: array
    :param inv_full_mats: Inverse full projection matrices, required for closest_mutual_points method *[batch_shape,2,3,4]*
    :type inv_full_mats: array, optional
    :param camera_centers: Camera centers, required for closest_mutual_points method *[batch_shape,2,3,1]*
    :type camera_centers: array, optional
    :param method: Triangulation method, one of [cmp|dlt] (closest mutual points or homogeneous DLT); 'cmp' by default
    :type method: str, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :return: Depth scaled homogeneous pixel co-ordinates image in frame 1 *[batch_shape,h,w,3]*
    """

    if batch_shape is None:
        batch_shape = ds_pixel_coords.shape[:-4]

    if image_dims is None:
        image_dims = ds_pixel_coords.shape[-3:-1]

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    if method == 'cmp':

        if inv_full_mats is None:
            inv_full_mats = _ivy.inv(
                _ivy_mech.make_transformation_homogeneous(
                    full_mats, batch_shape + [2]))[..., 0:3, :]

        if camera_centers is None:
            camera_centers = _ivy_svg.inv_ext_mat_to_camera_center(
                inv_full_mats)

    try:
        return TRI_METHODS[method](ds_pixel_coords, full_mats, inv_full_mats,
                                   camera_centers, batch_shape, image_dims)
    except KeyError:
        raise Exception(
            'Triangulation method must be one of [cmp|dlt], but found {}'.
            format(method))
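
The 'dlt' branch dispatched through TRI_METHODS is the homogeneous DLT; a single-point NumPy sketch of that strategy (the batched image-wide version above differs in layout, not in the algebra):

import numpy as np

def triangulate_dlt(x1, x2, P1, P2):
    # homogeneous DLT: stack one pair of equations per image coordinate
    A = np.stack([x1[0] * P1[2] - P1[0],
                  x1[1] * P1[2] - P1[1],
                  x2[0] * P2[2] - P2[0],
                  x2[1] * P2[2] - P2[1]])
    _, _, vt = np.linalg.svd(A)
    X = vt[-1]
    return X[:3] / X[3]

P1 = np.hstack([np.eye(3), np.zeros((3, 1))])
P2 = np.hstack([np.eye(3), np.array([[-1.], [0.], [0.]])])
X_true = np.array([0.3, -0.2, 2.0, 1.0])
x1 = (P1 @ X_true)[:2] / (P1 @ X_true)[2]
x2 = (P2 @ X_true)[:2] / (P2 @ X_true)[2]
print(triangulate_dlt(x1, x2, P1, P2))   # ~[0.3, -0.2, 2.0]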
Example #6
    def __init__(self, interactive, try_use_sim):
        super().__init__(interactive, try_use_sim)

        # initialize scene
        if self.with_pyrep:
            for i in range(6):
                self._vision_sensors[i].remove()
                self._vision_sensor_bodies[i].remove()
                for item in self._vision_sensor_rays[i]:
                    item.remove()
            self._default_camera.set_position(np.array([-2.3518, 4.3953, 2.8949]))
            self._default_camera.set_orientation(np.array([i*np.pi/180 for i in [112.90, 27.329, -10.978]]))
            inv_ext_mat = ivy.reshape(ivy.array(self._default_vision_sensor.get_matrix(), 'float32'), (3, 4))
            self.default_camera_ext_mat_homo = ivy.inv(ivy_mech.make_transformation_homogeneous(inv_ext_mat))

            # public objects
            self.omcam = SimCam(self._spherical_vision_sensor)

            # wait for user input
            self._user_prompt('\nInitialized scene with an omni-directional camera in the centre.\n\n'
                              'You can click on the omni-directional camera, which appears as a small floating black sphere, '
                              'then select the box icon with four arrows in the top panel of the simulator, '
                              'and then drag the camera around dynamically.\n'
                              'Starting to drag and then holding ctrl allows you to also drag the camera up and down.\n\n'
                              'This demo enables you to capture 10 different omni-directional images from the camera, '
                              'and render the associated 10 point clouds in an open3D visualizer.\n\n'
                              'Both visualizers can be translated and rotated by clicking either the left mouse button or the wheel, '
                              'and then dragging the mouse.\n'
                              'Scrolling the mouse wheel zooms the view in and out.\n\n'
                              'Press enter in the terminal to use method ivy_mech.polar_to_cartesian_coords and '
                              'show the first cartesian point cloud reconstruction of the scene, '
                              'converted from the polar co-ordinates captured from the omni-directional camera.\n\n')

        else:
            # public objects
            self.omcam = DummyOmCam()
            self.default_camera_ext_mat_homo = ivy.array(
                [[-0.872, -0.489,  0., 0.099],
                 [-0.169,  0.301, -0.938, 0.994],
                 [0.459, -0.818, -0.346, 5.677],
                 [0., 0., 0., 1.]])

            # message
            print('\nInitialized dummy scene with an omni-directional camera in the centre.'
                  '\nClose the visualization window to use method ivy_mech.polar_to_cartesian_coords and show '
                  'a cartesian point cloud reconstruction of the scene, '
                  'converted from the omni-directional camera polar co-ordinates.\n')

            # plot scene before rotation
            if interactive:
                plt.imshow(mpimg.imread(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                     'ptcc_no_sim', 'before_capture.png')))
                plt.show()
Example #7
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    xyzs = list()
    rgbs = list()
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        for cam in sim.cams:
            depth, rgb = cam.cap()
            xyz = sim.depth_to_xyz(depth, cam.get_inv_ext_mat(),
                                   cam.inv_calib_mat, [128] * 2)
            xyzs.append(xyz)
            rgbs.append(rgb)
        xyz = ivy.reshape(ivy.concatenate(xyzs, 1), (-1, 3))
        rgb = ivy.reshape(ivy.concatenate(rgbs, 1), (-1, 3))
        voxels = [
            ivy.to_numpy(item)
            for item in ivy_vision.coords_to_voxel_grid(xyz, [100] * 3,
                                                        features=rgb)
        ]
        cuboid_inv_ext_mats = [
            ivy.to_numpy(
                ivy_mech.make_transformation_homogeneous(
                    cam.get_inv_ext_mat())) for cam in sim.cams
        ]
        with ivy.numpy.use:
            vis.show_voxel_grid(voxels,
                                interactive,
                                cuboid_inv_ext_mats=cuboid_inv_ext_mats,
                                cuboid_dims=[
                                    np.array([0.045, 0.045, 0.112])
                                    for _ in sim.cams
                                ])
        xyzs.clear()
        rgbs.clear()
    sim.close()
    unset_framework()
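
Roughly, ivy_vision.coords_to_voxel_grid buckets world coordinates into a regular grid spanning the point-cloud bounds; a NumPy sketch of that bucketing step (the library's exact scaling and feature-aggregation conventions may differ):

import numpy as np

xyz = np.random.uniform(-1., 1., (1000, 3)).astype('float32')   # stand-in point cloud
res = np.array([100, 100, 100])
mins, maxs = xyz.min(0), xyz.max(0)
# integer voxel index of each point in a 100 x 100 x 100 grid
voxel_idx = np.clip(((xyz - mins) / (maxs - mins) * res).astype(int), 0, res - 1)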
Example #8
    def setup_primitive_scene_no_sim(self, box_pos=None):

        # lists
        shape_matrices_list = list()
        shape_dims_list = list()

        this_dir = os.path.dirname(os.path.realpath(__file__))
        for i in range(11):
            shape_mat = np.load(os.path.join(this_dir, 'no_sim/obj_inv_ext_mat_{}.npy'.format(i)))
            if i == 10 and box_pos is not None:
                shape_mat[..., -1:] = box_pos.reshape((1, 3, 1))
            shape_matrices_list.append(ivy.array(shape_mat, 'float32'))
            shape_dims_list.append(
                ivy.array(np.load(os.path.join(this_dir, 'no_sim/obj_bbx_{}.npy'.format(i))), 'float32')
            )

        # matrices
        shape_matrices = ivy.concatenate(shape_matrices_list, 0)
        shape_dims = ivy.concatenate(shape_dims_list, 0)

        # sdf
        primitive_scene = PrimitiveScene(cuboid_ext_mats=ivy.inv(ivy_mech.make_transformation_homogeneous(
            shape_matrices))[..., 0:3, :], cuboid_dims=shape_dims)
        self.sdf = primitive_scene.sdf
Example #9
    def __init__(self, interactive, try_use_sim):
        super().__init__(interactive, try_use_sim)

        # initialize scene
        if self.with_pyrep:
            self._spherical_vision_sensor.remove()
            for i in range(6):
                self._vision_sensors[i].remove()
                self._vision_sensor_bodies[i].remove()
                for ray in self._vision_sensor_rays[i]:
                    ray.remove()
            self._box.set_position(np.array([0.55, 0, 0.9]))
            self._robot.set_position(np.array([0.85003, -0.024983, 0.77837]))
            self._robot._ik_target.set_position(np.array([0, 0, -1]))
            self._robot.get_tip().set_parent(self._robot._ik_target)
            self._robot.get_tip().set_position(np.array([0, 0, -1]))
            robot_start_config = ivy.array(
                [100., 100., 240., 180., 180., 120.]) * np.pi / 180
            for j, p in zip(self._robot.joints,
                            ivy.to_numpy(robot_start_config).tolist()):
                j.set_joint_position(p, False)
            robot_target_config = ivy.array([260., 100., 220., 0., 180., 45.
                                             ]) * np.pi / 180
            self._robot_target.set_position(
                np.array([0.85003, -0.024983, 0.77837]))
            self._robot_target._ik_target.set_position(np.array([0, 0, -1]))
            self._robot_target.get_tip().set_parent(
                self._robot_target._ik_target)
            self._robot_target.get_tip().set_position(np.array([0, 0, -1]))
            for j, p in zip(self._robot_target.joints,
                            ivy.to_numpy(robot_target_config).tolist()):
                j.set_joint_position(p, False)
            self._default_camera.set_position(
                np.array([0.094016, -1.2767, 1.7308]))
            self._default_camera.set_orientation(
                np.array([i * np.pi / 180
                          for i in [-121.32, 27.760, -164.18]]))

            input(
                '\nScene initialized.\n\n'
                'The simulator visualizer can be translated and rotated by clicking either the left mouse button or the wheel, '
                'and then dragging the mouse.\n'
                'Scrolling the mouse wheel zooms the view in and out.\n\n'
                'You can click on any object either in the scene or the left hand panel, '
                'then select the box icon with four arrows in the top panel of the simulator, '
                'and then drag the object around dynamically.\n'
                'Starting to drag and then holding ctrl allows you to also drag the object up and down.\n'
                'Clicking the top icon with a box and two rotating arrows similarly allows rotation of the object.\n\n'
                'The joint angles of either the robot or target robot configuration can also be changed.\n'
                'To do this, open the Mico or MicoTarget drop-downs on the left, and click on one of the joints "Mico_jointx", '
                'and then click on the magnifying glass over a box on the left-most panel.\n'
                'In the window that opens, change the value in the field Position [deg], and close the window again.\n\n'
                'Once you have arranged the scene as desired, press enter in the terminal to continue with the demo...\n'
            )

            # primitive scene
            self.setup_primitive_scene()

            # robot configs
            robot_start_config = ivy.array(self._robot.get_joint_positions(),
                                           'float32')
            robot_target_config = ivy.array(
                self._robot_target.get_joint_positions(), 'float32')

            # ivy robot
            self._ivy_manipulator = MicoManipulator(
                ivy_mech.make_transformation_homogeneous(
                    ivy.reshape(ivy.array(self._robot_base.get_matrix()),
                                (3, 4))))

            # spline path
            interpolated_joint_path = ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config, 100),
                (1, 0))
            multi_spline_points = ivy.transpose(
                self._ivy_manipulator.sample_links(interpolated_joint_path),
                (1, 0, 2))
            multi_spline_sdf_vals = ivy.reshape(
                self.sdf(ivy.reshape(multi_spline_points, (-1, 3))),
                (-1, 100, 1))
            self.update_path_visualization(multi_spline_points,
                                           multi_spline_sdf_vals, None)

            # public objects
            self.ivy_manipulator = self._ivy_manipulator
            self.robot_start_config = robot_start_config
            self.robot_target_config = robot_target_config

            # wait for user input
            self._user_prompt(
                '\nInitialized scene with a robot and a target robot configuration to reach.'
                '\nPress enter in the terminal to use method ivy_robot.interpolate_spline_points '
                'to plan a spline path which reaches the target configuration whilst avoiding the objects in the scene...\n'
            )

        else:

            # primitive scene
            self.setup_primitive_scene_no_sim(box_pos=np.array([0.55, 0, 0.9]))

            # ivy robot
            base_inv_ext_mat = ivy.array([[1, 0, 0, 0.84999895],
                                          [0, 1, 0, -0.02500308],
                                          [0, 0, 1, 0.70000124]])
            self.ivy_manipulator = MicoManipulator(
                ivy_mech.make_transformation_homogeneous(base_inv_ext_mat))
            self.robot_start_config = ivy.array(
                [100., 100., 240., 180., 180., 120.]) * np.pi / 180
            self.robot_target_config = ivy.array(
                [260., 100., 220., 0., 180., 45.]) * np.pi / 180

            # message
            print(
                '\nInitialized dummy scene with a robot and a target robot configuration to reach.'
                '\nClose the visualization window to use method ivy_robot.interpolate_spline_points '
                'to plan a spline path which reaches the target configuration whilst avoiding the objects in the scene...\n'
            )

            # plot scene before rotation
            if interactive:
                plt.imshow(
                    mpimg.imread(
                        os.path.join(
                            os.path.dirname(os.path.realpath(__file__)),
                            'msp_no_sim', 'path_0.png')))
                plt.show()

        # message
        print('\nOptimizing spline path...\n')
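
The spline initialization above reduces to a straight-line interpolation in joint space (ivy.linspace over the 6 joint angles); the same 100 x 6 path in plain NumPy:

import numpy as np

start = np.array([100., 100., 240., 180., 180., 120.]) * np.pi / 180
target = np.array([260., 100., 220., 0., 180., 45.]) * np.pi / 180
path = np.linspace(start, target, 100)   # 100 x 6 interpolated joint configurations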
Example #10
    def get_ext_mat(self):
        return ivy.inv(ivy_mech.make_transformation_homogeneous(self.get_inv_ext_mat()))[0:3, :]
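
When the 3 x 3 block is a pure rotation, the same extrinsic matrix can be recovered without forming the 4 x 4 homogeneous matrix at all, since [R | t]^-1 = [R^T | -R^T t]. A NumPy sketch of that shortcut (valid only for rigid transformations):

import numpy as np

inv_ext_mat = np.hstack([np.eye(3), np.array([[1.], [2.], [3.]])])  # camera-to-world, 3 x 4
R, t = inv_ext_mat[:, :3], inv_ext_mat[:, 3:]
ext_mat = np.hstack([R.T, -R.T @ t])                                # world-to-camera, 3 x 4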
Example #11
File: run_through.py Project: ivy-dl/mech
def main(f=None):

    # Framework Setup #
    # ----------------#

    # choose random framework

    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Orientation #
    # ------------#

    # rotation representations

    # 3
    rot_vec = ivy.array([0., 1., 0.])

    # 3 x 3
    rot_mat = ivy_mech.rot_vec_to_rot_mat(rot_vec)

    # 3
    euler_angles = ivy_mech.rot_mat_to_euler(rot_mat, 'zyx')

    # 4
    quat = ivy_mech.euler_to_quaternion(euler_angles)

    # 4
    axis_and_angle = ivy_mech.quaternion_to_axis_angle(quat)

    # 3
    rot_vec_again = axis_and_angle[..., :-1] * axis_and_angle[..., -1:]

    # Pose #
    # -----#

    # pose representations

    # 3
    position = ivy.ones_like(rot_vec)

    # 6
    rot_vec_pose = ivy.concatenate((position, rot_vec), 0)

    # 3 x 4
    mat_pose = ivy_mech.rot_vec_pose_to_mat_pose(rot_vec_pose)

    # 6
    euler_pose = ivy_mech.mat_pose_to_euler_pose(mat_pose)

    # 7
    quat_pose = ivy_mech.euler_pose_to_quaternion_pose(euler_pose)

    # 6
    rot_vec_pose_again = ivy_mech.quaternion_pose_to_rot_vec_pose(quat_pose)

    # Position #
    # ---------#

    # conversions of positional representation

    # 3
    cartesian_coord = ivy.random_uniform(0., 1., (3, ))

    # 3
    polar_coord = ivy_mech.cartesian_to_polar_coords(cartesian_coord)

    # 3
    cartesian_coord_again = ivy_mech.polar_to_cartesian_coords(polar_coord)

    # cartesian co-ordinate frame-of-reference transformations

    # 3 x 4
    trans_mat = ivy.random_uniform(0., 1., (3, 4))

    # 4
    cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        cartesian_coord)

    # 3
    trans_cartesian_coord = ivy.matmul(
        trans_mat, ivy.expand_dims(cartesian_coord_homo, -1))[:, 0]

    # 4
    trans_cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        trans_cartesian_coord)

    # 4 x 4
    trans_mat_homo = ivy_mech.make_transformation_homogeneous(trans_mat)

    # 3 x 4
    inv_trans_mat = ivy.inv(trans_mat_homo)[0:3]

    # 3
    cartesian_coord_again = ivy.matmul(
        inv_trans_mat, ivy.expand_dims(trans_cartesian_coord_homo, -1))[:, 0]

    # message
    print('End of Run Through Demo!')
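
For reference, the first conversion in the run-through (rotation vector to rotation matrix) is Rodrigues' formula; a plain-NumPy stand-in for ivy_mech.rot_vec_to_rot_mat:

import numpy as np

def rot_vec_to_rot_mat(rot_vec):
    # Rodrigues' formula: R = I + sin(t) K + (1 - cos(t)) K^2
    theta = np.linalg.norm(rot_vec)
    if theta < 1e-12:
        return np.eye(3)
    k = rot_vec / theta
    K = np.array([[0., -k[2], k[1]],
                  [k[2], 0., -k[0]],
                  [-k[1], k[0], 0.]])
    return np.eye(3) + np.sin(theta) * K + (1. - np.cos(theta)) * (K @ K)

R = rot_vec_to_rot_mat(np.array([0., 1., 0.]))   # 1 rad about the y-axis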
Example #12
def depth_from_flow_and_cam_mats(flow,
                                 full_mats,
                                 inv_full_mats=None,
                                 camera_centers=None,
                                 uniform_pixel_coords=None,
                                 triangulation_method='cmp',
                                 batch_shape=None,
                                 image_dims=None,
                                 dev_str=None):
    """
    Compute depth map :math:`\mathbf{X}\in\mathbb{R}^{h\times w\times 1}` in frame 1 using optical flow
    :math:`\mathbf{U}_{1\to 2}\in\mathbb{R}^{h\times w\times 2}` from frame 1 to 2, and the camera geometry.\n

    :param flow: Optical flow from frame 1 to 2 *[batch_shape,h,w,2]*
    :type flow: array
    :param full_mats: Full projection matrices *[batch_shape,2,3,4]*
    :type full_mats: array
    :param inv_full_mats: Inverse full projection matrices, inferred from full_mats if None and 'cmp' triangulation method *[batch_shape,2,3,4]*
    :type inv_full_mats: array, optional
    :param camera_centers: Camera centers, inferred from inv_full_mats if None and 'cmp' triangulation method *[batch_shape,2,3,1]*
    :type camera_centers: array, optional
    :param uniform_pixel_coords: Homogeneous uniform (integer) pixel co-ordinate images, inferred from image_dims if None *[batch_shape,h,w,3]*
    :type uniform_pixel_coords: array, optional
    :param triangulation_method: Triangulation method, one of [cmp|dlt] (closest mutual points or homogeneous DLT); 'cmp' by default
    :type triangulation_method: str, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as flow if None.
    :type dev_str: str, optional
    :return: Depth map in frame 1 *[batch_shape,h,w,1]*
    """

    if batch_shape is None:
        batch_shape = flow.shape[:-3]

    if image_dims is None:
        image_dims = flow.shape[-3:-1]

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    if dev_str is None:
        dev_str = _ivy.dev_str(flow)

    if inv_full_mats is None:
        inv_full_mats = _ivy.inv(
            _ivy_mech.make_transformation_homogeneous(full_mats,
                                                      batch_shape + [2],
                                                      dev_str))[..., 0:3, :]

    if camera_centers is None:
        camera_centers = _ivy_svg.inv_ext_mat_to_camera_center(inv_full_mats)

    if uniform_pixel_coords is None:
        uniform_pixel_coords = _ivy_svg.create_uniform_pixel_coords_image(
            image_dims, batch_shape, dev_str=dev_str)

    # BS x H x W x 3
    flow_homo = _ivy.concatenate(
        (flow, _ivy.zeros(batch_shape + image_dims + [1], dev_str=dev_str)),
        -1)

    # BS x H x W x 3
    transformed_pixel_coords = uniform_pixel_coords + flow_homo

    # BS x 2 x H x W x 3
    pixel_coords = _ivy.concatenate(
        (_ivy.expand_dims(uniform_pixel_coords, -4),
         _ivy.expand_dims(transformed_pixel_coords, -4)), -4)

    # BS x H x W x 1
    return _ivy_tvg.triangulate_depth(pixel_coords, full_mats, inv_full_mats,
                                      camera_centers, triangulation_method,
                                      batch_shape, image_dims)[..., -1:]
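
The pixel-coordinate bookkeeping above is simple to see in isolation: the flow is zero-padded to homogeneous form and added to a uniform pixel-coordinate image. A small NumPy sketch:

import numpy as np

h, w = 2, 3
us, vs = np.meshgrid(np.arange(w), np.arange(h))
uniform = np.stack([us, vs, np.ones_like(us)], -1).astype('float32')  # h x w x 3
flow = np.full((h, w, 2), 0.5, dtype='float32')                       # h x w x 2
flow_homo = np.concatenate([flow, np.zeros((h, w, 1), 'float32')], -1)
transformed = uniform + flow_homo   # pixel co-ordinates as seen in frame 2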
Example #13
    def __init__(self, pyrep_handle, cam):
        self._handle = pyrep_handle
        self._inv_ext_mat_homo = ivy_mech.make_transformation_homogeneous(
            ivy.reshape(ivy.array(self._handle.get_matrix()), (3, 4)))
        self.cam = cam
Example #14
    def __init__(self, interactive, try_use_sim):
        super().__init__(interactive, try_use_sim)

        # initialize scene
        if self.with_pyrep:
            self._spherical_vision_sensor.remove()
            for i in range(0, 3):
                for item in self._vision_sensor_rays[i]:
                    item.set_color([0.2, 0.2, 0.8])
            for i in range(4, 6):
                self._vision_sensors[i].remove()
                self._vision_sensor_bodies[i].remove()
                for item in self._vision_sensor_rays[i]:
                    item.remove()
            self._plant.remove()
            self._dining_chair_0.remove()
            self._dining_chair_1.remove()
            self._dining_table.set_position(np.array([0.3, 0, 0.55]))
            self._dining_table.set_orientation(np.array([np.pi / 2, 0., 0.]))
            self._swivel_chair.set_position(np.array([0.33, 0.98, 0.46]))
            self._swivel_chair.set_orientation(np.array([0., 0., np.pi]))
            self._vision_sensor_0.set_perspective_angle(60)
            self._vision_sensor_0.set_resolution([1024, 1024])
            self._vision_sensor_body_0.set_position(np.array([1.35, -0.05, 1.95]))
            self._vision_sensor_body_0.set_orientation(np.array([-145.81*np.pi/180, -27.763*np.pi/180, 136.5*np.pi/180]))
            self._vision_sensor_1.set_perspective_angle(60)
            self._vision_sensor_1.set_resolution([1024, 1024])
            self._vision_sensor_body_1.set_position(np.array([1.65, -2.075, 0.875]))
            self._vision_sensor_body_1.set_orientation(np.array([-91.181*np.pi/180, -30.478*np.pi/180, -171.39*np.pi/180]))
            self._vision_sensor_2.set_perspective_angle(60)
            self._vision_sensor_2.set_resolution([1024, 1024])
            self._vision_sensor_body_2.set_position(np.array([-0.94, -1.71, 0.994]))
            self._vision_sensor_body_2.set_orientation(np.array([-116.22*np.pi/180, 39.028*np.pi/180, -138.88*np.pi/180]))

            self._vision_sensor_3.set_perspective_angle(60)
            self._vision_sensor_3.set_resolution([512, 512])
            self._vision_sensor_body_3.set_position(np.array([0.35, -2.05, 0.68]))
            self._vision_sensor_body_3.set_orientation(np.array([-90.248*np.pi/180, -1.2555*np.pi/180, -179.88*np.pi/180]))

            self._default_camera.set_position(np.array([2.4732, -3.2641, 2.9269]))
            self._default_camera.set_orientation(np.array([i*np.pi/180 for i in [-134.8, -33.52, 151.26]]))

            # public objects
            self.cams = [SimCam(cam) for cam in self._vision_sensors[0:3]]
            self.target_cam = SimCam(self._vision_sensor_3)

            # wait for user input
            self._user_prompt(
                '\nInitialized scene with 3 acquiring projective cameras (blue rays) and 1 target projective camera (green rays) '
                'facing the overturned table.\n\n'
                'The simulator visualizer can be translated and rotated by clicking either the left mouse button or the wheel, '
                'and then dragging the mouse.\n'
                'Scrolling the mouse wheel zooms the view in and out.\n\n'
                'You can click on the object "vision_sensor_3" in the left hand panel, '
                'then select the box icon with four arrows in the top panel of the simulator, '
                'and then drag the target camera around dynamically.\n'
                'Starting to drag and then holding ctrl allows you to also drag the cameras up and down.\n'
                'Clicking the top icon with a box and two rotating arrows similarly allows rotation of the camera.\n\n'
                'This demo enables you to capture images from the cameras 10 times, '
                'and render the associated images in the target frame.\n\n'
                '\nPress enter to use method ivy_vision.render_pixel_coords and show the first renderings for the target frame, '
                'produced by projecting the depth and color values from the 3 acquiring frames.'
                '\nRenderings are shown both with and without the use of a depth buffer in the ivy_vision method.\n'
                'If the image window appears blank at first, maximize it to show the renderings.')

        else:

            cam_names = ['vs{}'.format(i) for i in range(3)] + ['tvs']
            pp_offsets = ivy.array([item/2 - 0.5 for item in [1024, 1024]])
            persp_angles = ivy.array([60 * np.pi/180]*2)
            intrinsics = ivy_vision.persp_angles_and_pp_offsets_to_intrinsics_object(
                persp_angles, pp_offsets, [1024, 1024])
            calib_mat = intrinsics.calib_mats
            inv_calib_mat = intrinsics.inv_calib_mats

            pp_offsets = ivy.array([item/2 - 0.5 for item in [512, 512]])
            persp_angles = ivy.array([60 * np.pi/180]*2)
            target_intrinsics = ivy_vision.persp_angles_and_pp_offsets_to_intrinsics_object(
                persp_angles, pp_offsets, [512, 512])
            target_calib_mat = target_intrinsics.calib_mats
            target_inv_calib_mat = target_intrinsics.inv_calib_mats

            cam_positions = [ivy.array([1.35, -0.05, 1.95]),
                             ivy.array([1.65, -2.075, 0.875]),
                             ivy.array([-0.94, -1.71, 0.994]),
                             ivy.array([0.35, -2.05, 0.68])]

            cam_quaternions = [ivy.array([-0.40934521,  0.83571182,  0.35003018, -0.10724328]),
                               ivy.array([-0.13167774,  0.7011009,  0.65917628, -0.23791856]),
                               ivy.array([0.44628197, 0.68734518, 0.56583211, 0.09068139]),
                               ivy.array([-0.00698829,  0.70860066,  0.70552395, -0.00850271])]

            cam_quat_poses = [ivy.concatenate((pos, quat), -1) for pos, quat in zip(cam_positions, cam_quaternions)]
            cam_inv_ext_mats = [ivy_mech.quaternion_pose_to_mat_pose(qp) for qp in cam_quat_poses]
            cam_ext_mats = [ivy.inv(ivy_mech.make_transformation_homogeneous(iem))[..., 0:3, :]
                            for iem in cam_inv_ext_mats]
            self.cams = [DummyCam(calib_mat, inv_calib_mat, em, iem, n)
                         for em, iem, n in zip(cam_ext_mats[:-1], cam_inv_ext_mats[:-1], cam_names[:-1])]
            self.target_cam = DummyCam(target_calib_mat, target_inv_calib_mat, cam_ext_mats[-1], cam_inv_ext_mats[-1],
                                       cam_names[-1])

            # message
            print('\nInitialized dummy scene with 3 acquiring projective cameras and 1 target projective camera '
                  'facing the overturned table.'
                  '\nClose the visualization window to use method ivy_vision.render_pixel_coords and show renderings for the target frame, '
                  'produced by projecting the depth and color values from the 3 acquiring frames.'
                  '\nRenderings are shown both with and without the use of a depth buffer in the ivy_vision method.\n')

            # plot scene before rotation
            if interactive:
                import matplotlib.pyplot as plt
                import matplotlib.image as mpimg
                plt.imshow(mpimg.imread(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                     'ri_no_sim', 'before_capture.png')))
                plt.show()
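
The intrinsics object built from perspective angle and principal-point offsets presumably encodes a standard pinhole calibration matrix, with focal length f = (dim / 2) / tan(angle / 2); a NumPy sketch for the 1024 x 1024 case (an illustration of the geometry, not the library's exact construction):

import numpy as np

dim = 1024
persp_angle = 60 * np.pi / 180
f = (dim / 2) / np.tan(persp_angle / 2)   # focal length in pixels
pp = dim / 2 - 0.5                        # principal-point offset, as above
calib_mat = np.array([[f, 0., pp],
                      [0., f, pp],
                      [0., 0., 1.]])
inv_calib_mat = np.linalg.inv(calib_mat)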
Example #15
    def __init__(self, interactive, try_use_sim):
        super().__init__(interactive, try_use_sim)

        # initialize scene
        if self.with_pyrep:
            for i in range(6):
                for item in self._vision_sensor_rays[i]:
                    item.remove()
            self._spherical_vision_sensor.remove()
            self._default_camera.set_position(
                np.array([-2.3518, 4.3953, 2.8949]))
            self._default_camera.set_orientation(
                np.array([i * np.pi / 180 for i in [112.90, 27.329, -10.978]]))
            inv_ext_mat = ivy.reshape(
                ivy.array(self._default_vision_sensor.get_matrix(), 'float32'),
                (3, 4))
            self.default_camera_ext_mat_homo = ivy.inv(
                ivy_mech.make_transformation_homogeneous(inv_ext_mat))

            # public objects
            self.cams = [SimCam(cam) for cam in self._vision_sensors]

            # wait for user input
            self._user_prompt(
                '\nInitialized scene with 6 projective cameras in the centre.\n\n'
                'You can click on the dummy object "vision_sensors" in the left hand panel, '
                'then select the box icon with four arrows in the top panel of the simulator, '
                'and then drag the cameras around dynamically.\n'
                'Starting to drag and then holding ctrl allows you to also drag the cameras up and down.\n\n'
                'This demo enables you to capture images from the cameras 10 times, '
                'and render the associated 10 voxel grids in an open3D visualizer.\n\n'
                'Both visualizers can be translated and rotated by clicking either the left mouse button or the wheel, '
                'and then dragging the mouse.\n'
                'Scrolling the mouse wheel zooms the view in and out.\n\n'
                'Press enter in the terminal to use method ivy_vision.coords_to_bounding_voxel_grid and show the first voxel grid '
                'reconstruction of the scene, produced using the 6 depth and color images from the projective cameras.\n'
            )
        else:

            cam_names = ['vs{}'.format(i) for i in range(6)]
            pp_offsets = ivy.array([item / 2 - 0.5 for item in [128, 128]])
            persp_angles = ivy.array([90 * np.pi / 180] * 2)
            intrinsics = ivy_vision.persp_angles_and_pp_offsets_to_intrinsics_object(
                persp_angles, pp_offsets, [128, 128])
            inv_calib_mat = intrinsics.inv_calib_mats
            cam_positions = [ivy.array([0., 0., 1.5]) for _ in range(6)]
            cam_quaternions = [
                ivy.array([-0.5, 0.5, 0.5, -0.5]),
                ivy.array([0.707, 0, 0, 0.707]),
                ivy.array([1., 0., 0., 0.]),
                ivy.array([0.5, 0.5, 0.5, 0.5]),
                ivy.array([0, 0.707, 0.707, 0]),
                ivy.array([0., 0., 0., 1.])
            ]
            cam_quat_poses = [
                ivy.concatenate((pos, quat), -1)
                for pos, quat in zip(cam_positions, cam_quaternions)
            ]
            cam_inv_ext_mats = [
                ivy_mech.quaternion_pose_to_mat_pose(qp)
                for qp in cam_quat_poses
            ]
            self.cams = [
                DummyCam(inv_calib_mat, iem, n)
                for iem, n in zip(cam_inv_ext_mats, cam_names)
            ]
            self.default_camera_ext_mat_homo = ivy.array(
                [[-0.872, -0.489, 0., 0.099], [-0.169, 0.301, -0.938, 0.994],
                 [0.459, -0.818, -0.346, 5.677], [0., 0., 0., 1.]])

            # message
            print(
                '\nInitialized dummy scene with 6 projective cameras in the centre.'
                '\nClose the visualization window to use method ivy_vision.coords_to_bounding_voxel_grid and show a voxel grid '
                'reconstruction of the scene, produced using the 6 depth and color images from the projective cameras.\n'
            )

            # plot scene before rotation
            if interactive:
                plt.imshow(
                    mpimg.imread(
                        os.path.join(
                            os.path.dirname(os.path.realpath(__file__)),
                            'ctvg_no_sim', 'before_capture.png')))
                plt.show()
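
The quaternion poses above are turned into 3 x 4 inverse extrinsic matrices by ivy_mech.quaternion_pose_to_mat_pose; the rotation part is the standard quaternion-to-matrix formula. A NumPy sketch (assuming the (x, y, z, w) component order, which is worth checking against ivy_mech):

import numpy as np

def quat_pose_to_mat_pose(pose):
    # pose = (px, py, pz, qx, qy, qz, qw) -> 3 x 4 [R | t]
    px, py, pz, x, y, z, w = pose
    R = np.array([
        [1 - 2*(y*y + z*z), 2*(x*y - z*w),     2*(x*z + y*w)],
        [2*(x*y + z*w),     1 - 2*(x*x + z*z), 2*(y*z - x*w)],
        [2*(x*z - y*w),     2*(y*z + x*w),     1 - 2*(x*x + y*y)]])
    return np.hstack([R, np.array([[px], [py], [pz]])])

mat = quat_pose_to_mat_pose(np.array([0., 0., 1.5, 0., 0., 0., 1.]))  # identity rotation at z = 1.5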