Example #1
def transform(coords, trans, batch_shape=None, image_dims=None):
    """
    Transform image of :math:`n`-dimensional co-ordinates :math:`\mathbf{x}\in\mathbb{R}^{h×w×n}` by
    transformation matrix :math:`\mathbf{f}\in\mathbb{R}^{m×n}`, to produce image of transformed co-ordinates
    :math:`\mathbf{x}_{trans}\in\mathbb{R}^{h×w×m}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Matrix_multiplication>`_

    :param coords: Co-ordinate image *[batch_shape,height,width,n]*
    :type coords: array
    :param trans: Transformation matrix *[batch_shape,m,n]*
    :type trans: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :return: Transformed co-ordinate image *[batch_shape,height,width,m]*
    """

    if batch_shape is None:
        batch_shape = coords.shape[:-3]

    if image_dims is None:
        image_dims = coords.shape[-3:-1]

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # transpose idxs
    num_batch_dims = len(batch_shape)
    transpose_idxs = list(
        range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims]

    # BS x (HxW) x N
    coords_flattened = _ivy.reshape(
        coords, batch_shape + [image_dims[0] * image_dims[1], -1])

    # BS x N x (HxW)
    coords_reshaped = _ivy.transpose(coords_flattened, transpose_idxs)

    # BS x M x (HxW)
    transformed_coords_vector = _ivy.matmul(trans, coords_reshaped)

    # BS x (HxW) x M
    transformed_coords_vector_transposed = _ivy.transpose(
        transformed_coords_vector, transpose_idxs)

    # BS x H x W x M
    return _ivy.reshape(transformed_coords_vector_transposed,
                        batch_shape + image_dims + [-1])
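
A minimal usage sketch (hedged: assumes transform is in scope and an ivy backend has been set, as in the demos below), mapping a 3-channel co-ordinate image through a per-batch 2x3 matrix:

# hedged usage sketch for transform()
import ivy
ivy.set_framework('numpy')

coords = ivy.random_uniform(shape=(8, 4, 4, 3))  # batch of 8, 4x4 images, n=3
trans = ivy.random_uniform(shape=(8, 2, 3))      # per-batch 2x3 transform, m=2
transformed = transform(coords, trans)           # [8, 4, 4, 2]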
Example #2
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    pix_per_deg = 2
    om_pix = sim.get_pix_coords()
    plr_degs = om_pix / pix_per_deg
    plr_rads = plr_degs * math.pi / 180
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        depth, rgb = sim.omcam.cap()
        plr = ivy.concatenate([plr_rads, depth], -1)
        xyz_wrt_cam = ivy_mech.polar_to_cartesian_coords(plr)
        xyz_wrt_cam = ivy.reshape(xyz_wrt_cam, (-1, 3))
        xyz_wrt_cam_homo = ivy_mech.make_coordinates_homogeneous(xyz_wrt_cam)
        inv_ext_mat_trans = ivy.transpose(sim.omcam.get_inv_ext_mat(), (1, 0))
        xyz_wrt_world = ivy.matmul(xyz_wrt_cam_homo, inv_ext_mat_trans)[..., 0:3]
        with ivy.numpy.use:
            omni_cam_inv_ext_mat = ivy_mech.make_transformation_homogeneous(
                ivy.to_numpy(sim.omcam.get_inv_ext_mat()))
        vis.show_point_cloud(xyz_wrt_world, rgb, interactive,
                             sphere_inv_ext_mats=[omni_cam_inv_ext_mat], sphere_radii=[0.025])
        if not interactive:
            sim.omcam.set_pos(sim.omcam.get_pos()
                               + ivy.array([-0.01, 0.01, 0.]))
    sim.close()
    unset_framework()
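
The world-frame step inside the loop is a plain homogeneous matrix product applied in row-vector form, which is why the inverse extrinsic matrix is transposed first. A hedged NumPy sketch of just that step, with a hypothetical 3x4 cam-to-world matrix:

import numpy as np

inv_ext_mat = np.eye(4)[:3]               # hypothetical 3x4 cam-to-world matrix
inv_ext_mat[:, -1] = [1., 2., 3.]         # translation component
xyz_wrt_cam = np.array([[0.5, 0., 2.]])   # one point in the camera frame
xyz_homo = np.concatenate([xyz_wrt_cam, np.ones((1, 1))], -1)  # 1 x 4 homogeneous
xyz_wrt_world = xyz_homo @ inv_ext_mat.T  # 1 x 3 -> [[1.5, 2., 5.]]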
Example #3
def projection_matrix_pseudo_inverse(proj_mat, batch_shape=None):
    """
    Given projection matrix :math:`\mathbf{P}\in\mathbb{R}^{3×4}`, compute its pseudo-inverse
    :math:`\mathbf{P}^+\in\mathbb{R}^{4×3}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf?_ijt=25ihpil89dmfo4da975v402ogc#page=179>`_
    bottom of page 161, section 6.2.2
    
    :param proj_mat: Projection matrix *[batch_shape,3,4]*
    :type proj_mat: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Projection matrix pseudo-inverse *[batch_shape,4,3]*
    """

    if batch_shape is None:
        batch_shape = proj_mat.shape[:-2]

    # shapes as list
    batch_shape = list(batch_shape)

    # transpose idxs
    num_batch_dims = len(batch_shape)
    transpose_idxs = list(
        range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims]

    # BS x 4 x 3
    matrix_transposed = _ivy.transpose(proj_mat, transpose_idxs)

    # BS x 4 x 3
    return _ivy.matmul(matrix_transposed,
                       _ivy.inv(_ivy.matmul(proj_mat, matrix_transposed)))
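
This computes the right pseudo-inverse P+ = P^T (P P^T)^-1, so multiplying back recovers the 3x3 identity. A hedged sanity check (assumes the function is in scope and a NumPy ivy backend is set):

import ivy
ivy.set_framework('numpy')

proj_mat = ivy.random_uniform(shape=(3, 4))        # single 3x4 projection matrix
pinv = projection_matrix_pseudo_inverse(proj_mat)  # 4 x 3
print(ivy.matmul(proj_mat, pinv))                  # ~ 3x3 identity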
Example #4
def _fit_spline(train_points, train_values, order):

    # shapes
    train_points_shape = train_points.shape
    batch_shape = list(train_points_shape[:-2])
    num_batch_dims = len(batch_shape)
    n = train_points_shape[-2]
    pd = train_values.shape[-1]

    # BS x N x 1
    c = train_points

    # BS x N x PD
    f_ = train_values

    # BS x N x N
    matrix_a = _phi(_pairwise_distance(c, c), order)

    # BS x N x 1
    ones = _ivy.ones_like(c[..., :1])

    # BS x N x 2
    matrix_b = _ivy.concatenate([c, ones], -1)

    # BS x 2 x N
    matrix_b_trans = _ivy.transpose(
        matrix_b,
        list(range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims])

    # BS x N+2 x N
    left_block = _ivy.concatenate([matrix_a, matrix_b_trans], -2)

    # BS x 2 x 2
    lhs_zeros = _ivy.zeros(batch_shape + [2, 2])

    # BS x N+2 x 2
    right_block = _ivy.concatenate([matrix_b, lhs_zeros], -2)

    # BS x N+2 x N+2
    lhs = _ivy.concatenate([left_block, right_block], -1)

    # BS x 2 x PD
    rhs_zeros = _ivy.zeros(batch_shape + [2, pd])

    # BS x N+2 x PD
    rhs = _ivy.concatenate([f_, rhs_zeros], -2)

    # BS x N+2 x PD
    w_v = _ivy.matmul(_ivy.pinv(lhs), rhs)

    # BS x N x PD
    w = w_v[..., :n, :]

    # BS x 2 x PD
    v = w_v[..., n:, :]

    # BS x N x PD,    BS x 2 x PD
    return w, v
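
_fit_spline solves the polyharmonic-spline system [[A, B], [B^T, 0]] @ [w; v] = [f; 0] via the pseudo-inverse. As a hedged check (assuming the module's private helpers _phi and _pairwise_distance are in scope), evaluating the fitted spline back at the training points should reproduce the training values:

# hedged sketch: recover train_values from the fitted weights w and v,
# given train_points [BS,N,1] and train_values [BS,N,PD]
w, v = _fit_spline(train_points, train_values, order=2)
matrix_a = _phi(_pairwise_distance(train_points, train_points), 2)  # BS x N x N
ones = _ivy.ones_like(train_points[..., :1])
matrix_b = _ivy.concatenate([train_points, ones], -1)               # BS x N x 2
recovered = _ivy.matmul(matrix_a, w) + _ivy.matmul(matrix_b, v)     # ~ train_values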
Example #5
def create_uniform_pixel_coords_image(image_dims, batch_shape=None, normalized=False, dev_str=None):
    """
    Create image of homogeneous integer :math:`xy` pixel co-ordinates :math:`\mathbf{X}\in\mathbb{Z}^{h×w×3}`, stored
    as floating point values. The origin is at the top-left corner of the image, with :math:`+x` rightwards, and
    :math:`+y` downwards. The final homogeneous dimension is all ones. In subsequent use of this image, the depth of
    each pixel can be represented using this same homogeneous representation, by simply scaling each 3-vector by the
    depth value. The final dimension therefore always holds the depth value, while the first two dimensions hold
    depth-scaled pixel co-ordinates.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=172>`_
    deduction from top of page 154, section 6.1, equation 6.1

    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints
    :param batch_shape: Shape of batch. Assumed no batch dimensions if None.
    :type batch_shape: sequence of ints, optional
    :param normalized: Whether to normalize x-y pixel co-ordinates to the range 0-1.
    :type normalized: bool
    :param dev_str: Device on which to create the array: 'cuda:0', 'cuda:1', 'cpu' etc.
    :type dev_str: str, optional
    :return: Image of homogeneous pixel co-ordinates *[batch_shape,height,width,3]*
    """

    # shapes as lists
    batch_shape = [] if batch_shape is None else batch_shape
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # other shape specs
    num_batch_dims = len(batch_shape)
    flat_shape = [1] * num_batch_dims + image_dims + [3]
    tile_shape = batch_shape + [1] * 3

    # H x W x 1
    pixel_x_coords = _ivy.cast(_ivy.reshape(_ivy.tile(_ivy.arange(image_dims[1], dev_str=dev_str), [image_dims[0]]),
                                            (image_dims[0], image_dims[1], 1)), 'float32')
    if normalized:
        pixel_x_coords = pixel_x_coords / (float(image_dims[1]) + MIN_DENOMINATOR)

    # W x H x 1
    pixel_y_coords_ = _ivy.cast(_ivy.reshape(_ivy.tile(_ivy.arange(image_dims[0], dev_str=dev_str), [image_dims[1]]),
                                             (image_dims[1], image_dims[0], 1)), 'float32')

    # H x W x 1
    pixel_y_coords = _ivy.transpose(pixel_y_coords_, (1, 0, 2))
    if normalized:
        pixel_y_coords = pixel_y_coords / (float(image_dims[0]) + MIN_DENOMINATOR)

    # H x W x 1
    ones = _ivy.ones_like(pixel_x_coords, dev_str=dev_str)

    # BS x H x W x 3
    return _ivy.tile(_ivy.reshape(_ivy.concatenate((pixel_x_coords, pixel_y_coords, ones), -1),
                                  flat_shape), tile_shape)
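
A small hedged example of the output layout (assumes a NumPy ivy backend): for a 2x3 image, x runs along the width, y along the height, and the last channel is all ones:

import ivy
ivy.set_framework('numpy')

pix_coords = create_uniform_pixel_coords_image([2, 3])
# shape [2, 3, 3]:
# [[[0. 0. 1.] [1. 0. 1.] [2. 0. 1.]]
#  [[0. 1. 1.] [1. 1. 1.] [2. 1. 1.]]]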
Example #6
def main(interactive=True, try_use_sim=True, f=None):

    # config
    this_dir = os.path.dirname(os.path.realpath(__file__))
    f = choose_random_framework(excluded=['numpy']) if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    lr = 0.5
    num_anchors = 3
    num_sample_points = 100

    # spline start
    anchor_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, 2 + num_anchors), -1), 'float32')
    query_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, num_sample_points), -1), 'float32')

    # learnable parameters
    robot_start_config = ivy.array(ivy.cast(sim.robot_start_config, 'float32'))
    robot_target_config = ivy.array(
        ivy.cast(sim.robot_target_config, 'float32'))
    learnable_anchor_vals = ivy.variable(
        ivy.cast(
            ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config,
                             2 + num_anchors)[..., 1:-1], (1, 0)), 'float32'))

    # optimizer
    optimizer = ivy.SGD(lr=lr)

    # optimize
    it = 0
    colliding = True
    clearance = 0
    joint_query_vals = None
    while colliding:
        total_cost, grads, joint_query_vals, link_positions, sdf_vals = ivy.execute_with_gradients(
            lambda xs: compute_cost_and_sdfs(xs[
                'w'], anchor_points, robot_start_config, robot_target_config,
                                             query_points, sim),
            Container({'w': learnable_anchor_vals}))
        colliding = ivy.reduce_min(sdf_vals[2:]) < clearance
        sim.update_path_visualization(
            link_positions, sdf_vals,
            os.path.join(this_dir, 'msp_no_sim', 'path_{}.png'.format(it)))
        learnable_anchor_vals = optimizer.step(
            Container({'w': learnable_anchor_vals}), grads)['w']
        it += 1
    sim.execute_motion(joint_query_vals)
    sim.close()
    unset_framework()
Example #7
def compute_cost_and_sdfs(learnable_anchor_vals, anchor_points,
                          start_anchor_val, end_anchor_val, query_points, sim):
    anchor_vals = ivy.concatenate(
        (ivy.expand_dims(start_anchor_val, 0), learnable_anchor_vals,
         ivy.expand_dims(end_anchor_val, 0)), 0)
    joint_angles = ivy_robot.sample_spline_path(anchor_points, anchor_vals,
                                                query_points)
    link_positions = ivy.transpose(
        sim.ivy_manipulator.sample_links(joint_angles), (1, 0, 2))
    length_cost = compute_length(link_positions)
    sdf_vals = sim.sdf(ivy.reshape(link_positions, (-1, 3)))
    coll_cost = -ivy.reduce_mean(sdf_vals)
    total_cost = length_cost + coll_cost * 10
    return total_cost[0], joint_angles, link_positions, ivy.reshape(
        sdf_vals, (-1, 100, 1))
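
compute_length is not shown in these examples; a plausible hedged stand-in (a hypothetical helper, not necessarily the demo's own) would sum Euclidean step lengths along the sample axis of the [num_links, num_sample_points, 3] positions:

def compute_length(query_vals):
    # hypothetical sketch: total Euclidean path length, averaged over links
    steps = query_vals[..., 1:, :] - query_vals[..., :-1, :]
    step_lens = ivy.reduce_sum(steps ** 2, -1) ** 0.5
    return ivy.reduce_sum(ivy.reduce_mean(step_lens, 0), 0, keepdims=True)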
Example #8
def compute_cost_and_sdfs(learnable_anchor_vals, anchor_points,
                          start_anchor_val, end_anchor_val, query_points, sim):
    anchor_vals = ivy.concatenate(
        (ivy.expand_dims(start_anchor_val, 0), learnable_anchor_vals,
         ivy.expand_dims(end_anchor_val, 0)), 0)
    poses = ivy_robot.sample_spline_path(anchor_points, anchor_vals,
                                         query_points)
    inv_ext_mat_query_vals = ivy_mech.rot_vec_pose_to_mat_pose(poses)
    body_positions = ivy.transpose(
        sim.ivy_drone.sample_body(inv_ext_mat_query_vals), (1, 0, 2))
    length_cost = compute_length(body_positions)
    sdf_vals = sim.sdf(ivy.reshape(body_positions, (-1, 3)))
    coll_cost = -ivy.reduce_mean(sdf_vals)
    total_cost = length_cost + coll_cost * 10
    return total_cost[0], poses, body_positions, ivy.reshape(
        sdf_vals, (-1, 100, 1))
Example #9
def erosion(tensor: ivy.Array, kernel: ivy.Array) -> ivy.Array:
    r"""Returns the eroded image applying the same kernel in each channel.

    The kernel must have 2 dimensions, each one defined by an odd number.

    Args:
       tensor (ivy.Array): Image with shape :math:`(B, C, H, W)`.
       kernel (ivy.Array): Structuring element with shape :math:`(H, W)`.

    Returns:
       ivy.Array: Eroded image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = ivy.random_uniform(shape=(1, 3, 5, 5))
        >>> kernel = ivy.ones((5, 5))
        >>> output = erosion(tensor, kernel)
    """
    if ivy.backend == 'torch' and not isinstance(tensor, ivy.Array):
        raise TypeError("Input type is not an ivy.Array. Got {}".format(type(tensor)))

    if len(tensor.shape) != 4:
        raise ValueError("Input size must have 4 dimensions. Got {}".format(
            ivy.get_num_dims(tensor)))

    if ivy.backend == 'torch' and not isinstance(kernel, ivy.Array):
        raise TypeError("Kernel type is not a ivy.Array. Got {}".format(type(kernel)))

    if len(kernel.shape) != 2:
        raise ValueError("Kernel size must have 2 dimensions. Got {}".format(
            ivy.get_num_dims(kernel)))

    # prepare kernel
    se_e: ivy.Array = kernel - 1.
    kernel_e: ivy.Array = ivy.transpose(_se_to_mask(se_e), (2, 3, 1, 0))

    # pad
    se_h, se_w = kernel.shape
    pad_e: List[List[int]] = [[0] * 2, [0] * 2, [se_h // 2] * 2, [se_w // 2] * 2]

    output: ivy.Array = ivy.reshape(tensor,
                                     (tensor.shape[0] * tensor.shape[1], 1, tensor.shape[2], tensor.shape[3]))
    output = ivy.constant_pad(output, pad_e, value=1.)
    output = ivy.reduce_min(ivy.conv2d(output, kernel_e, 1, 'VALID', data_format='NCHW') -
                            ivy.reshape(se_e, (1, -1, 1, 1)), [1])
    return ivy.reshape(output, tensor.shape)
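
The conv2d trick above builds one shifted copy of the image per structuring-element cell and takes a channel-wise minimum over them. For intuition, a hedged pure-NumPy equivalent of flat grayscale erosion on a single channel (dilation, below, is the dual: a windowed maximum):

import numpy as np

def erosion_naive(img, se_h, se_w):
    # hedged sketch: per-pixel minimum over an se_h x se_w window,
    # with the border padded by 1. (treated as maximum intensity)
    ph, pw = se_h // 2, se_w // 2
    padded = np.pad(img, ((ph, ph), (pw, pw)), constant_values=1.)
    out = np.empty_like(img)
    for i in range(img.shape[0]):
        for j in range(img.shape[1]):
            out[i, j] = padded[i:i + se_h, j:j + se_w].min()
    return out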
Example #10
def dilation(tensor: ivy.Array, kernel: ivy.Array) -> ivy.Array:
    r"""Returns the dilated image applying the same kernel in each channel.

    The kernel must have 2 dimensions, each one defined by an odd number.

    Args:
       tensor (ivy.Array): Image with shape :math:`(B, C, H, W)`.
       kernel (ivy.Array): Structuring element with shape :math:`(H, W)`.

    Returns:
       ivy.Array: Dilated image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = ivy.random_uniform(shape=(1, 3, 5, 5))
        >>> kernel = ivy.ones((3, 3))
        >>> dilated_img = dilation(tensor, kernel)
    """
    if ivy.backend == 'torch' and not isinstance(tensor, ivy.Array):
        raise TypeError("Input type is not an ivy.Array. Got {}".format(type(tensor)))

    if len(tensor.shape) != 4:
        raise ValueError("Input size must have 4 dimensions. Got {}".format(
            ivy.get_num_dims(tensor)))

    if ivy.backend == 'torch' and not isinstance(kernel, ivy.Array):
        raise TypeError("Kernel type is not a ivy.Array. Got {}".format(type(kernel)))

    if len(kernel.shape) != 2:
        raise ValueError("Kernel size must have 2 dimensions. Got {}".format(
            ivy.get_num_dims(kernel)))

    # prepare kernel
    se_d: ivy.Array = kernel - 1.
    kernel_d: ivy.Array = ivy.transpose(_se_to_mask(se_d), (2, 3, 1, 0))

    output: ivy.Array = ivy.reshape(tensor,
                                     (tensor.shape[0] * tensor.shape[1], 1, tensor.shape[2], tensor.shape[3]))
    output = ivy.reduce_max(ivy.conv2d(output, kernel_d, 1, 'SAME', data_format='NCHW') +
                            ivy.reshape(se_d, (1, -1, 1, 1)), [1])
    return ivy.reshape(output, tensor.shape)
Example #11
    def __init__(self, interactive, try_use_sim):
        super().__init__(interactive, try_use_sim)

        # initialize scene
        if self.with_pyrep:
            self._spherical_vision_sensor.remove()
            for i in range(6):
                self._vision_sensors[i].remove()
                self._vision_sensor_bodies[i].remove()
                [ray.remove() for ray in self._vision_sensor_rays[i]]
            self._box.set_position(np.array([0.55, 0, 0.9]))
            self._robot.set_position(np.array([0.85003, -0.024983, 0.77837]))
            self._robot._ik_target.set_position(np.array([0, 0, -1]))
            self._robot.get_tip().set_parent(self._robot._ik_target)
            self._robot.get_tip().set_position(np.array([0, 0, -1]))
            robot_start_config = ivy.array(
                [100., 100., 240., 180., 180., 120.]) * np.pi / 180
            [
                j.set_joint_position(p, False)
                for j, p in zip(self._robot.joints,
                                ivy.to_numpy(robot_start_config).tolist())
            ]
            robot_target_config = ivy.array([260., 100., 220., 0., 180., 45.
                                             ]) * np.pi / 180
            self._robot_target.set_position(
                np.array([0.85003, -0.024983, 0.77837]))
            self._robot_target._ik_target.set_position(np.array([0, 0, -1]))
            self._robot_target.get_tip().set_parent(
                self._robot_target._ik_target)
            self._robot_target.get_tip().set_position(np.array([0, 0, -1]))
            [
                j.set_joint_position(p, False)
                for j, p in zip(self._robot_target.joints,
                                ivy.to_numpy(robot_target_config).tolist())
            ]
            self._default_camera.set_position(
                np.array([0.094016, -1.2767, 1.7308]))
            self._default_camera.set_orientation(
                np.array([i * np.pi / 180
                          for i in [-121.32, 27.760, -164.18]]))

            input(
                '\nScene initialized.\n\n'
                'The simulator visualizer can be translated and rotated by clicking either the left mouse button or the wheel, '
                'and then dragging the mouse.\n'
                'Scrolling the mouse wheel zooms the view in and out.\n\n'
                'You can click on any object either in the scene or the left hand panel, '
                'then select the box icon with four arrows in the top panel of the simulator, '
                'and then drag the object around dynamically.\n'
                'Starting to drag and then holding ctrl allows you to also drag the object up and down.\n'
                'Clicking the top icon with a box and two rotating arrows similarly allows rotation of the object.\n\n'
                'The joint angles of either the robot or target robot configuration can also be changed.\n'
                'To do this, open the Mico or MicoTarget drop-downs on the left, and click on one of the joints "Mico_jointx", '
                'and then click on the magnifying glass over a box on the left-most panel.\n'
                'In the window that opens, change the value in the field Position [deg], and close the window again.\n\n'
                'Once you have arranged the scene as desired, press enter in the terminal to continue with the demo...\n'
            )

            # primitive scene
            self.setup_primitive_scene()

            # robot configs
            robot_start_config = ivy.array(self._robot.get_joint_positions(),
                                           'float32')
            robot_target_config = ivy.array(
                self._robot_target.get_joint_positions(), 'float32')

            # ivy robot
            self._ivy_manipulator = MicoManipulator(
                ivy_mech.make_transformation_homogeneous(
                    ivy.reshape(ivy.array(self._robot_base.get_matrix()),
                                (3, 4))))

            # spline path
            interpolated_joint_path = ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config, 100),
                (1, 0))
            multi_spline_points = ivy.transpose(
                self._ivy_manipulator.sample_links(interpolated_joint_path),
                (1, 0, 2))
            multi_spline_sdf_vals = ivy.reshape(
                self.sdf(ivy.reshape(multi_spline_points, (-1, 3))),
                (-1, 100, 1))
            self.update_path_visualization(multi_spline_points,
                                           multi_spline_sdf_vals, None)

            # public objects
            self.ivy_manipulator = self._ivy_manipulator
            self.robot_start_config = robot_start_config
            self.robot_target_config = robot_target_config

            # wait for user input
            self._user_prompt(
                '\nInitialized scene with a robot and a target robot configuration to reach.'
                '\nPress enter in the terminal to use method ivy_robot.interpolate_spline_points '
                'to plan a spline path which reaches the target configuration whilst avoiding the objects in the scene...\n'
            )

        else:

            # primitive scene
            self.setup_primitive_scene_no_sim(box_pos=np.array([0.55, 0, 0.9]))

            # ivy robot
            base_inv_ext_mat = ivy.array([[1, 0, 0, 0.84999895],
                                          [0, 1, 0, -0.02500308],
                                          [0, 0, 1, 0.70000124]])
            self.ivy_manipulator = MicoManipulator(
                ivy_mech.make_transformation_homogeneous(base_inv_ext_mat))
            self.robot_start_config = ivy.array(
                [100., 100., 240., 180., 180., 120.]) * np.pi / 180
            self.robot_target_config = ivy.array(
                [260., 100., 220., 0., 180., 45.]) * np.pi / 180

            # message
            print(
                '\nInitialized dummy scene with a robot and a target robot configuration to reach.'
                '\nClose the visualization window to use method ivy_robot.interpolate_spline_points '
                'to plan a spline path which reaches the target configuration whilst avoiding the objects in the scene...\n'
            )

            # plot scene before rotation
            if interactive:
                plt.imshow(
                    mpimg.imread(
                        os.path.join(
                            os.path.dirname(os.path.realpath(__file__)),
                            'msp_no_sim', 'path_0.png')))
                plt.show()

        # message
        print('\nOptimizing spline path...\n')
Example #12
    def __init__(self, interactive, try_use_sim):
        super().__init__(interactive, try_use_sim)

        # ivy robot
        rel_body_points = ivy.array([[0., 0., 0.], [-0.15, 0., -0.15],
                                     [-0.15, 0., 0.15], [0.15, 0., -0.15],
                                     [0.15, 0., 0.15]])
        self.ivy_drone = RigidMobile(rel_body_points)

        # initialize scene
        if self.with_pyrep:
            self._spherical_vision_sensor.remove()
            for i in range(6):
                self._vision_sensors[i].remove()
                self._vision_sensor_bodies[i].remove()
                [ray.remove() for ray in self._vision_sensor_rays[i]]
            drone_start_pos = np.array([-1.15, -1.028, 0.6])
            target_pos = np.array([1.025, 1.125, 0.6])
            self._drone.set_position(drone_start_pos)
            self._drone.set_orientation(
                np.array([-90, 50, -180]) * np.pi / 180)
            self._target.set_position(target_pos)
            self._target.set_orientation(
                np.array([-90, 50, -180]) * np.pi / 180)
            self._default_camera.set_position(
                np.array([-3.2835, -0.88753, 1.3773]))
            self._default_camera.set_orientation(
                np.array([-151.07, 70.079, -120.45]) * np.pi / 180)

            input(
                '\nScene initialized.\n\n'
                'The simulator visualizer can be translated and rotated by clicking either the left mouse button or the wheel, '
                'and then dragging the mouse.\n'
                'Scrolling the mouse wheel zooms the view in and out.\n\n'
                'You can click on any object either in the scene or the left hand panel, '
                'then select the box icon with four arrows in the top panel of the simulator, '
                'and then drag the object around dynamically.\n'
                'Starting to drag and then holding ctrl allows you to also drag the object up and down.\n'
                'Clicking the top icon with a box and two rotating arrows similarly allows rotation of the object.\n\n'
                'Once you have arranged the scene as desired, press enter in the terminal to continue with the demo...\n'
            )

            # primitive scene
            self.setup_primitive_scene()

            # public objects
            drone_starting_inv_ext_mat = ivy.array(
                np.reshape(self._drone.get_matrix(), (3, 4)), 'float32')
            drone_start_rot_vec_pose = ivy_mech.mat_pose_to_rot_vec_pose(
                drone_starting_inv_ext_mat)
            self.drone_start_pose = drone_start_rot_vec_pose
            target_inv_ext_mat = ivy.array(
                np.reshape(self._target.get_matrix(), (3, 4)), 'float32')
            target_rot_vec_pose = ivy_mech.mat_pose_to_rot_vec_pose(
                target_inv_ext_mat)
            self.drone_target_pose = target_rot_vec_pose

            # spline path
            drone_start_to_target_poses = ivy.transpose(
                ivy.linspace(self.drone_start_pose, self.drone_target_pose,
                             100), (1, 0))
            drone_start_to_target_inv_ext_mats = ivy_mech.rot_vec_pose_to_mat_pose(
                drone_start_to_target_poses)
            drone_start_to_target_positions =\
                ivy.transpose(self.ivy_drone.sample_body(drone_start_to_target_inv_ext_mats), (1, 0, 2))
            initial_sdf_vals = ivy.reshape(
                self.sdf(
                    ivy.reshape(
                        ivy.cast(drone_start_to_target_positions, 'float32'),
                        (-1, 3))), (-1, 100, 1))
            self.update_path_visualization(drone_start_to_target_positions,
                                           initial_sdf_vals, None)

            # wait for user input
            self._user_prompt(
                '\nInitialized scene with a drone and a target position to reach.'
                '\nPress enter in the terminal to use method ivy_robot.interpolate_spline_points '
                'to plan a spline path which reaches the target whilst avoiding the objects in the scene...\n'
            )

        else:

            # primitive scene
            self.setup_primitive_scene_no_sim()

            # public objects
            self.drone_start_pose = ivy.array(
                [-1.1500, -1.0280, 0.6000, 0.7937, 1.7021, 1.7021])
            self.drone_target_pose = ivy.array(
                [1.0250, 1.1250, 0.6000, 0.7937, 1.7021, 1.7021])

            # message
            print(
                '\nInitialized dummy scene with a drone and a target position to reach.'
                '\nClose the visualization window to use method ivy_robot.interpolate_spline_points '
                'to plan a spline path which reaches the target whilst avoiding the objects in the scene...\n'
            )

            # plot scene before rotation
            if interactive:
                plt.imshow(
                    mpimg.imread(
                        os.path.join(
                            os.path.dirname(os.path.realpath(__file__)),
                            'dsp_no_sim', 'path_0.png')))
                plt.show()

        print('\nOptimizing spline path...\n')
Example #13
def cuboid_signed_distances(cuboid_ext_mats,
                            cuboid_dims,
                            query_positions,
                            batch_shape=None):
    """
    Return the signed distances of a set of query points from the cuboid surfaces.\n
    `[reference] <https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm>`_

    :param cuboid_ext_mats: Extrinsic matrices of the cuboids *[batch_shape,num_cuboids,3,4]*
    :type cuboid_ext_mats: array
    :param cuboid_dims: Dimensions of the cuboids, in the order x, y, z *[batch_shape,num_cuboids,3]*
    :type cuboid_dims: array
    :param query_positions: Points for which to query the signed distances *[batch_shape,num_points,3]*
    :type query_positions: array
    :param batch_shape: Shape of batch. Assumed no batches if None.
    :type batch_shape: sequence of ints, optional
    :return: The distances of the query points from the closest cuboid surface *[batch_shape,num_points,1]*
    """

    if batch_shape is None:
        batch_shape = cuboid_ext_mats.shape[:-3]

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)
    batch_dims_for_trans = list(range(num_batch_dims))
    num_cuboids = cuboid_ext_mats.shape[-3]
    num_points = query_positions.shape[-2]

    # BS x 3 x NP
    query_positions_trans = _ivy.transpose(
        query_positions,
        batch_dims_for_trans + [num_batch_dims + 1, num_batch_dims])

    # BS x 1 x NP
    ones = _ivy.ones_like(query_positions_trans[..., 0:1, :])

    # BS x 4 x NP
    query_positions_trans_homo = _ivy.concatenate(
        (query_positions_trans, ones), -2)

    # BS x (NCx3) x 4
    cuboid_ext_mats_flat = _ivy.reshape(cuboid_ext_mats, batch_shape + [-1, 4])

    # BS x (NCx3) x NP
    rel_query_positions_trans_flat = _ivy.matmul(cuboid_ext_mats_flat,
                                                 query_positions_trans_homo)

    # BS x NC x 3 x NP
    rel_query_positions_trans = _ivy.reshape(
        rel_query_positions_trans_flat,
        batch_shape + [num_cuboids, 3, num_points])

    # BS x NC x NP x 3
    rel_query_positions = _ivy.transpose(
        rel_query_positions_trans, batch_dims_for_trans +
        [num_batch_dims, num_batch_dims + 2, num_batch_dims + 1])
    q = _ivy.abs(rel_query_positions) - _ivy.expand_dims(cuboid_dims / 2, -2)
    q_max_clipped = _ivy.maximum(q, 1e-12)

    # BS x NC x NP x 1
    q_min_clipped = _ivy.minimum(_ivy.reduce_max(q, -1, keepdims=True), 0.)
    q_max_clipped_len = _ivy.reduce_sum(q_max_clipped**2, -1,
                                        keepdims=True)**0.5
    sdfs = q_max_clipped_len + q_min_clipped

    # BS x NP x 1
    return _ivy.reduce_min(sdfs, -3)
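
The per-cuboid distance is the standard box SDF from the linked reference, with the max against zero nudged to 1e-12 above, presumably for gradient stability. A hedged single-point NumPy sketch for one origin-centred, axis-aligned cuboid:

import numpy as np

def box_sdf(point, dims):
    # hedged sketch: signed distance of a point from an origin-centred box
    q = np.abs(point) - np.asarray(dims) / 2
    outside = np.linalg.norm(np.maximum(q, 0.))  # positive distance when outside
    inside = min(q.max(), 0.)                    # negative depth when inside
    return outside + inside

print(box_sdf(np.array([1., 0., 0.]), [1., 1., 1.]))  # 0.5, half a unit outside
print(box_sdf(np.array([0., 0., 0.]), [1., 1., 1.]))  # -0.5, at the centre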
Example #14
def project_cam_coords_with_object_transformations(cam_coords_1,
                                                   id_image,
                                                   obj_ids,
                                                   obj_trans,
                                                   cam_1_to_2_ext_mat,
                                                   batch_shape=None,
                                                   image_dims=None):
    """
    Project camera-centric homogeneous co-ordinates image from frame 1 to frame 2, applying per-object rigid transformations based on the per-pixel object ids.

    :param cam_coords_1: Camera-centric homogeneous co-ordinates image in frame t *[batch_shape,h,w,4]*
    :type cam_coords_1: array
    :param id_image: Image containing per-pixel object ids *[batch_shape,h,w,1]*
    :type id_image: array
    :param obj_ids: Object ids *[batch_shape,num_obj,1]*
    :type obj_ids: array
    :param obj_trans: Object transformations for this frame over time *[batch_shape,num_obj,3,4]*
    :type obj_trans: array
    :param cam_1_to_2_ext_mat: Camera 1 to camera 2 extrinsic projection matrix *[batch_shape,3,4]*
    :type cam_1_to_2_ext_mat: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :return: Projected homogeneous co-ordinates image *[batch_shape,h,w,4]*, and binary motion mask *[batch_shape,h,w,1]*
    """

    if batch_shape is None:
        batch_shape = cam_coords_1.shape[:-3]

    if image_dims is None:
        image_dims = cam_coords_1.shape[-3:-1]

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)
    num_batch_dims = len(batch_shape)

    # Transform the co-ordinate image by each transformation

    # BS x (num_obj x 3) x 4
    obj_trans = _ivy.reshape(obj_trans, batch_shape + [-1, 4])

    # BS x 4 x H x W
    cam_coords_1_ = _ivy.transpose(
        cam_coords_1,
        list(range(num_batch_dims)) + [i + num_batch_dims for i in [2, 0, 1]])

    # BS x 4 x (HxW)
    cam_coords_1_ = _ivy.reshape(cam_coords_1_, batch_shape + [4, -1])

    # BS x (num_obj x 3) x (HxW)
    cam_coords_2_all_obj_trans = _ivy.matmul(obj_trans, cam_coords_1_)

    # BS x (HxW) x (num_obj x 3)
    cam_coords_2_all_obj_trans = \
        _ivy.transpose(cam_coords_2_all_obj_trans, list(range(num_batch_dims)) + [i + num_batch_dims for i in [1, 0]])

    # BS x H x W x num_obj x 3
    cam_coords_2_all_obj_trans = _ivy.reshape(
        cam_coords_2_all_obj_trans, batch_shape + image_dims + [-1, 3])

    # Multiplier

    # BS x 1 x 1 x num_obj
    obj_ids = _ivy.reshape(obj_ids, batch_shape + [1, 1] + [-1])

    # BS x H x W x num_obj x 1
    multiplier = _ivy.cast(_ivy.expand_dims(obj_ids == id_image, -1),
                           'float32')

    # compute validity mask, for pixels which are on moving objects

    # BS x H x W x 1
    motion_mask = _ivy.reduce_sum(multiplier, -2) > 0

    # make invalid transformations equal to zero

    # BS x H x W x num_obj x 3
    cam_coords_2_all_obj_trans_w_zeros = cam_coords_2_all_obj_trans * multiplier

    # reduce to get only valid transformations

    # BS x H x W x 3
    cam_coords_2_all_obj_trans = _ivy.reduce_sum(
        cam_coords_2_all_obj_trans_w_zeros, -2)

    # find cam coords for zero-motion pixels

    # BS x H x W x 3
    cam_coords_2_wo_motion = _ivy_tvg.cam_to_cam_coords(
        cam_coords_1, cam_1_to_2_ext_mat, batch_shape, image_dims)

    # BS x H x W x 4
    cam_coords_2_all_trans_homo =\
        _ivy_mech.make_coordinates_homogeneous(cam_coords_2_all_obj_trans, batch_shape + image_dims)
    cam_coords_2 = _ivy.where(motion_mask, cam_coords_2_all_trans_homo,
                              cam_coords_2_wo_motion)

    # return

    # BS x H x W x 4,    BS x H x W x 1
    return cam_coords_2, motion_mask
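
The object-selection logic reduces to a broadcast comparison between the id image and the object id list. A hedged NumPy mini-example of just the masking step (hypothetical shapes, no batch dimensions):

import numpy as np

id_image = np.array([[[0.], [1.]]])              # 1 x 2 x 1 per-pixel object ids
obj_ids = np.array([0., 1.]).reshape((1, 1, 2))  # 1 x 1 x num_obj
multiplier = (obj_ids == id_image)[..., None].astype('float32')  # 1 x 2 x num_obj x 1
motion_mask = multiplier.sum(-2) > 0             # 1 x 2 x 1, True where an id matched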
Example #15
    def sample_links(self, joint_angles, link_num=None, samples_per_metre=25, batch_shape=None):
        """
        Sample links of the robot at uniformly distributed cartesian positions.

        :param joint_angles: Joint angles of the robot *[batch_shape,num_joints]*
        :type joint_angles: array
        :param link_num: Link number up to which to compute the matrices. Default is the last link.
        :type link_num: int, optional
        :param samples_per_metre: Number of samples per metre of robot link
        :type samples_per_metre: int
        :param batch_shape: Shape of batch. Inferred from inputs if None.
        :type batch_shape: sequence of ints, optional
        :return: The sampled link cartesian positions *[batch_shape,total_sampling_chain_length,3]*
        """

        if link_num is None:
            link_num = self._num_joints
        if batch_shape is None:
            batch_shape = joint_angles.shape[:-1]
        batch_shape = list(batch_shape)
        num_batch_dims = len(batch_shape)
        batch_dims_for_trans = list(range(num_batch_dims))

        # BS x NJ x 4 x 4
        link_matrices = self.compute_link_matrices(joint_angles, link_num, batch_shape)

        # BS x LN+1 x 3
        link_positions = link_matrices[..., 0:3, -1]

        # BS x LN x 3
        segment_starts = link_positions[..., :-1, :]
        segment_ends = link_positions[..., 1:, :]

        # LN
        segment_sizes = _ivy.cast(_ivy.ceil(
            self._link_lengths[0:link_num] * samples_per_metre), 'int32')

        # list of segments
        segments_list = list()

        for link_idx in range(link_num):

            segment_size = segment_sizes[link_idx]

            # BS x 1 x 3
            segment_start = segment_starts[..., link_idx:link_idx + 1, :]
            segment_end = segment_ends[..., link_idx:link_idx + 1, :]

            # BS x segment_size x 3
            segment = _ivy.linspace(segment_start, segment_end, segment_size, axis=-2)[..., 0, :, :]
            if link_idx == link_num - 1 or segment_size == 1:
                segments_list.append(segment)
            else:
                segments_list.append(segment[..., :-1, :])

        # BS x total_robot_chain_length x 3
        all_segments = _ivy.concatenate(segments_list, -2)

        # BS x total_robot_chain_length x 4
        all_segments_homo = _ivy_mech.make_coordinates_homogeneous(all_segments)

        # 4 x BSxtotal_robot_chain_length
        all_segments_homo_trans = _ivy.reshape(_ivy.transpose(
            all_segments_homo, [num_batch_dims + 1] + batch_dims_for_trans + [num_batch_dims]), (4, -1))

        # 3 x BSxtotal_robot_chain_length
        transformed_trans = _ivy.matmul(self._base_inv_ext_mat[..., 0:3, :], all_segments_homo_trans)

        # BS x total_robot_chain_length x 3
        return _ivy.transpose(_ivy.reshape(
            transformed_trans, [3] + batch_shape + [-1]),
            [i+1 for i in batch_dims_for_trans] + [num_batch_dims+1] + [0])
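
The per-link sampling above amounts to resampling a polyline at roughly samples_per_metre density, dropping each segment's duplicated endpoint. A hedged NumPy sketch of the same idea for a two-segment chain:

import numpy as np

starts = np.array([[0., 0., 0.], [1., 0., 0.]])  # segment start points
ends = np.array([[1., 0., 0.], [1., 2., 0.]])    # segment end points
samples_per_metre = 4
pts = []
for s, e in zip(starts, ends):
    n = int(np.ceil(np.linalg.norm(e - s) * samples_per_metre))
    pts.append(np.linspace(s, e, max(n, 2))[:-1])  # drop the shared endpoint
pts.append(ends[-1:])                              # keep the final end point
chain = np.concatenate(pts, 0)                     # densely sampled chain, [11, 3]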