Example 1
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    pix_per_deg = 2
    om_pix = sim.get_pix_coords()
    plr_degs = om_pix / pix_per_deg
    plr_rads = plr_degs * math.pi / 180
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        depth, rgb = sim.omcam.cap()
        plr = ivy.concatenate([plr_rads, depth], -1)
        xyz_wrt_cam = ivy_mech.polar_to_cartesian_coords(plr)
        xyz_wrt_cam = ivy.reshape(xyz_wrt_cam, (-1, 3))
        xyz_wrt_cam_homo = ivy_mech.make_coordinates_homogeneous(xyz_wrt_cam)
        inv_ext_mat_trans = ivy.transpose(sim.omcam.get_inv_ext_mat(), (1, 0))
        xyz_wrt_world = ivy.matmul(xyz_wrt_cam_homo, inv_ext_mat_trans)[..., 0:3]
        with ivy.numpy.use:
            omni_cam_inv_ext_mat = ivy_mech.make_transformation_homogeneous(
                ivy.to_numpy(sim.omcam.get_inv_ext_mat()))
        vis.show_point_cloud(xyz_wrt_world, rgb, interactive,
                             sphere_inv_ext_mats=[omni_cam_inv_ext_mat], sphere_radii=[0.025])
        if not interactive:
            sim.omcam.set_pos(sim.omcam.get_pos() + ivy.array([-0.01, 0.01, 0.]))
    sim.close()
    unset_framework()
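
A minimal NumPy sketch of the idea shared by all the examples on this page (a hypothetical re-implementation, not the ivy_mech source): make_coordinates_homogeneous appends a trailing 1 to each coordinate, so a 3 x 4 transformation matrix can act by plain matrix multiplication, exactly as in the loop above.

import numpy as np

def make_homogeneous(coords):
    # append a trailing 1 so a 3 x 4 transform can act via matmul
    ones = np.ones(coords.shape[:-1] + (1,))
    return np.concatenate([coords, ones], axis=-1)

xyz_wrt_cam = np.random.uniform(size=(5, 3))  # five cartesian points
xyz_homo = make_homogeneous(xyz_wrt_cam)      # 5 x 4
inv_ext_mat = np.random.uniform(size=(3, 4))  # camera inverse extrinsics
xyz_wrt_world = xyz_homo @ inv_ext_mat.T      # 5 x 3, as in the loop above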
Example 2
    def __init__(self, rel_body_points):
        """
        Initialize rigid mobile robot instance

        :param rel_body_points: Relative body points *[num_body_points,3]*
        :type rel_body_points: array
        """

        # 4 x NBP
        self._rel_body_points_homo_trans =\
            _ivy.swapaxes(ivy_mech.make_coordinates_homogeneous(rel_body_points), 0, 1)
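
For intuition, a hedged NumPy sketch (illustrative names, not the class API) of why the constructor pre-transposes to 4 x NBP: the stored homogeneous body points can then be mapped through a 3 x 4 pose matrix with a single matmul.

import numpy as np

rel_body_points = np.random.uniform(size=(10, 3))                    # NBP x 3
homo = np.concatenate([rel_body_points, np.ones((10, 1))], axis=-1)  # NBP x 4
homo_trans = homo.swapaxes(0, 1)                                     # 4 x NBP
mat_pose = np.random.uniform(size=(3, 4))                            # body pose
body_points = (mat_pose @ homo_trans).swapaxes(0, 1)                 # NBP x 3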
Example 3
File: esm.py Project: wx-b/memory
    def _frame_to_omni_frame_projection(
            self, cam_rel_poses, cam_rel_mats, uniform_sphere_pixel_coords,
            cam_coords_f1, cam_feat_f1, rel_pose_covs, image_var_f1,
            holes_prior, holes_prior_var, batch_size, num_timesteps, num_cams,
            image_dims):
        """
        Project mean and variance values from frame 1 to omni frame 2, using scatter and quantize operations.

        :param cam_rel_poses: Relative pose of camera to agent *[batch_size, n, c, 6]*
        :param cam_rel_mats: Relative transformation matrix from camera to agent *[batch_size, n, c, 3, 4]*
        :param uniform_sphere_pixel_coords: Pixel coords *[batch_size, n, h, w, 3]*
        :param cam_coords_f1: Camera co-ordinates to project *[batch_size, n, c, h_in, w_in, 3]*
        :param cam_feat_f1: Mean feature values to project *[batch_size, n, c, h_in, w_in, f]*
        :param rel_pose_covs: Pose covariances *[batch_size, n, c, 6, 6]*
        :param image_var_f1: Angular pixel, radius and feature variance values to project *[batch_size, n, c, h_in, w_in, 3+f]*
        :param holes_prior: Prior values for holes which arise during the projection *[batch_size, n, h, w, 3+f]*
        :param holes_prior_var: Prior variance for holes which arise during the projection *[batch_size, n, h, w, 3+f]*
        :param batch_size: Size of batch
        :param num_timesteps: Number of timesteps
        :param num_cams: Number of cameras
        :param image_dims: Image dimensions
        :return: Projected mean *[batch_size, 1, h, w, 3+f]* and variance *[batch_size, 1, h, w, 3+f]*
        """

        # cam 1 to cam 2 coords

        cam_coords_f2 = ivy_vision.cam_to_cam_coords(
            ivy_mech.make_coordinates_homogeneous(
                cam_coords_f1,
                [batch_size, num_timesteps, num_cams] + image_dims),
            cam_rel_mats, [batch_size, num_timesteps, num_cams], image_dims)

        # cam 2 to sphere 2 coords

        sphere_coords_f2 = ivy_vision.cam_to_sphere_coords(cam_coords_f2)
        image_var_f2 = image_var_f1

        # angular pixel coords

        # B x N x C x H x W x 3
        angular_pixel_coords_f2 = \
            ivy_vision.sphere_to_angular_pixel_coords(sphere_coords_f2, self._pixels_per_degree)

        # constant feature projection

        # B x N x C x H x W x (3+F)
        projected_coords_f2 = ivy.concatenate(
            [angular_pixel_coords_f2, cam_feat_f1], -1)

        # reshaping to fit quantization dimension requirements

        # B x N x (CxHxW) x (3+F)
        projected_coords_f2_flat = \
            ivy.reshape(projected_coords_f2,
                        [batch_size, num_timesteps, num_cams * image_dims[0] * image_dims[1], -1])

        # B x N x (CxHxW) x (3+F)
        image_var_f2_flat = ivy.reshape(image_var_f2, [
            batch_size, num_timesteps,
            num_cams * image_dims[0] * image_dims[1], -1
        ])

        # quantized result from all scene cameras

        # B x N x OH x OW x (3+F)   # B x N x OH x OW x (3+F)
        return ivy_vision.quantize_to_image(
            pixel_coords=projected_coords_f2_flat[..., 0:2],
            final_image_dims=self._sphere_img_dims,
            feat=projected_coords_f2_flat[..., 2:],
            feat_prior=holes_prior,
            with_db=self._with_depth_buffer,
            pixel_coords_var=image_var_f2_flat[..., 0:2],
            feat_var=image_var_f2_flat[..., 2:],
            pixel_coords_prior_var=holes_prior_var[..., 0:2],
            feat_prior_var=holes_prior_var[..., 2:],
            var_threshold=self._var_threshold,
            uniform_pixel_coords=uniform_sphere_pixel_coords,
            batch_shape=(batch_size, num_timesteps),
            dev_str=self._dev_str)[0:2]
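
A short NumPy sketch of the flattening step above (assumed toy shapes): the camera and spatial dimensions are merged so that the quantization op receives one flat point list per (batch, timestep) pair.

import numpy as np

B, N, C, H, W, F = 2, 3, 4, 8, 8, 5
projected_coords = np.zeros((B, N, C, H, W, 3 + F))
flat = projected_coords.reshape(B, N, C * H * W, 3 + F)  # B x N x (CxHxW) x (3+F)
pixel_coords, feat = flat[..., 0:2], flat[..., 2:]       # split as passed to quantize_to_image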
Example 4
def main(f=None):

    # Framework Setup #
    # ----------------#

    # choose random framework

    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Orientation #
    # ------------#

    # rotation representations

    # 3
    rot_vec = ivy.array([0., 1., 0.])

    # 3 x 3
    rot_mat = ivy_mech.rot_vec_to_rot_mat(rot_vec)

    # 3
    euler_angles = ivy_mech.rot_mat_to_euler(rot_mat, 'zyx')

    # 4
    quat = ivy_mech.euler_to_quaternion(euler_angles)

    # 4
    axis_and_angle = ivy_mech.quaternion_to_axis_angle(quat)

    # 3
    rot_vec_again = axis_and_angle[..., :-1] * axis_and_angle[..., -1:]

    # Pose #
    # -----#

    # pose representations

    # 3
    position = ivy.ones_like(rot_vec)

    # 6
    rot_vec_pose = ivy.concatenate((position, rot_vec), 0)

    # 3 x 4
    mat_pose = ivy_mech.rot_vec_pose_to_mat_pose(rot_vec_pose)

    # 6
    euler_pose = ivy_mech.mat_pose_to_euler_pose(mat_pose)

    # 7
    quat_pose = ivy_mech.euler_pose_to_quaternion_pose(euler_pose)

    # 6
    rot_vec_pose_again = ivy_mech.quaternion_pose_to_rot_vec_pose(quat_pose)

    # Position #
    # ---------#

    # conversions of positional representation

    # 3
    cartesian_coord = ivy.random_uniform(0., 1., (3, ))

    # 3
    polar_coord = ivy_mech.cartesian_to_polar_coords(cartesian_coord)

    # 3
    cartesian_coord_again = ivy_mech.polar_to_cartesian_coords(polar_coord)

    # cartesian co-ordinate frame-of-reference transformations

    # 3 x 4
    trans_mat = ivy.random_uniform(0., 1., (3, 4))

    # 4
    cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        cartesian_coord)

    # 3
    trans_cartesian_coord = ivy.matmul(
        trans_mat, ivy.expand_dims(cartesian_coord_homo, -1))[:, 0]

    # 4
    trans_cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        trans_cartesian_coord)

    # 4 x 4
    trans_mat_homo = ivy_mech.make_transformation_homogeneous(trans_mat)

    # 3 x 4
    inv_trans_mat = ivy.inv(trans_mat_homo)[0:3]

    # 3
    cartesian_coord_again = ivy.matmul(
        inv_trans_mat, ivy.expand_dims(trans_cartesian_coord_homo, -1))[:, 0]

    # message
    print('End of Run Through Demo!')
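
As a sanity check of the inversion steps above, an equivalent NumPy sketch (illustrative, not the ivy API): pad the 3 x 4 transform with a [0, 0, 0, 1] row, invert the resulting 4 x 4 matrix, and keep the top three rows.

import numpy as np

trans_mat = np.random.uniform(size=(3, 4))
bottom_row = np.array([[0., 0., 0., 1.]])
trans_mat_homo = np.concatenate([trans_mat, bottom_row], axis=0)  # 4 x 4
inv_trans_mat = np.linalg.inv(trans_mat_homo)[0:3]                # 3 x 4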
Example 5
def project_cam_coords_with_object_transformations(cam_coords_1,
                                                   id_image,
                                                   obj_ids,
                                                   obj_trans,
                                                   cam_1_to_2_ext_mat,
                                                   batch_shape=None,
                                                   image_dims=None):
    """
    Project camera-centric co-ordinates from frame t to frame t+1, using the id image and per-object transformations for pixels on moving objects, and the camera extrinsic matrix elsewhere.

    :param cam_coords_1: Camera-centric homogeneous co-ordinates image in frame t *[batch_shape,h,w,4]*
    :type cam_coords_1: array
    :param id_image: Image containing per-pixel object ids *[batch_shape,h,w,1]*
    :type id_image: array
    :param obj_ids: Object ids *[batch_shape,num_obj,1]*
    :type obj_ids: array
    :param obj_trans: Object transformations for this frame over time *[batch_shape,num_obj,3,4]*
    :type obj_trans: array
    :param cam_1_to_2_ext_mat: Camera 1 to camera 2 extrinsic projection matrix *[batch_shape,3,4]*
    :type cam_1_to_2_ext_mat: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :return: Projected camera-centric co-ordinates image *[batch_shape,h,w,4]* and binary motion mask *[batch_shape,h,w,1]*
    """

    if batch_shape is None:
        batch_shape = cam_coords_1.shape[:-3]

    if image_dims is None:
        image_dims = cam_coords_1.shape[-3:-1]

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)
    num_batch_dims = len(batch_shape)

    # Transform the co-ordinate image by each transformation

    # BS x (num_obj x 3) x 4
    obj_trans = _ivy.reshape(obj_trans, batch_shape + [-1, 4])

    # BS x 4 x H x W
    cam_coords_1_ = _ivy.transpose(
        cam_coords_1,
        list(range(num_batch_dims)) + [i + num_batch_dims for i in [2, 0, 1]])

    # BS x 4 x (HxW)
    cam_coords_1_ = _ivy.reshape(cam_coords_1_, batch_shape + [4, -1])

    # BS x (num_obj x 3) x (HxW)
    cam_coords_2_all_obj_trans = _ivy.matmul(obj_trans, cam_coords_1_)

    # BS x (HxW) x (num_obj x 3)
    cam_coords_2_all_obj_trans = \
        _ivy.transpose(cam_coords_2_all_obj_trans, list(range(num_batch_dims)) + [i + num_batch_dims for i in [1, 0]])

    # BS x H x W x num_obj x 3
    cam_coords_2_all_obj_trans = _ivy.reshape(
        cam_coords_2_all_obj_trans, batch_shape + image_dims + [-1, 3])

    # Multiplier

    # BS x 1 x 1 x num_obj
    obj_ids = _ivy.reshape(obj_ids, batch_shape + [1, 1] + [-1])

    # BS x H x W x num_obj x 1
    multiplier = _ivy.cast(_ivy.expand_dims(obj_ids == id_image, -1),
                           'float32')

    # compute validity mask, for pixels which are on moving objects

    # BS x H x W x 1
    motion_mask = _ivy.reduce_sum(multiplier, -2) > 0

    # make invalid transformations equal to zero

    # BS x H x W x num_obj x 3
    cam_coords_2_all_obj_trans_w_zeros = cam_coords_2_all_obj_trans * multiplier

    # reduce to get only valid transformations

    # BS x H x W x 3
    cam_coords_2_all_obj_trans = _ivy.reduce_sum(
        cam_coords_2_all_obj_trans_w_zeros, -2)

    # find cam coords for zero-motion pixels

    # BS x H x W x 4
    cam_coords_2_wo_motion = _ivy_tvg.cam_to_cam_coords(
        cam_coords_1, cam_1_to_2_ext_mat, batch_shape, image_dims)

    # BS x H x W x 4
    cam_coords_2_all_trans_homo =\
        _ivy_mech.make_coordinates_homogeneous(cam_coords_2_all_obj_trans, batch_shape + image_dims)
    cam_coords_2 = _ivy.where(motion_mask, cam_coords_2_all_trans_homo,
                              cam_coords_2_wo_motion)

    # return

    # BS x H x W x 4,    BS x H x W x 1
    return cam_coords_2, motion_mask
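
A hedged NumPy sketch of the per-pixel multiplier logic above (unbatched, toy shapes): object ids are broadcast against the id image to form a one-hot mask, and reducing that mask yields the motion mask.

import numpy as np

H, W, num_obj = 4, 4, 3
id_image = np.random.randint(0, num_obj, size=(H, W, 1)).astype('float32')
obj_ids = np.arange(num_obj, dtype='float32').reshape(1, 1, num_obj)
multiplier = (obj_ids == id_image)[..., None].astype('float32')  # H x W x num_obj x 1
motion_mask = multiplier.sum(axis=-2) > 0                        # H x W x 1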
Example 6
    def sample_links(self, joint_angles, link_num=None, samples_per_metre=25, batch_shape=None):
        """
        Sample links of the robot at uniformly distributed cartesian positions.

        :param joint_angles: Joint angles of the robot *[batch_shape,num_joints]*
        :type joint_angles: array
        :param link_num: Link number up to which to compute matrices. Defaults to the last link.
        :type link_num: int, optional
        :param samples_per_metre: Number of samples per metre of robot link
        :type samples_per_metre: int
        :param batch_shape: Shape of batch. Inferred from inputs if None.
        :type batch_shape: sequence of ints, optional
        :return: The sampled link cartesian positions *[batch_shape,total_sampling_chain_length,3]*
        """

        if link_num is None:
            link_num = self._num_joints
        if batch_shape is None:
            batch_shape = joint_angles.shape[:-1]
        batch_shape = list(batch_shape)
        num_batch_dims = len(batch_shape)
        batch_dims_for_trans = list(range(num_batch_dims))

        # BS x NJ x 4 x 4
        link_matrices = self.compute_link_matrices(joint_angles, link_num, batch_shape)

        # BS x LN+1 x 3
        link_positions = link_matrices[..., 0:3, -1]

        # BS x LN x 3
        segment_starts = link_positions[..., :-1, :]
        segment_ends = link_positions[..., 1:, :]

        # LN
        segment_sizes = _ivy.cast(_ivy.ceil(
            self._link_lengths[0:link_num] * samples_per_metre), 'int32')

        # list of segments
        segments_list = list()

        for link_idx in range(link_num):

            segment_size = segment_sizes[link_idx]

            # BS x 1 x 3
            segment_start = segment_starts[..., link_idx:link_idx + 1, :]
            segment_end = segment_ends[..., link_idx:link_idx + 1, :]

            # BS x segment_size x 3
            segment = _ivy.linspace(segment_start, segment_end, segment_size, axis=-2)[..., 0, :, :]
            if link_idx == link_num - 1 or segment_size == 1:
                segments_list.append(segment)
            else:
                segments_list.append(segment[..., :-1, :])

        # BS x total_robot_chain_length x 3
        all_segments = _ivy.concatenate(segments_list, -2)

        # BS x total_robot_chain_length x 4
        all_segments_homo = _ivy_mech.make_coordinates_homogeneous(all_segments)

        # 4 x BSxtotal_robot_chain_length
        all_segments_homo_trans = _ivy.reshape(_ivy.transpose(
            all_segments_homo, [num_batch_dims + 1] + batch_dims_for_trans + [num_batch_dims]), (4, -1))

        # 3 x BSxtotal_robot_chain_length
        transformed_trans = _ivy.matmul(self._base_inv_ext_mat[..., 0:3, :], all_segments_homo_trans)

        # BS x total_robot_chain_length x 3
        return _ivy.transpose(_ivy.reshape(
            transformed_trans, [3] + batch_shape + [-1]),
            [i+1 for i in batch_dims_for_trans] + [num_batch_dims+1] + [0])
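
A hedged NumPy sketch of the per-link sampling idea above (single link, illustrative values): the sample count scales with link length, and points are spaced uniformly between consecutive link positions.

import numpy as np

segment_start = np.array([0., 0., 0.])
segment_end = np.array([0., 0.5, 1.])
samples_per_metre = 25
length = np.linalg.norm(segment_end - segment_start)
segment_size = int(np.ceil(length * samples_per_metre))
segment = np.linspace(segment_start, segment_end, segment_size)  # segment_size x 3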
Example 7
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    with_mxnd = f is ivy.mxnd
    if with_mxnd:
        print('\nMXNet does not support "sum" or "min" reductions for scatter_nd,\n'
              'instead it only supports non-deterministic replacement for duplicates.\n'
              'Depth buffer rendering (requires min) and fusion buffer (requires sum) are therefore unsupported.\n'
              'The rendering in this demo with the MXNet backend exhibits non-deterministic jagged edges as a result.')
    sim = Simulator(interactive, try_use_sim)
    import matplotlib.pyplot as plt
    xyzs = list()
    rgbs = list()
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        for cam in sim.cams:
            depth, rgb = cam.cap()
            xyz = sim.depth_to_xyz(depth, cam.get_inv_ext_mat(), cam.inv_calib_mat, [1024]*2)
            xyzs.append(xyz)
            rgbs.append(rgb)
        xyz = ivy.reshape(ivy.concatenate(xyzs, 1), (-1, 3))
        rgb = ivy.reshape(ivy.concatenate(rgbs, 1), (-1, 3))
        cam_coords = ivy_vision.world_to_cam_coords(ivy_mech.make_coordinates_homogeneous(ivy.expand_dims(xyz, 1)),
                                                    sim.target_cam.get_ext_mat())
        ds_pix_coords = ivy_vision.cam_to_ds_pixel_coords(cam_coords, sim.target_cam.calib_mat)
        depth = ds_pix_coords[..., -1]
        pix_coords = ds_pix_coords[..., 0, 0:2] / depth
        final_image_dims = [512]*2
        feat = ivy.concatenate((depth, rgb), -1)
        rendered_img_no_db, _, _ = ivy_vision.quantize_to_image(
            pix_coords, final_image_dims, feat, ivy.zeros(final_image_dims + [4]), with_db=False)
        with_db = not with_mxnd
        rendered_img_with_db, _, _ = ivy_vision.quantize_to_image(
            pix_coords, final_image_dims, feat, ivy.zeros(final_image_dims + [4]), with_db=with_db)

        # label the three captured rgb images 'a', 'b' and 'c'
        labelled_imgs = list()
        for rgb_img, label in zip(rgbs[0:3], 'abc'):
            img = cv2.resize(ivy.to_numpy(rgb_img), (256, 256))
            img[0:50, 0:50] = np.zeros_like(img[0:50, 0:50])
            img[5:45, 5:45] = np.ones_like(img[5:45, 5:45])
            cv2.putText(img, label, (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)
            labelled_imgs.append(img)
        a_img, b_img, c_img = labelled_imgs

        target_img = cv2.resize(ivy.to_numpy(sim.target_cam.cap()[1]), (256, 256))
        target_img[0:50, 0:140] = np.zeros_like(target_img[0:50, 0:140])
        target_img[5:45, 5:135] = np.ones_like(target_img[5:45, 5:135])
        cv2.putText(target_img, 'target', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        msg = 'non-deterministic' if with_mxnd else 'no depth buffer'
        width = 360 if with_mxnd else 320
        no_db_img = np.copy(ivy.to_numpy(rendered_img_no_db[..., 3:]))
        no_db_img[0:50, 0:width+5] = np.zeros_like(no_db_img[0:50, 0:width+5])
        no_db_img[5:45, 5:width] = np.ones_like(no_db_img[5:45, 5:width])
        cv2.putText(no_db_img, msg, (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        with_db_img = np.copy(ivy.to_numpy(rendered_img_with_db[..., 3:]))
        with_db_img[0:50, 0:350] = np.zeros_like(with_db_img[0:50, 0:350])
        with_db_img[5:45, 5:345] = np.ones_like(with_db_img[5:45, 5:345])
        cv2.putText(with_db_img, 'with depth buffer', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        raw_imgs = np.concatenate((np.concatenate((a_img, b_img), 1),
                                   np.concatenate((c_img, target_img), 1)), 0)
        to_concat = (raw_imgs, no_db_img) if with_mxnd else (raw_imgs, no_db_img, with_db_img)
        final_img = np.concatenate(to_concat, 1)

        if interactive:
            print('\nClose the image window when you are ready.\n')
            plt.imshow(final_img)
            plt.show()
        xyzs.clear()
        rgbs.clear()
    sim.close()
    unset_framework()
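
For context on the MXNet caveat above, a minimal NumPy sketch of why depth-buffered rendering needs a deterministic "min" scatter reduction: when several projected points land on the same pixel, the nearest one must win.

import numpy as np

pix_idx = np.array([0, 1, 1, 2])        # flat pixel index per projected point
depth = np.array([2., 5., 1., 3.])      # depth of each projected point
buffer = np.full(3, np.inf)
np.minimum.at(buffer, pix_idx, depth)   # scatter-min, as with_db requires
# buffer is now [2., 1., 3.]: the nearest point survives at each pixel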