Example #1
    def as_identity(batch_shape):
        """
        Return camera geometry object with array attributes as either zeros or identity matrices.

        :param batch_shape: Batch shape for each geometric array attribute
        :type batch_shape: sequence of ints
        :return: New camera geometry object, with each entry as either zeros or identity matrices.
        """
        intrinsics = Intrinsics.as_identity(batch_shape)
        extrinsics = Extrinsics.as_identity(batch_shape)
        full_mats_homo = _ivy.identity(4, batch_shape=batch_shape)
        inv_full_mats_homo = _ivy.identity(4, batch_shape=batch_shape)
        return __class__(intrinsics, extrinsics, full_mats_homo,
                         inv_full_mats_homo)
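
A minimal sketch of the batched-identity semantics assumed above, with NumPy standing in for the ivy backend (the batch_shape keyword is taken from the calls in the example; the helper below is hypothetical):

import numpy as np

def identity(n, batch_shape=None):
    # broadcast an n x n identity across the leading batch dims
    eye = np.eye(n)
    if batch_shape is None:
        return eye
    return np.broadcast_to(eye, list(batch_shape) + [n, n]).copy()

full_mats_homo = identity(4, batch_shape=[2, 5])
assert full_mats_homo.shape == (2, 5, 4, 4)
assert np.allclose(full_mats_homo[1, 3], np.eye(4))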
Example #2
    def as_identity(batch_shape):
        """
        Return camera intrinsics object with array attributes as either zeros or identity matrices.

        :param batch_shape: Batch shape for each geometric array attribute
        :type batch_shape: sequence of ints
        :return: New camera intrinsics object, with each entry as either zeros or identity matrices.
        """
        batch_shape = list(batch_shape)
        focal_lengths = _ivy.ones(batch_shape + [2])
        persp_angles = _ivy.ones(batch_shape + [2])
        pp_offsets = _ivy.zeros(batch_shape + [2])
        calib_mats = _ivy.identity(3, batch_shape=batch_shape)
        inv_calib_mats = _ivy.identity(3, batch_shape=batch_shape)
        return __class__(focal_lengths, persp_angles, pp_offsets, calib_mats,
                         inv_calib_mats)
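
For context, a hedged sketch of how the attributes above compose into a pinhole calibration matrix; the K layout is standard pinhole geometry, not taken from this snippet (NumPy stand-in):

import numpy as np

focal_lengths = np.ones(2)   # identity case, as returned by as_identity
pp_offsets = np.zeros(2)
# standard pinhole layout: K = [[fx, 0, ppx], [0, fy, ppy], [0, 0, 1]]
calib_mat = np.array([[focal_lengths[0], 0., pp_offsets[0]],
                      [0., focal_lengths[1], pp_offsets[1]],
                      [0., 0., 1.]])
assert np.allclose(calib_mat, np.eye(3))  # identity intrinsics give K = I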
Example #3
    def as_identity(batch_shape):
        """
        Return camera extrinsics object with array attributes as either zeros or identity matrices.

        :param batch_shape: Batch shape for each geometric array attribute.
        :type batch_shape: sequence of ints
        :return: New camera extrinsics object, with each entry as either zeros or identity matrices.
        """
        batch_shape = list(batch_shape)
        cam_centers = _ivy.zeros(batch_shape + [3, 1])
        Rs = _ivy.identity(3, batch_shape=batch_shape)
        inv_Rs = _ivy.identity(3, batch_shape=batch_shape)
        ext_mats_homo = _ivy.identity(4, batch_shape=batch_shape)
        inv_ext_mats_homo = _ivy.identity(4, batch_shape=batch_shape)
        return __class__(cam_centers, Rs, inv_Rs, ext_mats_homo,
                         inv_ext_mats_homo)
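
A short sketch of why identity rotations and zero camera centers yield identity homogeneous extrinsics, assuming the standard composition E_homo = [[R, -R C], [0 0 0 1]] (consistent with Example #4 below; NumPy stand-in):

import numpy as np

R = np.eye(3)
C = np.zeros((3, 1))
ext_mat_homo = np.vstack([np.hstack([R, -R @ C]),
                          np.array([[0., 0., 0., 1.]])])
assert np.allclose(ext_mat_homo, np.eye(4))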
Example #4
def rot_mat_and_cam_center_to_ext_mat(rotation_mat, camera_center, batch_shape=None):
    """
    Get extrinsic matrix :math:`\mathbf{E}\in\mathbb{R}^{3\times4}` from rotation matrix
    :math:`\mathbf{R}\in\mathbb{R}^{3\times3}` and camera center :math:`\overset{\sim}{\mathbf{C}}\in\mathbb{R}^{3\times1}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=175>`_
    page 157, section 6.1, equation 6.11

    :param rotation_mat: Rotation matrix *[batch_shape,3,3]*
    :type rotation_mat: array
    :param camera_center: Camera center *[batch_shape,3,1]*
    :type camera_center: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Extrinsic matrix *[batch_shape,3,4]*
    """

    if batch_shape is None:
        batch_shape = rotation_mat.shape[:-2]

    # shapes as list
    batch_shape = list(batch_shape)

    # num batch dims
    num_batch_dims = len(batch_shape)

    # BS x 3 x 3
    identity = _ivy.tile(_ivy.reshape(_ivy.identity(3), [1] * num_batch_dims + [3, 3]),
                         batch_shape + [1, 1])

    # BS x 3 x 4
    identity_w_cam_center = _ivy.concatenate((identity, -camera_center), -1)

    # BS x 3 x 4
    return _ivy.matmul(rotation_mat, identity_w_cam_center)
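
A worked check of the construction above, E = R [I | -C]: the camera center, in homogeneous form, should map to the origin of the camera frame (NumPy stand-in, batch dims omitted for clarity):

import numpy as np

theta = 0.3
rotation_mat = np.array([[np.cos(theta), -np.sin(theta), 0.],
                         [np.sin(theta),  np.cos(theta), 0.],
                         [0., 0., 1.]])
camera_center = np.array([[1.], [2.], [3.]])
identity_w_cam_center = np.concatenate([np.eye(3), -camera_center], -1)
ext_mat = rotation_mat @ identity_w_cam_center  # 3 x 4
assert np.allclose(ext_mat @ np.vstack([camera_center, [[1.]]]), 0.)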
Example #5
    def as_identity(batch_shape):
        """
        Return primitive scene object with array attributes as either zeros or identity matrices.

        :param batch_shape: Batch shape for each geometric array attribute
        :type batch_shape: sequence of ints
        :return: New primitive scene object, with each entry as either zeros or identity matrices.
        """
        batch_shape = list(batch_shape)
        sphere_positions = _ivy.identity(4, batch_shape=batch_shape)[..., 0:3, :]
        sphere_radii = _ivy.ones(batch_shape + [1])
        cuboid_ext_mats = _ivy.identity(4, batch_shape=batch_shape)[..., 0:3, :]
        cuboid_dims = _ivy.ones(batch_shape + [3])
        return __class__(sphere_positions, sphere_radii, cuboid_ext_mats,
                         cuboid_dims)
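
The [..., 0:3, :] slicing above keeps the top three rows of each 4 x 4 identity, i.e. an identity-pose 3 x 4 matrix; a minimal NumPy sketch:

import numpy as np

mat = np.eye(4)[0:3, :]  # shape (3, 4): rotation part = I, translation part = 0
assert np.allclose(mat, np.concatenate([np.eye(3), np.zeros((3, 1))], -1))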
Example #6
def _get_dummy_obs(batch_size, num_frames, num_cams, image_dims, num_feature_channels, dev_str='cpu', ones=False,
                   empty=False):

    uniform_pixel_coords =\
        ivy_vision.create_uniform_pixel_coords_image(image_dims, [batch_size, num_frames], dev_str=dev_str)

    img_meas = dict()
    for i in range(num_cams):
        validity_mask = ivy.ones([batch_size, num_frames] + image_dims + [1], dev_str=dev_str)
        if ones:
            img_mean = ivy.concatenate((uniform_pixel_coords[..., 0:2], ivy.ones(
                [batch_size, num_frames] + image_dims + [1 + num_feature_channels], dev_str=dev_str)), -1)
            img_var = ivy.ones(
                [batch_size, num_frames] + image_dims + [3 + num_feature_channels], dev_str=dev_str) * 1e-3
            pose_mean = ivy.zeros([batch_size, num_frames, 6], dev_str=dev_str)
            pose_cov = ivy.ones([batch_size, num_frames, 6, 6], dev_str=dev_str)*1e-3
        else:
            img_mean = ivy.concatenate((uniform_pixel_coords[..., 0:2], ivy.random_uniform(
                1e-3, 1, [batch_size, num_frames] + image_dims + [1 + num_feature_channels], dev_str=dev_str)), -1)
            img_var = ivy.random_uniform(
                1e-3, 1, [batch_size, num_frames] + image_dims + [3 + num_feature_channels], dev_str=dev_str)
            pose_mean = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6], dev_str=dev_str)
            pose_cov = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6, 6], dev_str=dev_str)
        if empty:
            img_var = ivy.ones_like(img_var) * 1e12
            validity_mask = ivy.zeros_like(validity_mask)
        img_meas['dummy_cam_{}'.format(i)] =\
            {'img_mean': img_mean,
             'img_var': img_var,
             'validity_mask': validity_mask,
             'pose_mean': pose_mean,
             'pose_cov': pose_cov,
             'cam_rel_mat': ivy.identity(4, batch_shape=[batch_size, num_frames], dev_str=dev_str)[..., 0:3, :]}

    if ones:
        control_mean = ivy.zeros([batch_size, num_frames, 6], dev_str=dev_str)
        control_cov = ivy.ones([batch_size, num_frames, 6, 6], dev_str=dev_str)*1e-3
    else:
        control_mean = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6], dev_str=dev_str)
        control_cov = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6, 6], dev_str=dev_str)
    return Container({'img_meas': img_meas,
                      'control_mean': control_mean,
                      'control_cov': control_cov,
                      'agent_rel_mat': ivy.identity(4, batch_shape=[batch_size, num_frames],
                                                    dev_str=dev_str)[..., 0:3, :]})
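
A hedged sketch of the `empty` convention used above: array shapes are preserved, but the variance is driven to a huge value and the validity mask is zeroed, so the measurement carries effectively no information (NumPy stand-ins for the ivy calls):

import numpy as np

img_var = np.random.uniform(1e-3, 1., size=(1, 2, 3, 3, 4))
validity_mask = np.ones((1, 2, 3, 3, 1))
# "empty" measurement: near-infinite variance, every pixel marked invalid
img_var = np.ones_like(img_var) * 1e12
validity_mask = np.zeros_like(validity_mask)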
Example #7
    def __init__(self,
                 img_mean: ivy.Array,
                 cam_rel_mat: ivy.Array = None,
                 img_var: ivy.Array = None,
                 validity_mask: ivy.Array = None,
                 pose_mean: ivy.Array = None,
                 pose_cov: ivy.Array = None,
                 dev_str: str = None):
        """
        Create ESM image measurement container

        :param img_mean: Camera-relative co-ordinates and image features
                            *[batch_size, timesteps, height, width, 3 + feat]*
        :type img_mean: array
        :param cam_rel_mat: The pose of the camera relative to the current agent pose. Default is identity matrix
                            *[batch_size, timesteps, 3, 4]*
        :type cam_rel_mat: array, optional
        :param img_var: Image depth and feature variance values, assumed all zero if None.
                        *[batch_size, timesteps, height, width, 1 + feat]*
        :type img_var: array, optional
        :param validity_mask: Validity mask, indicating which pixels should be considered. Assumed all valid if None
                                *[batch_size, timesteps, height, width, 1]*
        :type validity_mask: array, optional
        :param pose_mean: The pose of the camera relative to the current agent pose, in rotation vector pose form.
                            Inferred from cam_rel_mat if None. *[batch_size, timesteps, 6]*
        :type pose_mean: array, optional
        :param pose_cov: The covariance of the camera-relative pose, in rotation vector form. Assumed all zero if None.
                            *[batch_size, timesteps, 6, 6]*
        :type pose_cov: array, optional
        :param dev_str: Device string to use; defaults to the device of img_mean.
        :type dev_str: str, optional
        """
        if dev_str is None:
            dev_str = ivy.dev_str(img_mean)
        img_mean = _pad_to_batch_n_time_dims(img_mean, 5)
        self['img_mean'] = img_mean
        if cam_rel_mat is None:
            cam_rel_mat = ivy.identity(4, batch_shape=img_mean.shape[0:2], dev_str=dev_str)[..., 0:3, :]
        else:
            cam_rel_mat = _pad_to_batch_n_time_dims(cam_rel_mat, 4)
        self['cam_rel_mat'] = cam_rel_mat
        if img_var is None:
            img_var = ivy.zeros_like(img_mean, dev_str=dev_str)
        else:
            img_var = _pad_to_batch_n_time_dims(img_var, 5)
        self['img_var'] = img_var
        if validity_mask is None:
            validity_mask = ivy.ones_like(img_mean[..., 0:1], dev_str=dev_str)
        else:
            validity_mask = _pad_to_batch_n_time_dims(validity_mask, 5)
        self['validity_mask'] = validity_mask
        if pose_mean is None:
            pose_mean = ivy_mech.mat_pose_to_rot_vec_pose(cam_rel_mat)
        else:
            pose_mean = _pad_to_batch_n_time_dims(pose_mean, 3)
        self['pose_mean'] = pose_mean
        if pose_cov is None:
            pose_cov = ivy.tile(ivy.expand_dims(ivy.zeros_like(pose_mean, dev_str=dev_str), -1), (1, 1, 1, 6))
        else:
            pose_cov = _pad_to_batch_n_time_dims(pose_cov, 4)
        self['pose_cov'] = pose_cov
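
A sketch of the pose_cov default above: tiling an all-zero [batch, time, 6, 1] tensor along the last axis yields a zero 6 x 6 covariance per step (NumPy stand-in):

import numpy as np

pose_mean = np.zeros((1, 2, 6))
pose_cov = np.tile(np.expand_dims(np.zeros_like(pose_mean), -1), (1, 1, 1, 6))
assert pose_cov.shape == (1, 2, 6, 6)
assert not pose_cov.any()  # zero covariance: the relative pose is fully trusted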
Example #8
def velocity_from_cam_coords_id_image_and_object_trans(cam_coords_t,
                                                       id_image,
                                                       obj_ids,
                                                       obj_trans,
                                                       delta_t,
                                                       batch_shape=None,
                                                       image_dims=None,
                                                       dev_str=None):
    """
    Compute velocity image from co-ordinate image, id image, and object transformations.

    :param cam_coords_t: Camera-centric homogeneous co-ordinates image in frame t *[batch_shape,h,w,4]*
    :type cam_coords_t: array
    :param id_image: Image containing per-pixel object ids *[batch_shape,h,w,1]*
    :type id_image: array
    :param obj_ids: Object ids *[batch_shape,num_obj,1]*
    :type obj_ids: array
    :param obj_trans: Object transformations for this frame over time *[batch_shape,num_obj,3,4]*
    :type obj_trans: array
    :param delta_t: Time difference between frame at timestep t-1 and t *[batch_shape,1]*
    :type delta_t: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :param dev_str: Device on which to create the arrays, e.g. 'cuda:0', 'cuda:1', 'cpu'. Inferred from cam_coords_t if None.
    :type dev_str: str, optional
    :return: Relative velocity image *[batch_shape,h,w,3]*
    """

    if batch_shape is None:
        batch_shape = cam_coords_t.shape[:-3]

    if image_dims is None:
        image_dims = cam_coords_t.shape[-3:-1]

    if dev_str is None:
        dev_str = _ivy.dev_str(cam_coords_t)

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # get co-ordinate re-projections

    # BS x H x W x 4
    cam_coords_t_all_trans, motion_mask =\
        project_cam_coords_with_object_transformations(cam_coords_t, id_image, obj_ids, obj_trans,
                                                       _ivy.identity(4, batch_shape=batch_shape)[..., 0:3, :],
                                                       batch_shape, image_dims)

    # BS x H x W x 4
    cam_coords_t_all_trans = \
        _ivy.where(motion_mask, cam_coords_t_all_trans, _ivy.zeros_like(cam_coords_t_all_trans, dev_str=dev_str))

    # compute velocities

    # BS x H x W x 3
    vel = (cam_coords_t[..., 0:3] - cam_coords_t_all_trans[..., 0:3]) / delta_t

    # prune velocities

    # BS x H x W x 3
    return _ivy.where(motion_mask, vel, _ivy.zeros_like(vel, dev_str=dev_str))
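
A worked single-pixel example of the velocity step above, vel = (cam_coords_t - cam_coords_t_all_trans) / delta_t (illustrative values, NumPy stand-in):

import numpy as np

cam_coords_t = np.array([1.0, 2.0, 3.0])        # point position at frame t
cam_coords_t_trans = np.array([0.9, 2.0, 3.2])  # same point re-projected from frame t-1
delta_t = 0.05                                  # 20 Hz frame interval
vel = (cam_coords_t - cam_coords_t_trans) / delta_t
assert np.allclose(vel, [2.0, 0.0, -4.0])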
Example #9
def main():

    # LSTM #
    # -----#

    # using the Ivy LSTM memory module, dual stacked, in a PyTorch model

    class TorchModelWithLSTM(torch.nn.Module):
        def __init__(self, channels_in, channels_out):
            torch.nn.Module.__init__(self)
            self._linear = torch.nn.Linear(channels_in, 64)
            self._lstm = ivy_mem.LSTM(64, channels_out, 2, return_state=False)
            self._assign_variables()

        def _assign_variables(self):
            self._lstm.v.map(lambda x, kc: self.register_parameter(
                name=kc, param=torch.nn.Parameter(x)))
            self._lstm.v = self._lstm.v.map(lambda x, kc: self._parameters[kc])

        def forward(self, x):
            x = self._linear(x)
            return self._lstm(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('torch')
    model = TorchModelWithLSTM(in_channels, out_channels)

    # define inputs
    batch_shape = [1, 2]
    timesteps = 3
    input_shape = batch_shape + [timesteps, in_channels]
    input_seq = torch.rand(input_shape)

    # call model and test output
    output_seq = model(input_seq)
    assert input_seq.shape[:-1] == output_seq.shape[:-1]
    assert input_seq.shape[-1] == in_channels
    assert output_seq.shape[-1] == out_channels

    # define loss function
    target = torch.zeros_like(output_seq)

    def loss_fn():
        pred = model(input_seq)
        return torch.sum((pred - target)**2)

    # define optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

    # train model
    print('\ntraining dummy PyTorch LSTM model...\n')
    for i in range(10):
        optimizer.zero_grad()
        loss = loss_fn()
        loss.backward()
        optimizer.step()
        print('step {}, loss = {}'.format(i, loss))
    print('\ndummy PyTorch LSTM model trained!\n')
    ivy.unset_framework()

    # NTM #
    # ----#

    # using the Ivy NTM memory module in a TensorFlow model

    class TfModelWithNTM(tf.keras.Model):
        def __init__(self, channels_in, channels_out):
            tf.keras.Model.__init__(self)
            self._linear = tf.keras.layers.Dense(64)
            memory_size = 4
            memory_vector_dim = 1
            self._ntm = ivy_mem.NTM(input_dim=64,
                                    output_dim=channels_out,
                                    ctrl_output_size=channels_out,
                                    ctrl_layers=1,
                                    memory_size=memory_size,
                                    memory_vector_dim=memory_vector_dim,
                                    read_head_num=1,
                                    write_head_num=1)
            self._assign_variables()

        def _assign_variables(self):
            self._ntm.v.map(
                lambda x, kc: self.add_weight(name=kc, shape=x.shape))
            self.set_weights(
                [ivy.to_numpy(v) for k, v in self._ntm.v.to_iterator()])
            self.trainable_weights_dict = dict()
            for weight in self.trainable_weights:
                self.trainable_weights_dict[weight.name] = weight
            self._ntm.v = self._ntm.v.map(
                lambda x, kc: self.trainable_weights_dict[kc + ':0'])

        def call(self, x, **kwargs):
            x = self._linear(x)
            return self._ntm(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('tensorflow')
    model = TfModelWithNTM(in_channels, out_channels)

    # define inputs
    batch_shape = [1, 2]
    timesteps = 3
    input_shape = batch_shape + [timesteps, in_channels]
    input_seq = tf.random.uniform(input_shape)

    # call model and test output
    output_seq = model(input_seq)
    assert input_seq.shape[:-1] == output_seq.shape[:-1]
    assert input_seq.shape[-1] == in_channels
    assert output_seq.shape[-1] == out_channels

    # define loss function
    target = tf.zeros_like(output_seq)

    def loss_fn():
        pred = model(input_seq)
        return tf.reduce_sum((pred - target)**2)

    # define optimizer
    optimizer = tf.keras.optimizers.Adam(1e-2)

    # train model
    print('\ntraining dummy TensorFlow NTM model...\n')
    for i in range(10):
        with tf.GradientTape() as tape:
            loss = loss_fn()
        grads = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        print('step {}, loss = {}'.format(i, loss))
    print('\ndummy TensorFlow NTM model trained!\n')
    ivy.unset_framework()

    # ESM #
    # ----#

    # using the Ivy ESM memory module in a pure-Ivy model, with a JAX backend
    # ToDo: add pre-ESM conv layers to this demo

    class IvyModelWithESM(ivy.Module):
        def __init__(self, channels_in, channels_out):
            self._channels_in = channels_in
            self._esm = ivy_mem.ESM(omni_image_dims=(16, 32))
            self._linear = ivy_mem.Linear(channels_in, channels_out)
            ivy.Module.__init__(self, 'cpu')

        def _forward(self, obs):
            mem = self._esm(obs)
            x = ivy.reshape(mem.mean, (-1, self._channels_in))
            return self._linear(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('torch')
    model = IvyModelWithESM(in_channels, out_channels)

    # input config
    batch_size = 1
    image_dims = [5, 5]
    num_timesteps = 2
    num_feature_channels = 3

    # create image of pixel co-ordinates
    uniform_pixel_coords =\
        ivy_vision.create_uniform_pixel_coords_image(image_dims, [batch_size, num_timesteps])

    # define camera measurement
    depths = ivy.random_uniform(shape=[batch_size, num_timesteps] +
                                image_dims + [1])
    ds_pixel_coords = ivy_vision.depth_to_ds_pixel_coords(depths)
    inv_calib_mats = ivy.random_uniform(
        shape=[batch_size, num_timesteps, 3, 3])
    cam_coords = ivy_vision.ds_pixel_to_cam_coords(ds_pixel_coords,
                                                   inv_calib_mats)[..., 0:3]
    features = ivy.random_uniform(shape=[batch_size, num_timesteps] +
                                  image_dims + [num_feature_channels])
    img_mean = ivy.concatenate((cam_coords, features), -1)
    cam_rel_mat = ivy.identity(4, batch_shape=[batch_size,
                                               num_timesteps])[..., 0:3, :]

    # place these into an ESM camera measurement container
    esm_cam_meas = ESMCamMeasurement(img_mean=img_mean,
                                     cam_rel_mat=cam_rel_mat)

    # define agent pose transformation
    agent_rel_mat = ivy.identity(4, batch_shape=[batch_size,
                                                 num_timesteps])[..., 0:3, :]

    # collect together into an ESM observation container
    esm_obs = ESMObservation(img_meas={'camera_0': esm_cam_meas},
                             agent_rel_mat=agent_rel_mat)

    # call model and test output
    output = model(esm_obs)
    assert output.shape[-1] == out_channels

    # define loss function
    target = ivy.zeros_like(output)

    def loss_fn(v):
        pred = model(esm_obs, v=v)
        return ivy.reduce_mean((pred - target)**2)

    # optimizer
    optimizer = ivy.SGD(lr=1e-4)

    # train model
    print('\ntraining dummy Ivy ESM model...\n')
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, model.v)
        model.v = optimizer.step(model.v, grads)
        print('step {}, loss = {}'.format(i, ivy.to_numpy(loss).item()))
    print('\ndummy Ivy ESM model trained!\n')
    ivy.unset_framework()

    # message
    print('End of Run Through Demo!')
Example #10
    def __init__(self, a_s, d_s, alpha_s, dh_joint_scales, dh_joint_offsets, base_inv_ext_mat=None):
        """
        Initialize robot manipulator instance

        :param a_s: Denavit–Hartenberg "a" parameters *[num_joints]*
        :type a_s: array
        :param d_s: Denavit–Hartenberg "d" parameters *[num_joints]*
        :type d_s: array
        :param alpha_s: Denavit–Hartenberg "alpha" parameters *[num_joints]*
        :type alpha_s: array
        :param dh_joint_scales: Scalars to apply to input joints *[num_joints]*
        :type dh_joint_scales: array
        :param dh_joint_offsets: Scalar offsets to apply to input joints *[num_joints]*
        :type dh_joint_offsets: array
        :param base_inv_ext_mat: Inverse extrinsic matrix of the robot base *[4,4]*
        :type base_inv_ext_mat: array, optional
        """

        self._num_joints = a_s.shape[-1]
        # ToDo: incorporate the base_inv_ext_mat more elegantly, instead of the hack as in the sample_links method
        if base_inv_ext_mat is None:
            self._base_inv_ext_mat = _ivy.identity(4)
        else:
            self._base_inv_ext_mat = base_inv_ext_mat

        # NJ
        self._dis = d_s
        self._dh_joint_scales = dh_joint_scales
        self._dh_joint_offsets = dh_joint_offsets

        # Forward Kinematics Constant Matrices

        # Based on Denavit-Hartenberg Convention
        # Using same nomenclature as in:
        # Modelling, Planning and Control. Bruno Siciliano, Lorenzo Sciavicco, Luigi Villani, Giuseppe Oriolo
        # page 61 - 65

        AidashtoAis_list = [_ivy.identity(4, batch_shape=[1])]

        # repeated blocks

        # 1 x 1 x 3
        start_of_top_row = _ivy.array([[[1., 0., 0.]]])

        # 1 x 1 x 1
        zeros = _ivy.zeros((1, 1, 1))

        # 1 x 1 x 4
        bottom_row = _ivy.array([[[0., 0., 0., 1.]]])

        for i in range(self._num_joints):
            # 1 x 1 x 1
            a_i = _ivy.reshape(a_s[i], [1] * 3)
            cos_alpha = _ivy.reshape(_ivy.cos(alpha_s[i]), [1] * 3)
            sin_alpha = _ivy.reshape(_ivy.sin(alpha_s[i]), [1] * 3)

            # 1 x 1 x 4
            top_row = _ivy.concatenate((start_of_top_row, a_i), -1)
            top_middle_row = _ivy.concatenate((zeros, cos_alpha, -sin_alpha, zeros), -1)
            bottom_middle_row = _ivy.concatenate((zeros, sin_alpha, cos_alpha, zeros), -1)

            # 1 x 4 x 4
            AidashtoAi = _ivy.concatenate((top_row, top_middle_row, bottom_middle_row, bottom_row), 1)

            # list
            AidashtoAis_list.append(AidashtoAi)

        # NJ x 4 x 4
        self._AidashtoAis = _ivy.concatenate(AidashtoAis_list, 0)

        # Constant Jacobian Params

        # Using same nomenclature as in:
        # Modelling, Planning and Control. Bruno Siciliano, Lorenzo Sciavicco, Luigi Villani, Giuseppe Oriolo
        # page 111 - 113

        # 3 x 1
        self._z0 = _ivy.array([[0],
                               [0],
                               [1]])

        # 4 x 1
        self._p0hat = _ivy.array([[0],
                                  [0],
                                  [0],
                                  [1]])

        # link lengths

        # NJ
        self._link_lengths = (a_s ** 2 + d_s ** 2) ** 0.5
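
A hedged sketch of one constant Denavit-Hartenberg block assembled in the loop above, for a single joint with a = 0.5 and alpha = pi/2, matching the row layout in the code (NumPy stand-in):

import numpy as np

a_i, alpha_i = 0.5, np.pi / 2
AidashtoAi = np.array([[1., 0., 0., a_i],
                       [0., np.cos(alpha_i), -np.sin(alpha_i), 0.],
                       [0., np.sin(alpha_i),  np.cos(alpha_i), 0.],
                       [0., 0., 0., 1.]])
assert AidashtoAi.shape == (4, 4)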
Example #11
    def compute_link_matrices(self, joint_angles, link_num, batch_shape=None):
        """
        Compute homogeneous transformation matrices, relative to the base frame, for links up to link_num.

        :param joint_angles: Joint angles of the robot *[batch_shape,num_joints]*
        :type joint_angles: array
        :param link_num: Link number for which to compute matrices up to
        :type link_num: int
        :param batch_shape: Shape of batch. Inferred from inputs if None.
        :type batch_shape: sequence of ints, optional
        :return: The link matrices, up to link_num *[batch_shape,link_num,4,4]*
        """

        if batch_shape is None:
            batch_shape = joint_angles.shape[:-1]
        batch_shape = list(batch_shape)
        num_batch_dims = len(batch_shape)

        # BS x 1 x NJ
        dh_joint_angles = _ivy.expand_dims(joint_angles * self._dh_joint_scales - self._dh_joint_offsets, -2)

        # BS x 1 x 4 x 4
        A00 = _ivy.identity(4, batch_shape=batch_shape + [1])

        Aitoip1dashs = list()
        Aiip1s = list()
        A0is = [A00]

        # repeated blocks

        # BS x 1 x NJ
        dis = _ivy.tile(_ivy.reshape(self._dis, [1] * num_batch_dims + [1, self._num_joints]),
                        batch_shape + [1, 1])

        # BS x 1 x 4
        bottom_row = _ivy.tile(
            _ivy.reshape(_ivy.array([0., 0., 0., 1.]), [1] * num_batch_dims + [1, 4]),
            batch_shape + [1, 1])

        # BS x 1 x 3
        start_of_bottom_middle = _ivy.tile(
            _ivy.reshape(_ivy.array([0., 0., 1.]), [1] * num_batch_dims + [1, 3]),
            batch_shape + [1, 1])

        # BS x 1 x 2
        zeros = _ivy.zeros(batch_shape + [1, 2])

        for i in range(self._num_joints):

            # BS x 1 x 4
            top_row = _ivy.concatenate((_ivy.cos(dh_joint_angles[..., i:i + 1]),
                                        -_ivy.sin(dh_joint_angles[..., i:i + 1]), zeros), -1)
            top_middle_row = _ivy.concatenate((_ivy.sin(dh_joint_angles[..., i:i + 1]),
                                               _ivy.cos(dh_joint_angles[..., i:i + 1]), zeros), -1)
            bottom_middle_row = _ivy.concatenate((start_of_bottom_middle, dis[..., i:i + 1]), -1)

            # BS x 4 x 4
            Aitoip1dash = _ivy.concatenate((top_row, top_middle_row, bottom_middle_row, bottom_row), -2)

            # (BSx4) x 4
            Aitoip1dash_flat = _ivy.reshape(Aitoip1dash, (-1, 4))

            # (BSx4) x 4
            Aiip1_flat = _ivy.matmul(Aitoip1dash_flat, self._AidashtoAis[i + 1])

            # BS x 4 x 4
            Aiip1 = _ivy.reshape(Aiip1_flat, batch_shape + [4, 4])

            # BS x 4 x 4
            A0ip1 = _ivy.matmul(A0is[-1][..., 0, :, :], Aiip1)

            # append term to lists
            Aitoip1dashs.append(Aitoip1dash)
            Aiip1s.append(Aiip1)
            A0is.append(_ivy.expand_dims(A0ip1, -3))

            if i + 1 == link_num:
                # BS x LN x 4 x 4
                return _ivy.concatenate(A0is, -3)

        raise ValueError('invalid link_num; expected an integer from 1 to ' + str(self._num_joints))
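
For comparison with the constant blocks of Example #10, a sketch of the joint-dependent block assembled in each loop iteration above, for a single joint angle theta and link offset d (NumPy stand-in):

import numpy as np

theta, d = 0.3, 0.1
# pure rotation about z by theta, plus translation d along z
Aitoip1dash = np.array([[np.cos(theta), -np.sin(theta), 0., 0.],
                        [np.sin(theta),  np.cos(theta), 0., 0.],
                        [0., 0., 1., d],
                        [0., 0., 0., 1.]])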