Example #1
def projection_matrix_pseudo_inverse(proj_mat, batch_shape=None):
    """
    Given projection matrix :math:`\mathbf{P}\in\mathbb{R}^{3×4}`, compute its pseudo-inverse
    :math:`\mathbf{P}^+\in\mathbb{R}^{4×3}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf?_ijt=25ihpil89dmfo4da975v402ogc#page=179>`_
    bottom of page 161, section 6.2.2
    
    :param proj_mat: Projection matrix *[batch_shape,3,4]*
    :type proj_mat: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Projection matrix pseudo-inverse *[batch_shape,4,3]*
    """

    if batch_shape is None:
        batch_shape = proj_mat.shape[:-2]

    # shapes as list
    batch_shape = list(batch_shape)

    # transpose idxs
    num_batch_dims = len(batch_shape)
    transpose_idxs = list(
        range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims]

    # BS x 4 x 3
    matrix_transposed = _ivy.transpose(proj_mat, transpose_idxs)

    # BS x 4 x 3
    return _ivy.matmul(matrix_transposed,
                       _ivy.inv(_ivy.matmul(proj_mat, matrix_transposed)))
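A quick sanity check of the closed form above: P+ = P^T (P P^T)^-1 is a right inverse, so P P+ = I_3. A minimal un-batched sketch, with NumPy standing in for the ivy backend (illustrative only):

import numpy as np

P = np.random.uniform(size=(3, 4))         # random full-rank projection matrix
P_pinv = P.T @ np.linalg.inv(P @ P.T)      # right pseudo-inverse, 4 x 3
assert np.allclose(P @ P_pinv, np.eye(3))  # P P+ = I_3 by construction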
Example #2
def get_fundamental_matrix(full_mat1,
                           full_mat2,
                           camera_center1=None,
                           pinv_full_mat1=None,
                           batch_shape=None,
                           dev_str=None):
    """
    Compute fundamental matrix :math:`\mathbf{F}\in\mathbb{R}^{3×3}` between two cameras, given their full projection
    matrices :math:`\mathbf{P}_1\in\mathbb{R}^{3×4}` and :math:`\mathbf{P}_2\in\mathbb{R}^{3×4}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=262>`_
    bottom of page 244, section 9.2.2, equation 9.1

    :param full_mat1: Frame 1 full projection matrix *[batch_shape,3,4]*
    :type full_mat1: array
    :param full_mat2: Frame 2 full projection matrix *[batch_shape,3,4]*
    :type full_mat2: array
    :param camera_center1: Frame 1 camera center, inferred from full_mat1 if None *[batch_shape,3,1]*
    :type camera_center1: array, optional
    :param pinv_full_mat1: Frame 1 full projection matrix pseudo-inverse, inferred from full_mat1 if None *[batch_shape,4,3]*
    :type pinv_full_mat1: array, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as full_mat1 if None.
    :type dev_str: str, optional
    :return: Fundamental matrix connecting frames 1 and 2 *[batch_shape,3,3]*
    """

    if batch_shape is None:
        batch_shape = full_mat1.shape[:-2]

    if dev_str is None:
        dev_str = _ivy.dev_str(full_mat1)

    # shapes as list
    batch_shape = list(batch_shape)

    if camera_center1 is None:
        inv_full_mat1 = _ivy.inv(
            _ivy_mech.make_transformation_homogeneous(full_mat1, batch_shape,
                                                      dev_str))[..., 0:3, :]
        camera_center1 = _ivy_svg.inv_ext_mat_to_camera_center(inv_full_mat1)

    if pinv_full_mat1 is None:
        pinv_full_mat1 = _ivy.pinv(full_mat1)

    # BS x 4 x 1
    camera_center1_homo = _ivy.concatenate(
        (camera_center1, _ivy.ones(batch_shape + [1, 1], dev_str=dev_str)), -2)

    # BS x 3
    e2 = _ivy.matmul(full_mat2, camera_center1_homo)[..., -1]

    # BS x 3 x 3
    e2_skew_symmetric = _ivy.linalg.vector_to_skew_symmetric_matrix(e2)

    # BS x 3 x 3
    return _ivy.matmul(e2_skew_symmetric, _ivy.matmul(full_mat2,
                                                      pinv_full_mat1))
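For any world point, the projections into the two cameras satisfy the epipolar constraint x2^T F x1 = 0, which gives a quick check of equation 9.1. An un-batched NumPy sketch (NumPy stands in for the ivy backend; the setup is made up):

import numpy as np

def skew(v):
    # skew-symmetric matrix [v]_x, so that skew(v) @ u == np.cross(v, u)
    return np.array([[0., -v[2], v[1]],
                     [v[2], 0., -v[0]],
                     [-v[1], v[0], 0.]])

P1 = np.random.uniform(size=(3, 4))      # two random full projection matrices
P2 = np.random.uniform(size=(3, 4))

C1 = np.linalg.svd(P1)[2][-1]            # camera 1 centre: null vector, P1 @ C1 = 0
e2 = P2 @ C1                             # epipole in image 2
F = skew(e2) @ P2 @ np.linalg.pinv(P1)   # eq 9.1: F = [e2]_x P2 P1^+

X = np.random.uniform(size=4)            # any world point...
x1, x2 = P1 @ X, P2 @ X                  # ...projected into both views
assert abs(x2 @ F @ x1) < 1e-8           # epipolar constraint holds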
Example #3
def create_trimesh_indices_for_image(batch_shape, image_dims, dev_str='cpu:0'):
    """
    Create triangle mesh indices for an image with the given image dimensions.

    :param batch_shape: Shape of batch.
    :type batch_shape: sequence of ints
    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
    :type dev_str: str, optional
    :return: Triangle mesh indices for image *[batch_shape,2x(h-1)x(w-1),3]*
    """

    # shapes as lists
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # other shape specs
    num_batch_dims = len(batch_shape)
    tri_dim = 2 * (image_dims[0] - 1) * (image_dims[1] - 1)
    flat_shape = [1] * num_batch_dims + [tri_dim] + [3]
    tile_shape = batch_shape + [1] * 2

    # 1 x W-1
    t00_ = _ivy.reshape(_ivy.arange(image_dims[1] - 1, dtype_str='float32', dev_str=dev_str), (1, -1))

    # H-1 x 1
    k_ = _ivy.reshape(_ivy.arange(image_dims[0] - 1, dtype_str='float32', dev_str=dev_str), (-1, 1)) * image_dims[1]

    # H-1 x W-1
    t00_ = _ivy.matmul(_ivy.ones((image_dims[0] - 1, 1), dev_str=dev_str), t00_)
    k_ = _ivy.matmul(k_, _ivy.ones((1, image_dims[1] - 1), dev_str=dev_str))

    # (H-1xW-1) x 1
    t00 = _ivy.expand_dims(t00_ + k_, -1)
    t01 = t00 + 1
    t02 = t00 + image_dims[1]
    t10 = t00 + image_dims[1] + 1
    t11 = t01
    t12 = t02

    # (H-1xW-1) x 3
    t0 = _ivy.concatenate((t00, t01, t02), -1)
    t1 = _ivy.concatenate((t10, t11, t12), -1)

    # BS x 2x(H-1xW-1) x 3
    return _ivy.tile(_ivy.reshape(_ivy.concatenate((t0, t1), 0),
                                  flat_shape), tile_shape)
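For a hypothetical 2 x 3 image the function yields 2 x (2-1) x (3-1) = 4 triangles over the row-major pixel grid [[0, 1, 2], [3, 4, 5]]. The same indexing, un-batched in plain NumPy:

import numpy as np

h, w = 2, 3
t00 = (np.arange(w - 1)[None, :] + w * np.arange(h - 1)[:, None]).reshape(-1, 1)
t0 = np.concatenate((t00, t00 + 1, t00 + w), -1)          # upper-left triangles
t1 = np.concatenate((t00 + w + 1, t00 + 1, t00 + w), -1)  # lower-right triangles
print(np.concatenate((t0, t1), 0))
# [[0 1 3]
#  [1 2 4]
#  [4 1 3]
#  [5 2 4]]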
Example #4
def projection_matrix_inverse(proj_mat):
    """
    Given projection matrix :math:`\mathbf{P}\in\mathbb{R}^{3×4}`, compute its inverse
    :math:`\mathbf{P}^{-1}\in\mathbb{R}^{3×4}`.\n
    `[reference] <https://github.com/pranjals16/cs676/blob/master/Hartley%2C%20Zisserman%20-%20Multiple%20View%20Geometry%20in%20Computer%20Vision.pdf#page=174>`_
    middle of page 156, section 6.1, eq 6.6

    :param proj_mat: Projection matrix *[batch_shape,3,4]*
    :type proj_mat: array
    :return: Projection matrix inverse *[batch_shape,3,4]*
    """

    # BS x 3 x 3
    rotation_matrix = proj_mat[..., 0:3]

    # BS x 3 x 3
    rotation_matrix_inverses = _ivy.inv(rotation_matrix)

    # BS x 3 x 1
    translations = proj_mat[..., 3:4]

    # BS x 3 x 1
    translation_inverses = -_ivy.matmul(rotation_matrix_inverses, translations)

    # BS x 3 x 4
    return _ivy.concatenate((rotation_matrix_inverses, translation_inverses),
                            -1)
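Composing [R | t] with its inverse [R^-1 | -R^-1 t] gives the identity once both are made homogeneous, which checks eq 6.6. A small un-batched NumPy sketch (illustrative only):

import numpy as np

R = np.linalg.qr(np.random.randn(3, 3))[0]       # any invertible 3 x 3 (here a rotation)
t = np.random.randn(3, 1)
E = np.concatenate((R, t), -1)                   # [R | t], 3 x 4

R_inv = np.linalg.inv(R)
E_inv = np.concatenate((R_inv, -R_inv @ t), -1)  # eq 6.6: [R^-1 | -R^-1 t]

def homo(m):
    # append the [0, 0, 0, 1] row, making a 3 x 4 matrix 4 x 4
    return np.concatenate((m, np.array([[0., 0., 0., 1.]])), 0)

assert np.allclose(homo(E) @ homo(E_inv), np.eye(4))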
Example #5
def rot_mat_and_cam_center_to_ext_mat(rotation_mat, camera_center, batch_shape=None):
    """
    Get extrinsic matrix :math:`\mathbf{E}\in\mathbb{R}^{3×4}` from rotation matrix
    :math:`\mathbf{R}\in\mathbb{R}^{3×3}` and camera center :math:`\overset{\sim}{\mathbf{C}}\in\mathbb{R}^{3×1}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=175>`_
    page 157, section 6.1, equation 6.11

    :param rotation_mat: Rotation matrix *[batch_shape,3,3]*
    :type rotation_mat: array
    :param camera_center: Camera center *[batch_shape,3,1]*
    :type camera_center: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Extrinsic matrix *[batch_shape,3,4]*
    """

    if batch_shape is None:
        batch_shape = rotation_mat.shape[:-2]

    # shapes as list
    batch_shape = list(batch_shape)

    # num batch dims
    num_batch_dims = len(batch_shape)

    # BS x 3 x 3
    identity = _ivy.tile(_ivy.reshape(_ivy.identity(3), [1] * num_batch_dims + [3, 3]),
                         batch_shape + [1, 1])

    # BS x 3 x 4
    identity_w_cam_center = _ivy.concatenate((identity, -camera_center), -1)

    # BS x 3 x 4
    return _ivy.matmul(rotation_mat, identity_w_cam_center)
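Equation 6.11 implies the camera centre is the null point of the resulting extrinsic matrix: E [C; 1] = R (C - C) = 0. A quick un-batched NumPy check (NumPy stands in for the ivy backend):

import numpy as np

R = np.linalg.qr(np.random.randn(3, 3))[0]     # hypothetical rotation matrix
C = np.random.randn(3, 1)                      # camera centre
E = R @ np.concatenate((np.eye(3), -C), -1)    # eq 6.11: E = R [I | -C]

C_homo = np.concatenate((C, np.ones((1, 1))), 0)
assert np.allclose(E @ C_homo, 0.)             # the centre maps to the origin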
Example #6
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    pix_per_deg = 2
    om_pix = sim.get_pix_coords()
    plr_degs = om_pix / pix_per_deg
    plr_rads = plr_degs * math.pi / 180
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        depth, rgb = sim.omcam.cap()
        plr = ivy.concatenate([plr_rads, depth], -1)
        xyz_wrt_cam = ivy_mech.polar_to_cartesian_coords(plr)
        xyz_wrt_cam = ivy.reshape(xyz_wrt_cam, (-1, 3))
        xyz_wrt_cam_homo = ivy_mech.make_coordinates_homogeneous(xyz_wrt_cam)
        inv_ext_mat_trans = ivy.transpose(sim.omcam.get_inv_ext_mat(), (1, 0))
        xyz_wrt_world = ivy.matmul(xyz_wrt_cam_homo, inv_ext_mat_trans)[..., 0:3]
        with ivy.numpy.use:
            omni_cam_inv_ext_mat = ivy_mech.make_transformation_homogeneous(
                ivy.to_numpy(sim.omcam.get_inv_ext_mat()))
        vis.show_point_cloud(xyz_wrt_world, rgb, interactive,
                             sphere_inv_ext_mats=[omni_cam_inv_ext_mat], sphere_radii=[0.025])
        if not interactive:
            sim.omcam.set_pos(sim.omcam.get_pos()
                               + ivy.array([-0.01, 0.01, 0.]))
    sim.close()
    unset_framework()
Example #7
    def measure_incremental_mat(self):
        inv_ext_mat = ivy.reshape(ivy.array(self._handle.get_matrix()), (3, 4))
        inv_ext_mat_homo = ivy_mech.make_transformation_homogeneous(inv_ext_mat)
        ext_mat_homo = ivy.inv(inv_ext_mat_homo)
        ext_mat = ext_mat_homo[0:3, :]
        rel_mat = ivy.matmul(ext_mat, self._inv_ext_mat_homo)
        self._inv_ext_mat_homo = inv_ext_mat_homo
        return rel_mat
Example #8
def _fit_spline(train_points, train_values, order):

    # shapes
    train_points_shape = train_points.shape
    batch_shape = list(train_points_shape[:-2])
    num_batch_dims = len(batch_shape)
    n = train_points_shape[-2]
    pd = train_values.shape[-1]

    # BS x N x 1
    c = train_points

    # BS x N x PD
    f_ = train_values

    # BS x N x N
    matrix_a = _phi(_pairwise_distance(c, c), order)

    # BS x N x 1
    ones = _ivy.ones_like(c[..., :1])

    # BS x N x 2
    matrix_b = _ivy.concatenate([c, ones], -1)

    # BS x 2 x N
    matrix_b_trans = _ivy.transpose(
        matrix_b,
        list(range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims])

    # BS x N+2 x N
    left_block = _ivy.concatenate([matrix_a, matrix_b_trans], -2)

    # BS x 2 x 2
    lhs_zeros = _ivy.zeros(batch_shape + [2, 2])

    # BS x N+2 x 2
    right_block = _ivy.concatenate([matrix_b, lhs_zeros], -2)

    # BS x N+2 x N+2
    lhs = _ivy.concatenate([left_block, right_block], -1)

    # BS x 2 x PD
    rhs_zeros = _ivy.zeros(batch_shape + [2, pd])

    # BS x N+2 x PD
    rhs = _ivy.concatenate([f_, rhs_zeros], -2)

    # BS x N+2 x PD
    w_v = _ivy.matmul(_ivy.pinv(lhs), rhs)

    # BS x N x PD
    w = w_v[..., :n, :]

    # BS x 2 x PD
    v = w_v[..., n:, :]

    # BS x N x PD,    BS x 2 x PD
    return w, v
Example #9
def transform(coords, trans, batch_shape=None, image_dims=None):
    """
    Transform image of :math:`n`-dimensional co-ordinates :math:`\mathbf{x}\in\mathbb{R}^{h×w×n}` by
    transformation matrix :math:`\mathbf{f}\in\mathbb{R}^{m×n}`, to produce image of transformed co-ordinates
    :math:`\mathbf{x}_{trans}\in\mathbb{R}^{h×w×m}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Matrix_multiplication>`_

    :param coords: Co-ordinate image *[batch_shape,height,width,n]*
    :type coords: array
    :param trans: Transformation matrix *[batch_shape,m,n]*
    :type trans: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :return: Transformed co-ordinate image *[batch_shape,height,width,m]*
    """

    if batch_shape is None:
        batch_shape = coords.shape[:-3]

    if image_dims is None:
        image_dims = coords.shape[-3:-1]

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # transpose idxs
    num_batch_dims = len(batch_shape)
    transpose_idxs = list(
        range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims]

    # BS x (HxW) x N
    coords_flattened = _ivy.reshape(
        coords, batch_shape + [image_dims[0] * image_dims[1], -1])

    # BS x N x (HxW)
    coords_reshaped = _ivy.transpose(coords_flattened, transpose_idxs)

    # BS x M x (HxW)
    transformed_coords_vector = _ivy.matmul(trans, coords_reshaped)

    # BS x (HxW) x M
    transformed_coords_vector_transposed = _ivy.transpose(
        transformed_coords_vector, transpose_idxs)

    # BS x H x W x M
    return _ivy.reshape(transformed_coords_vector_transposed,
                        batch_shape + image_dims + [-1])
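A tiny un-batched NumPy example of the same flatten-multiply-reshape pattern: rotating a 2 x 2 image of 2-D co-ordinates by 90 degrees (so m = n = 2; the values are made up):

import numpy as np

coords = np.array([[[1., 0.], [0., 1.]],
                   [[1., 1.], [2., 0.]]])   # H x W x 2 co-ordinate image
trans = np.array([[0., -1.],
                  [1., 0.]])                # 90-degree rotation, m x n = 2 x 2

flat = coords.reshape(-1, 2)                # (HxW) x 2
out = (trans @ flat.T).T.reshape(2, 2, 2)   # back to H x W x 2
print(out[0, 0])                            # [0. 1.] -- (1, 0) rotated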
Example #10
def calib_and_ext_to_full_mat(calib_mat, ext_mat):
    """
    Compute full projection matrix :math:`\mathbf{P}\in\mathbb{R}^{3×4}` from calibration
    :math:`\mathbf{K}\in\mathbb{R}^{3×3}` and extrinsic matrix :math:`\mathbf{E}\in\mathbb{R}^{3×4}`.

    :param calib_mat: Calibration matrix *[batch_shape,3,3]*
    :type calib_mat: array
    :param ext_mat: Extrinsic matrix *[batch_shape,3,4]*
    :type ext_mat: array
    :return: Full projection matrix *[batch_shape,3,4]*
    """

    # BS x 3 x 4
    return _ivy.matmul(calib_mat, ext_mat)
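For instance, with pinhole intrinsics and an identity pose, the full matrix projects a point on the optical axis onto the principal point. A minimal NumPy sketch (all values here are made up):

import numpy as np

fx, fy, cx, cy = 512., 512., 255.5, 255.5   # hypothetical intrinsics
K = np.array([[fx, 0., cx],
              [0., fy, cy],
              [0., 0., 1.]])
E = np.concatenate((np.eye(3), np.zeros((3, 1))), -1)  # identity pose, 3 x 4
P = K @ E                                              # full projection matrix

x = P @ np.array([0., 0., 2., 1.])          # a point 2m along the optical axis
assert np.allclose(x[:2] / x[2], [cx, cy])  # lands on the principal point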
Example #11
def sample_spline_path(anchor_points, anchor_vals, sample_points, order=3):
    """
    Sample spline path, given sample locations for path defined by the anchor locations and points.
    `[reference] <https://github.com/tensorflow/addons/blob/v0.11.2/tensorflow_addons/image/interpolate_spline.py>`_

    :param anchor_points: Anchor locations between 0-1 (regular spacing not necessary) *[batch_shape,num_anchors,1]*
    :type anchor_points: array
    :param anchor_vals: Anchor points along the spline path, in path space *[batch_shape,num_anchors,path_dim]*
    :type anchor_vals: array
    :param sample_points: Sample locations between 0-1 *[batch_shape,num_samples,1]*
    :type sample_points: array
    :param order: Order of the spline path interpolation
    :type order: int
    :return: Spline path sampled at sample_locations, giving points in path space *[batch_shape,num_samples,path_dim]*
    """

    # BS x N x PD,    BS x 2 x PD
    w, v = _fit_spline(anchor_points, anchor_vals, order)

    # Kernel term

    # BS x NS x N
    pairwise_dists = _pairwise_distance(sample_points, anchor_points)
    phi_pairwise_dists = _phi(pairwise_dists, order)

    # BS x NS x PD
    rbf_term = _ivy.matmul(phi_pairwise_dists, w)

    # Polynomial / linear term.

    # BS x NS x 2
    query_points_pad = _ivy.concatenate(
        [sample_points, _ivy.ones_like(sample_points[..., :1])], -1)

    # BS x NS x PD
    linear_term = _ivy.matmul(query_points_pad, v)
    return rbf_term + linear_term
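Together with _fit_spline above, this solves the block system [[A, B], [B^T, 0]] [w; v] = [f; 0] at the anchors, then evaluates the phi-weighted RBF term plus the linear term at the samples. A self-contained 1-D NumPy sketch, using the common polyharmonic kernel phi(r) = r^3 for order 3 (an assumption; the library's _phi may differ in details such as epsilon handling):

import numpy as np

def phi(r, order=3):
    # polyharmonic kernel, odd order: phi(r) = r^order (assumed form)
    return r ** order

def pairwise_dist(a, b):
    # a: N x 1, b: M x 1  ->  N x M matrix of Euclidean distances
    return np.abs(a - b.T)

c = np.linspace(0., 1., 8)[:, None]           # N x 1 anchor locations
f = np.sin(2. * np.pi * c)                    # N x 1 anchor values

A = phi(pairwise_dist(c, c))                  # N x N kernel matrix
B = np.concatenate([c, np.ones_like(c)], -1)  # N x 2 linear/constant block
lhs = np.block([[A, B], [B.T, np.zeros((2, 2))]])
rhs = np.concatenate([f, np.zeros((2, 1))], 0)
w_v = np.linalg.solve(lhs, rhs)
w, v = w_v[:-2], w_v[-2:]                     # RBF weights, linear weights

s = np.linspace(0., 1., 50)[:, None]          # NS x 1 sample locations
vals = phi(pairwise_dist(s, c)) @ w + np.concatenate([s, np.ones_like(s)], -1) @ v
assert np.allclose(A @ w + B @ v, f)          # the spline interpolates the anchors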
Example #12
    def _addressing(self, k, beta, g, s, gamma, prev_M, prev_w):

        # Sec 3.3.1 Focusing by Content

        # Cosine Similarity

        k = ivy.expand_dims(k, axis=2)
        inner_product = ivy.matmul(prev_M, k)
        k_norm = ivy.reduce_sum(k**2, axis=1, keepdims=True)**0.5
        M_norm = ivy.reduce_sum(prev_M**2, axis=2, keepdims=True)**0.5
        norm_product = M_norm * k_norm
        K = ivy.squeeze(inner_product / (norm_product + 1e-8))  # eq (6)

        # Calculating w^c

        K_amplified = ivy.exp(ivy.expand_dims(beta, axis=1) * K)
        w_c = K_amplified / ivy.reduce_sum(K_amplified, axis=1,
                                           keepdims=True)  # eq (5)

        if self._addressing_mode == 'content':  # Only focus on content
            return w_c

        # Sec 3.3.2 Focusing by Location

        g = ivy.expand_dims(g, axis=1)
        w_g = g * w_c + (1 - g) * prev_w  # eq (7)

        s = ivy.concatenate([
            s[:, :self._shift_range + 1],
            ivy.zeros(
                [s.shape[0], self._memory_size -
                 (self._shift_range * 2 + 1)]), s[:, -self._shift_range:]
        ],
                            axis=1)
        t = ivy.concatenate([ivy.flip(s, axis=[1]),
                             ivy.flip(s, axis=[1])],
                            axis=1)
        s_matrix = ivy.stack([
            t[:, self._memory_size - i - 1:self._memory_size * 2 - i - 1]
            for i in range(self._memory_size)
        ],
                             axis=1)
        w_ = ivy.reduce_sum(ivy.expand_dims(w_g, axis=1) * s_matrix,
                            axis=2)  # eq (8)
        w_sharpen = w_**ivy.expand_dims(gamma, axis=1)
        w = w_sharpen / ivy.reduce_sum(w_sharpen, axis=1,
                                       keepdims=True)  # eq (9)

        return w
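Equations (5) and (6) amount to a softmax over cosine similarities between the key and each memory row. A minimal un-batched NumPy sketch (all sizes are made up):

import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

M = np.random.randn(4, 3)             # memory: 4 slots of 3-dim vectors
k = M[2] + 0.01 * np.random.randn(3)  # a key close to slot 2
beta = 10.                            # key strength

# eq (6): cosine similarity between the key and every memory slot
K = M @ k / (np.linalg.norm(M, axis=1) * np.linalg.norm(k) + 1e-8)

# eq (5): amplified, normalised similarities give the content weighting
w_c = softmax(beta * K)
print(w_c.argmax())                   # 2 (almost surely): attention on slot 2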
Example #13
    def sample_body(self, inv_ext_mats, batch_shape=None):
        """
        Sample body points of the robot at uniformly distributed cartesian positions.

        :param inv_ext_mats: Inverse extrinsic matrices *[batch_shape,3,4]*
        :type inv_ext_mats: array
        :param batch_shape: Shape of batch. Inferred from inputs if None.
        :type batch_shape: sequence of ints, optional
        :return: The sampled body cartesian positions, in the world reference frame *[batch_shape,num_body_points,3]*
        """

        if batch_shape is None:
            batch_shape = inv_ext_mats.shape[:-2]
        batch_shape = list(batch_shape)

        # (BSx3) x NBP
        body_points_trans = _ivy.matmul(_ivy.reshape(inv_ext_mats, (-1, 4)), self._rel_body_points_homo_trans)

        # BS x NBP x 3
        return _ivy.swapaxes(_ivy.reshape(body_points_trans, batch_shape + [3, -1]), -1, -2)
Example #14
def cuboid_signed_distances(cuboid_ext_mats,
                            cuboid_dims,
                            query_positions,
                            batch_shape=None):
    """
    Return the signed distances of a set of query points from the cuboid surfaces.\n
    `[reference] <https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm>`_

    :param cuboid_ext_mats: Extrinsic matrices of the cuboids *[batch_shape,num_cuboids,3,4]*
    :type cuboid_ext_mats: array
    :param cuboid_dims: Dimensions of the cuboids, in the order x, y, z *[batch_shape,num_cuboids,3]*
    :type cuboid_dims: array
    :param query_positions: Points for which to query the signed distances *[batch_shape,num_points,3]*
    :type query_positions: array
    :param batch_shape: Shape of batch. Assumed no batches if None.
    :type batch_shape: sequence of ints, optional
    :return: The distances of the query points from the closest cuboid surface *[batch_shape,num_points,1]*
    """

    if batch_shape is None:
        batch_shape = cuboid_ext_mats.shape[:-3]

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)
    batch_dims_for_trans = list(range(num_batch_dims))
    num_cuboids = cuboid_ext_mats.shape[-3]
    num_points = query_positions.shape[-2]

    # BS x 3 x NP
    query_positions_trans = _ivy.transpose(
        query_positions,
        batch_dims_for_trans + [num_batch_dims + 1, num_batch_dims])

    # BS x 1 x NP
    ones = _ivy.ones_like(query_positions_trans[..., 0:1, :])

    # BS x 4 x NP
    query_positions_trans_homo = _ivy.concatenate(
        (query_positions_trans, ones), -2)

    # BS x NCx3 x 4
    cuboid_ext_mats_flat = _ivy.reshape(cuboid_ext_mats, batch_shape + [-1, 4])

    # BS x NCx3 x NP
    rel_query_positions_trans_flat = _ivy.matmul(cuboid_ext_mats_flat,
                                                 query_positions_trans_homo)

    # BS x NC x 3 x NP
    rel_query_positions_trans = _ivy.reshape(
        rel_query_positions_trans_flat,
        batch_shape + [num_cuboids, 3, num_points])

    # BS x NC x NP x 3
    rel_query_positions = _ivy.transpose(
        rel_query_positions_trans, batch_dims_for_trans +
        [num_batch_dims, num_batch_dims + 2, num_batch_dims + 1])
    q = _ivy.abs(rel_query_positions) - _ivy.expand_dims(cuboid_dims / 2, -2)
    q_max_clipped = _ivy.maximum(q, 1e-12)

    # BS x NC x NP x 1
    q_min_clipped = _ivy.minimum(_ivy.reduce_max(q, -1, keepdims=True), 0.)
    q_max_clipped_len = _ivy.reduce_sum(q_max_clipped**2, -1,
                                        keepdims=True)**0.5
    sdfs = q_max_clipped_len + q_min_clipped

    # BS x NP x 1
    return _ivy.reduce_min(sdfs, -3)
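The per-cuboid term is the classic box SDF from the reference: with q = |p| - dims/2 in the cuboid frame, the distance is |max(q, 0)| + min(max_i q_i, 0). An un-batched NumPy sketch (using 0 in place of the 1e-12 clamp above, which is there for numerical stability):

import numpy as np

def box_sdf(p, dims):
    # signed distance of point p from an axis-aligned box at the origin
    q = np.abs(p) - dims / 2
    return np.linalg.norm(np.maximum(q, 0.)) + min(q.max(), 0.)

dims = np.array([2., 2., 2.])                 # cube of side 2
print(box_sdf(np.array([0., 0., 0.]), dims))  # -1.0: centre, one unit inside
print(box_sdf(np.array([2., 0., 0.]), dims))  #  1.0: one unit outside a face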
Example #15
    def _forward(self, x, prev_state):
        prev_read_vector_list = prev_state[1]

        controller_input = ivy.concatenate([x] + prev_read_vector_list, axis=1)
        controller_output, controller_state = self._controller(ivy.expand_dims(controller_input, -2),
                                                               initial_state=prev_state[0])
        controller_output = controller_output[..., -1, :]

        parameters = self._controller_proj(controller_output)
        parameters = ivy.clip(parameters, -self._clip_value, self._clip_value)
        head_parameter_list = \
            ivy.split(parameters[:, :self._num_parameters_per_head * self._num_heads], self._num_heads,
                          axis=1)
        erase_add_list = ivy.split(parameters[:, self._num_parameters_per_head * self._num_heads:],
                                       2 * self._write_head_num, axis=1)

        prev_w_list = prev_state[2]
        prev_M = prev_state[4]
        w_list = []
        for i, head_parameter in enumerate(head_parameter_list):
            k = ivy.tanh(head_parameter[:, 0:self._memory_vector_dim])
            beta = ivy.softplus(head_parameter[:, self._memory_vector_dim])
            g = ivy.sigmoid(head_parameter[:, self._memory_vector_dim + 1])
            s = ivy.softmax(
                head_parameter[:, self._memory_vector_dim + 2:self._memory_vector_dim +
                                                              2 + (self._shift_range * 2 + 1)])
            gamma = ivy.softplus(head_parameter[:, -1]) + 1
            w = self._addressing(k, beta, g, s, gamma, prev_M, prev_w_list[i])
            w_list.append(w)

        # Reading (Sec 3.1)

        read_w_list = w_list[:self._read_head_num]
        if self._step == 0:
            usage_indicator = ivy.zeros_like(w_list[0])
        else:
            usage_indicator = prev_state[3] + ivy.reduce_sum(ivy.concatenate(read_w_list, 0))
        read_vector_list = []
        for i in range(self._read_head_num):
            read_vector = ivy.reduce_sum(ivy.expand_dims(read_w_list[i], axis=2) * prev_M, axis=1)
            read_vector_list.append(read_vector)

        # Writing (Sec 3.2)

        prev_write_w_list = prev_w_list[self._read_head_num:]
        w_wr_size = math.ceil(self._memory_size / 2) if self._retroactive_updates else self._memory_size
        if self._sequential_writing:
            batch_size = ivy.shape(x)[0]
            if self._step < w_wr_size:
                w_wr_list = [ivy.tile(ivy.cast(ivy.one_hot(
                    ivy.array([self._step]), w_wr_size), 'float32'),
                    (batch_size, 1))] * self._write_head_num
            else:
                batch_idxs = ivy.expand_dims(ivy.arange(batch_size, 0), -1)
                mem_idxs = ivy.expand_dims(ivy.argmax(usage_indicator[..., :w_wr_size], -1), -1)
                total_idxs = ivy.concatenate((batch_idxs, mem_idxs), -1)
                w_wr_list = [ivy.scatter_nd(total_idxs, ivy.ones((batch_size,)),
                                                (batch_size, w_wr_size))] * self._write_head_num
        else:
            w_wr_list = w_list[self._read_head_num:]
        if self._retroactive_updates:
            w_ret_list = [self._retroactive_discount * prev_write_w[..., w_wr_size:] +
                          (1 - self._retroactive_discount) * prev_write_w[..., :w_wr_size]
                          for prev_write_w in prev_write_w_list]
            w_write_list = [ivy.concatenate((w_wr, w_ret), -1) for w_wr, w_ret in zip(w_wr_list, w_ret_list)]
        else:
            w_write_list = w_wr_list
        M = prev_M
        for i in range(self._write_head_num):
            w = ivy.expand_dims(w_write_list[i], axis=2)
            if self._with_erase:
                erase_vector = ivy.expand_dims(ivy.sigmoid(erase_add_list[i * 2]), axis=1)
                M = M * ivy.ones(ivy.shape(M)) - ivy.matmul(w, erase_vector)
            add_vector = ivy.expand_dims(ivy.tanh(erase_add_list[i * 2 + 1]), axis=1)
            M = M + ivy.matmul(w, add_vector)

        NTM_output = self._output_proj(ivy.concatenate([controller_output] + read_vector_list, axis=1))
        NTM_output = ivy.clip(NTM_output, -self._clip_value, self._clip_value)

        self._step += 1
        return NTM_output, NTMControllerState(
            controller_state=controller_state, read_vector_list=read_vector_list, w_list=w_list,
            usage_indicator=usage_indicator, M=M)
Example #16
def project_cam_coords_with_object_transformations(cam_coords_1,
                                                   id_image,
                                                   obj_ids,
                                                   obj_trans,
                                                   cam_1_to_2_ext_mat,
                                                   batch_shape=None,
                                                   image_dims=None):
    """
    Project camera-centric co-ordinates from frame 1 to frame 2, given a per-pixel object id image and per-object transformations.

    :param cam_coords_1: Camera-centric homogeneous co-ordinates image in frame t *[batch_shape,h,w,4]*
    :type cam_coords_1: array
    :param id_image: Image containing per-pixel object ids *[batch_shape,h,w,1]*
    :type id_image: array
    :param obj_ids: Object ids *[batch_shape,num_obj,1]*
    :type obj_ids: array
    :param obj_trans: Object transformations for this frame over time *[batch_shape,num_obj,3,4]*
    :type obj_trans: array
    :param cam_1_to_2_ext_mat: Camera 1 to camera 2 extrinsic projection matrix *[batch_shape,3,4]*
    :type cam_1_to_2_ext_mat: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param image_dims: Image dimensions. Inferred from inputs if None.
    :type image_dims: sequence of ints, optional
    :return: Frame 2 camera-centric homogeneous co-ordinates image *[batch_shape,h,w,4]*, and motion validity mask *[batch_shape,h,w,1]*
    """

    if batch_shape is None:
        batch_shape = cam_coords_1.shape[:-3]

    if image_dims is None:
        image_dims = cam_coords_1.shape[-3:-1]

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)
    num_batch_dims = len(batch_shape)

    # Transform the co-ordinate image by each transformation

    # BS x (num_obj x 3) x 4
    obj_trans = _ivy.reshape(obj_trans, batch_shape + [-1, 4])

    # BS x 4 x H x W
    cam_coords_1_ = _ivy.transpose(
        cam_coords_1,
        list(range(num_batch_dims)) + [i + num_batch_dims for i in [2, 0, 1]])

    # BS x 4 x (HxW)
    cam_coords_1_ = _ivy.reshape(cam_coords_1_, batch_shape + [4, -1])

    # BS x (num_obj x 3) x (HxW)
    cam_coords_2_all_obj_trans = _ivy.matmul(obj_trans, cam_coords_1_)

    # BS x (HxW) x (num_obj x 3)
    cam_coords_2_all_obj_trans = \
        _ivy.transpose(cam_coords_2_all_obj_trans, list(range(num_batch_dims)) + [i + num_batch_dims for i in [1, 0]])

    # BS x H x W x num_obj x 3
    cam_coords_2_all_obj_trans = _ivy.reshape(
        cam_coords_2_all_obj_trans, batch_shape + image_dims + [-1, 3])

    # Multiplier

    # BS x 1 x 1 x num_obj
    obj_ids = _ivy.reshape(obj_ids, batch_shape + [1, 1] + [-1])

    # BS x H x W x num_obj x 1
    multiplier = _ivy.cast(_ivy.expand_dims(obj_ids == id_image, -1),
                           'float32')

    # compute validity mask, for pixels which are on moving objects

    # BS x H x W x 1
    motion_mask = _ivy.reduce_sum(multiplier, -2) > 0

    # make invalid transformations equal to zero

    # BS x H x W x num_obj x 3
    cam_coords_2_all_obj_trans_w_zeros = cam_coords_2_all_obj_trans * multiplier

    # reduce to get only valid transformations

    # BS x H x W x 3
    cam_coords_2_all_obj_trans = _ivy.reduce_sum(
        cam_coords_2_all_obj_trans_w_zeros, -2)

    # find cam coords for zero-motion pixels

    # BS x H x W x 4
    cam_coords_2_wo_motion = _ivy_tvg.cam_to_cam_coords(
        cam_coords_1, cam_1_to_2_ext_mat, batch_shape, image_dims)

    # BS x H x W x 4
    cam_coords_2_all_trans_homo =\
        _ivy_mech.make_coordinates_homogeneous(cam_coords_2_all_obj_trans, batch_shape + image_dims)
    cam_coords_2 = _ivy.where(motion_mask, cam_coords_2_all_trans_homo,
                              cam_coords_2_wo_motion)

    # return

    # BS x H x W x 4,    BS x H x W x 1
    return cam_coords_2, motion_mask
Example #17
def main(f=None):

    # Framework Setup #
    # ----------------#

    # choose random framework

    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Orientation #
    # ------------#

    # rotation representations

    # 3
    rot_vec = ivy.array([0., 1., 0.])

    # 3 x 3
    rot_mat = ivy_mech.rot_vec_to_rot_mat(rot_vec)

    # 3
    euler_angles = ivy_mech.rot_mat_to_euler(rot_mat, 'zyx')

    # 4
    quat = ivy_mech.euler_to_quaternion(euler_angles)

    # 4
    axis_and_angle = ivy_mech.quaternion_to_axis_angle(quat)

    # 3
    rot_vec_again = axis_and_angle[..., :-1] * axis_and_angle[..., -1:]

    # Pose #
    # -----#

    # pose representations

    # 3
    position = ivy.ones_like(rot_vec)

    # 6
    rot_vec_pose = ivy.concatenate((position, rot_vec), 0)

    # 3 x 4
    mat_pose = ivy_mech.rot_vec_pose_to_mat_pose(rot_vec_pose)

    # 6
    euler_pose = ivy_mech.mat_pose_to_euler_pose(mat_pose)

    # 7
    quat_pose = ivy_mech.euler_pose_to_quaternion_pose(euler_pose)

    # 6
    rot_vec_pose_again = ivy_mech.quaternion_pose_to_rot_vec_pose(quat_pose)

    # Position #
    # ---------#

    # conversions of positional representation

    # 3
    cartesian_coord = ivy.random_uniform(0., 1., (3, ))

    # 3
    polar_coord = ivy_mech.cartesian_to_polar_coords(cartesian_coord)

    # 3
    cartesian_coord_again = ivy_mech.polar_to_cartesian_coords(polar_coord)

    # cartesian co-ordinate frame-of-reference transformations

    # 3 x 4
    trans_mat = ivy.random_uniform(0., 1., (3, 4))

    # 4
    cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        cartesian_coord)

    # 3
    trans_cartesian_coord = ivy.matmul(
        trans_mat, ivy.expand_dims(cartesian_coord_homo, -1))[:, 0]

    # 4
    trans_cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        trans_cartesian_coord)

    # 4 x 4
    trans_mat_homo = ivy_mech.make_transformation_homogeneous(trans_mat)

    # 3 x 4
    inv_trans_mat = ivy.inv(trans_mat_homo)[0:3]

    # 3
    cartesian_coord_again = ivy.matmul(
        inv_trans_mat, ivy.expand_dims(trans_cartesian_coord_homo, -1))[:, 0]

    # message
    print('End of Run Through Demo!')
Example #18
def main(interactive=True, f=None):

    global INTERACTIVE
    INTERACTIVE = interactive

    # Framework Setup #
    # ----------------#

    # choose random framework
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Camera Geometry #
    # ----------------#

    # intrinsics

    # common intrinsic params
    img_dims = [512, 512]
    pp_offsets = ivy.array([dim / 2 - 0.5 for dim in img_dims], 'float32')
    cam_persp_angles = ivy.array([60 * np.pi / 180] * 2, 'float32')

    # ivy cam intrinsics container
    intrinsics = ivy_vision.persp_angles_and_pp_offsets_to_intrinsics_object(
        cam_persp_angles, pp_offsets, img_dims)

    # extrinsics

    # 3 x 4
    cam1_inv_ext_mat = ivy.array(np.load(data_dir + '/cam1_inv_ext_mat.npy'),
                                 'float32')
    cam2_inv_ext_mat = ivy.array(np.load(data_dir + '/cam2_inv_ext_mat.npy'),
                                 'float32')

    # full geometry

    # ivy cam geometry container
    cam1_geom = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
        cam1_inv_ext_mat, intrinsics)
    cam2_geom = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
        cam2_inv_ext_mat, intrinsics)
    cam_geoms = [cam1_geom, cam2_geom]

    # Camera Geometry Check #
    # ----------------------#

    # assert camera geometry shapes

    for cam_geom in cam_geoms:

        assert cam_geom.intrinsics.focal_lengths.shape == (2, )
        assert cam_geom.intrinsics.persp_angles.shape == (2, )
        assert cam_geom.intrinsics.pp_offsets.shape == (2, )
        assert cam_geom.intrinsics.calib_mats.shape == (3, 3)
        assert cam_geom.intrinsics.inv_calib_mats.shape == (3, 3)

        assert cam_geom.extrinsics.cam_centers.shape == (3, 1)
        assert cam_geom.extrinsics.Rs.shape == (3, 3)
        assert cam_geom.extrinsics.inv_Rs.shape == (3, 3)
        assert cam_geom.extrinsics.ext_mats_homo.shape == (4, 4)
        assert cam_geom.extrinsics.inv_ext_mats_homo.shape == (4, 4)

        assert cam_geom.full_mats_homo.shape == (4, 4)
        assert cam_geom.inv_full_mats_homo.shape == (4, 4)

    # Image Data #
    # -----------#

    # load images

    # h x w x 3
    color1 = ivy.array(
        cv2.imread(data_dir + '/rgb1.png').astype(np.float32) / 255)
    color2 = ivy.array(
        cv2.imread(data_dir + '/rgb2.png').astype(np.float32) / 255)

    # h x w x 1
    depth1 = ivy.array(
        np.reshape(
            np.frombuffer(
                cv2.imread(data_dir + '/depth1.png', -1).tobytes(),
                np.float32), img_dims + [1]))
    depth2 = ivy.array(
        np.reshape(
            np.frombuffer(
                cv2.imread(data_dir + '/depth2.png', -1).tobytes(),
                np.float32), img_dims + [1]))

    # depth scaled pixel coords

    # h x w x 3
    u_pix_coords = ivy_vision.create_uniform_pixel_coords_image(img_dims)
    ds_pixel_coords1 = u_pix_coords * depth1
    ds_pixel_coords2 = u_pix_coords * depth2

    # depth limits
    depth_min = ivy.reduce_min(ivy.concatenate((depth1, depth2), 0))
    depth_max = ivy.reduce_max(ivy.concatenate((depth1, depth2), 0))
    depth_limits = [depth_min, depth_max]

    # show images
    show_rgb_and_depth_images(color1, color2, depth1, depth2, depth_limits)

    # Flow and Depth Triangulation #
    # -----------------------------#

    # required mat formats
    cam1to2_full_mat_homo = ivy.matmul(cam2_geom.full_mats_homo,
                                       cam1_geom.inv_full_mats_homo)
    cam1to2_full_mat = cam1to2_full_mat_homo[..., 0:3, :]
    full_mats_homo = ivy.concatenate(
        (ivy.expand_dims(cam1_geom.full_mats_homo,
                         0), ivy.expand_dims(cam2_geom.full_mats_homo, 0)), 0)
    full_mats = full_mats_homo[..., 0:3, :]

    # flow
    flow1to2 = ivy_vision.flow_from_depth_and_cam_mats(ds_pixel_coords1,
                                                       cam1to2_full_mat)

    # depth again
    depth1_from_flow = ivy_vision.depth_from_flow_and_cam_mats(
        flow1to2, full_mats)

    # show images
    show_flow_and_depth_images(depth1, flow1to2, depth1_from_flow,
                               depth_limits)

    # Inverse Warping #
    # ----------------#

    # inverse warp rendering
    warp = u_pix_coords[..., 0:2] + flow1to2
    color2_warp_to_f1 = ivy.reshape(ivy.bilinear_resample(color2, warp),
                                    color1.shape)

    # projected depth scaled pixel coords 2
    ds_pixel_coords1_wrt_f2 = ivy_vision.ds_pixel_to_ds_pixel_coords(
        ds_pixel_coords1, cam1to2_full_mat)

    # projected depth 2
    depth1_wrt_f2 = ds_pixel_coords1_wrt_f2[..., -1:]

    # inverse warp depth
    depth2_warp_to_f1 = ivy.reshape(ivy.bilinear_resample(depth2, warp),
                                    depth1.shape)

    # depth validity
    depth_validity = ivy.abs(depth1_wrt_f2 - depth2_warp_to_f1) < 0.01

    # inverse warp rendering with mask
    color2_warp_to_f1_masked = ivy.where(depth_validity, color2_warp_to_f1,
                                         ivy.zeros_like(color2_warp_to_f1))

    # show images
    show_inverse_warped_images(depth1_wrt_f2, depth2_warp_to_f1,
                               depth_validity, color1, color2_warp_to_f1,
                               color2_warp_to_f1_masked, depth_limits)

    # Forward Warping #
    # ----------------#

    # forward warp rendering
    ds_pixel_coords1_proj = ivy_vision.ds_pixel_to_ds_pixel_coords(
        ds_pixel_coords2,
        ivy.inv(cam1to2_full_mat_homo)[..., 0:3, :])
    depth1_proj = ds_pixel_coords1_proj[..., -1:]
    ds_pixel_coords1_proj = ds_pixel_coords1_proj[..., 0:2] / depth1_proj
    features_to_render = ivy.concatenate((depth1_proj, color2), -1)

    # without depth buffer
    f1_forward_warp_no_db, _, _ = ivy_vision.quantize_to_image(
        ivy.reshape(ds_pixel_coords1_proj, (-1, 2)),
        img_dims,
        ivy.reshape(features_to_render, (-1, 4)),
        ivy.zeros_like(features_to_render),
        with_db=False)

    # with depth buffer
    f1_forward_warp_w_db, _, _ = ivy_vision.quantize_to_image(
        ivy.reshape(ds_pixel_coords1_proj, (-1, 2)),
        img_dims,
        ivy.reshape(features_to_render, (-1, 4)),
        ivy.zeros_like(features_to_render),
        with_db=False if ivy.get_framework() == 'mxnd' else True)

    # show images
    show_forward_warped_images(depth1, color1, f1_forward_warp_no_db,
                               f1_forward_warp_w_db, depth_limits)

    # message
    print('End of Run Through Demo!')
Example #19
    def sample_links(self, joint_angles, link_num=None, samples_per_metre=25, batch_shape=None):
        """
        Sample links of the robot at uniformly distributed cartesian positions.

        :param joint_angles: Joint angles of the robot *[batch_shape,num_joints]*
        :type joint_angles: array
        :param link_num: Link number up to which to compute matrices. Default is the last link.
        :type link_num: int, optional
        :param samples_per_metre: Number of samples per metre of robot link
        :type samples_per_metre: int
        :param batch_shape: Shape of batch. Inferred from inputs if None.
        :type batch_shape: sequence of ints, optional
        :return: The sampled link cartesian positions *[batch_shape,total_sampling_chain_length,3]*
        """

        if link_num is None:
            link_num = self._num_joints
        if batch_shape is None:
            batch_shape = joint_angles.shape[:-1]
        batch_shape = list(batch_shape)
        num_batch_dims = len(batch_shape)
        batch_dims_for_trans = list(range(num_batch_dims))

        # BS x LN+1 x 4 x 4
        link_matrices = self.compute_link_matrices(joint_angles, link_num, batch_shape)

        # BS x LN+1 x 3
        link_positions = link_matrices[..., 0:3, -1]

        # BS x LN x 3
        segment_starts = link_positions[..., :-1, :]
        segment_ends = link_positions[..., 1:, :]

        # LN
        segment_sizes = _ivy.cast(_ivy.ceil(
            self._link_lengths[0:link_num] * samples_per_metre), 'int32')

        # list of segments
        segments_list = list()

        for link_idx in range(link_num):

            segment_size = segment_sizes[link_idx]

            # BS x 1 x 3
            segment_start = segment_starts[..., link_idx:link_idx + 1, :]
            segment_end = segment_ends[..., link_idx:link_idx + 1, :]

            # BS x segment_size x 3
            segment = _ivy.linspace(segment_start, segment_end, segment_size, axis=-2)[..., 0, :, :]
            if link_idx == link_num - 1 or segment_size == 1:
                segments_list.append(segment)
            else:
                segments_list.append(segment[..., :-1, :])

        # BS x total_robot_chain_length x 3
        all_segments = _ivy.concatenate(segments_list, -2)

        # BS x total_robot_chain_length x 4
        all_segments_homo = _ivy_mech.make_coordinates_homogeneous(all_segments)

        # 4 x BSxtotal_robot_chain_length
        all_segments_homo_trans = _ivy.reshape(_ivy.transpose(
            all_segments_homo, [num_batch_dims + 1] + batch_dims_for_trans + [num_batch_dims]), (4, -1))

        # 3 x BSxtotal_robot_chain_length
        transformed_trans = _ivy.matmul(self._base_inv_ext_mat[..., 0:3, :], all_segments_homo_trans)

        # BS x total_robot_chain_length x 3
        return _ivy.transpose(_ivy.reshape(
            transformed_trans, [3] + batch_shape + [-1]),
            [i+1 for i in batch_dims_for_trans] + [num_batch_dims+1] + [0])
Example #20
def lstm_update(x, init_h, init_c, kernel, recurrent_kernel, bias=None, recurrent_bias=None):
    """
    Perform long short-term memory update by unrolling the time dimension of the input array.

    :param x: input tensor of LSTM layer *[batch_shape, t, in]*.
    :type x: array
    :param init_h: initial state tensor for the cell output *[batch_shape, out]*.
    :type init_h: array
    :param init_c: initial state tensor for the cell hidden state *[batch_shape, out]*.
    :type init_c: array
    :param kernel: weights for cell kernel *[in, 4 x out]*.
    :type kernel: array
    :param recurrent_kernel: weights for cell recurrent kernel *[out, 4 x out]*.
    :type recurrent_kernel: array
    :param bias: bias for cell kernel *[4 x out]*.
    :type bias: array, optional
    :param recurrent_bias: bias for cell recurrent kernel *[4 x out]*.
    :type recurrent_bias: array, optional
    :return: hidden state for all timesteps *[batch_shape,t,out]* and cell state for last timestep *[batch_shape,out]*
    """

    # get shapes
    x_shape = list(x.shape)
    batch_shape = x_shape[:-2]
    timesteps = x_shape[-2]
    input_channels = x_shape[-1]
    x_flat = ivy.reshape(x, (-1, input_channels))

    # input kernel
    Wi = kernel
    Wi_x = ivy.reshape(ivy.matmul(x_flat, Wi) + (bias if bias is not None else 0),
                        batch_shape + [timesteps, -1])
    Wii_x, Wif_x, Wig_x, Wio_x = ivy.split(Wi_x, 4, -1)

    # recurrent kernel
    Wh = recurrent_kernel

    # lstm states
    ht = init_h
    ct = init_c

    # lstm outputs
    ot = x
    hts_list = list()

    # unrolled time dimension with lstm steps
    for Wii_xt, Wif_xt, Wig_xt, Wio_xt in zip(ivy.unstack(Wii_x, axis=-2), ivy.unstack(Wif_x, axis=-2),
                                              ivy.unstack(Wig_x, axis=-2), ivy.unstack(Wio_x, axis=-2)):
        htm1 = ht
        ctm1 = ct

        Wh_htm1 = ivy.matmul(htm1, Wh) + (recurrent_bias if recurrent_bias is not None else 0)
        Whi_htm1, Whf_htm1, Whg_htm1, Who_htm1 = ivy.split(Wh_htm1, num_sections=4, axis=-1)

        it = ivy.sigmoid(Wii_xt + Whi_htm1)
        ft = ivy.sigmoid(Wif_xt + Whf_htm1)
        gt = ivy.tanh(Wig_xt + Whg_htm1)
        ot = ivy.sigmoid(Wio_xt + Who_htm1)
        ct = ft * ctm1 + it * gt
        ht = ot * ivy.tanh(ct)

        hts_list.append(ivy.expand_dims(ht, -2))

    return ivy.concatenate(hts_list, -2), ct
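Each loop iteration above is the standard LSTM cell with gate order (i, f, g, o). One un-batched step in plain NumPy (sizes and weights are made up):

import numpy as np

def sigmoid(x):
    return 1. / (1. + np.exp(-x))

in_dim, out_dim = 3, 2
rng = np.random.default_rng(0)
Wi = rng.normal(size=(in_dim, 4 * out_dim))   # input kernel     [in, 4 x out]
Wh = rng.normal(size=(out_dim, 4 * out_dim))  # recurrent kernel [out, 4 x out]

x_t = rng.normal(size=in_dim)                 # one timestep of input
h = np.zeros(out_dim)                         # initial hidden state
c = np.zeros(out_dim)                         # initial cell state

z = x_t @ Wi + h @ Wh                         # gate pre-activations, [4 x out]
zi, zf, zg, zo = np.split(z, 4)
it, ft = sigmoid(zi), sigmoid(zf)             # input and forget gates
gt, ot = np.tanh(zg), sigmoid(zo)             # candidate and output gate
c = ft * c + it * gt                          # new cell state
h = ot * np.tanh(c)                           # new hidden state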
Example #21
    def compute_link_matrices(self, joint_angles, link_num, batch_shape=None):
        """
        Compute homogeneous transformation matrices relative to the base frame, up to and including link_num.

        :param joint_angles: Joint angles of the robot *[batch_shape,num_joints]*
        :type joint_angles: array
        :param link_num: Link number up to which to compute matrices
        :type link_num: int
        :param batch_shape: Shape of batch. Inferred from inputs if None.
        :type batch_shape: sequence of ints, optional
        :return: The link homogeneous transformation matrices, up to link_num *[batch_shape,link_num+1,4,4]*
        """

        if batch_shape is None:
            batch_shape = joint_angles.shape[:-1]
        batch_shape = list(batch_shape)
        num_batch_dims = len(batch_shape)

        # BS x 1 x NJ
        dh_joint_angles = _ivy.expand_dims(joint_angles * self._dh_joint_scales - self._dh_joint_offsets, -2)

        # BS x 1 x 4 x 4
        A00 = _ivy.identity(4, batch_shape=batch_shape + [1])

        Aitoip1dashs = list()
        Aiip1s = list()
        A0is = [A00]

        # repeated blocks

        # BS x 1 x NJ
        dis = _ivy.tile(_ivy.reshape(self._dis, [1] * num_batch_dims + [1, self._num_joints]),
                           batch_shape + [1, 1])

        # BS x 1 x 4
        bottom_row = _ivy.tile(
            _ivy.reshape(_ivy.array([0., 0., 0., 1.]), [1] * num_batch_dims + [1, 4]),
            batch_shape + [1, 1])

        # BS x 1 x 3
        start_of_bottom_middle = _ivy.tile(
            _ivy.reshape(_ivy.array([0., 0., 1.]), [1] * num_batch_dims + [1, 3]),
            batch_shape + [1, 1])

        # BS x 1 x 2
        zeros = _ivy.zeros(batch_shape + [1, 2])

        for i in range(self._num_joints):

            # BS x 1 x 4
            top_row = _ivy.concatenate((_ivy.cos(dh_joint_angles[..., i:i + 1]),
                                           -_ivy.sin(dh_joint_angles[..., i:i + 1]), zeros), -1)
            top_middle_row = _ivy.concatenate((_ivy.sin(dh_joint_angles[..., i:i + 1]),
                                                  _ivy.cos(dh_joint_angles[..., i:i + 1]), zeros), -1)
            bottom_middle_row = _ivy.concatenate((start_of_bottom_middle, dis[..., i:i + 1]), -1)

            # BS x 4 x 4
            Aitoip1dash = _ivy.concatenate((top_row, top_middle_row, bottom_middle_row, bottom_row), -2)

            # (BSx4) x 4
            Aitoip1dash_flat = _ivy.reshape(Aitoip1dash, (-1, 4))

            # (BSx4) x 4
            Aiip1_flat = _ivy.matmul(Aitoip1dash_flat, self._AidashtoAis[i + 1])

            # BS x 4 x 4
            Aiip1 = _ivy.reshape(Aiip1_flat, batch_shape + [4, 4])

            # BS x 4 x 4
            A0ip1 = _ivy.matmul(A0is[-1][..., 0, :, :], Aiip1)

            # append term to lists
            Aitoip1dashs.append(Aitoip1dash)
            Aiip1s.append(Aiip1)
            A0is.append(_ivy.expand_dims(A0ip1, -3))

            if i + 1 == link_num:
                # BS x LN+1 x 4 x 4
                return _ivy.concatenate(A0is, -3)

        raise ValueError('link_num must be an integer in the range 1-' + str(self._num_joints))
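The repeated block assembled inside the loop is a rotation about z by the joint angle together with a translation d along z; each A0(i+1) then chains as A0i @ Ai(i+1). A minimal NumPy sketch of that block (ignoring the fixed per-link transforms self._AidashtoAis):

import numpy as np

def joint_transform(theta, d):
    # rotation by theta about z plus translation d along z, matching the
    # rows built in the loop above
    c, s = np.cos(theta), np.sin(theta)
    return np.array([[c, -s, 0., 0.],
                     [s,  c, 0., 0.],
                     [0., 0., 1., d],
                     [0., 0., 0., 1.]])

# chaining to the base frame mirrors A0(i+1) = A0i @ Ai(i+1) in the loop
A01 = np.eye(4) @ joint_transform(np.pi / 2, 0.1)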