Example #1
def sphere_signed_distances(sphere_positions, sphere_radii, query_positions):
    """
    Return the signed distances of a set of query points from the sphere surfaces.\n
    `[reference] <https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm>`_

    :param sphere_positions: Positions of the spheres *[batch_shape,num_spheres,3]*
    :type sphere_positions: array
    :param sphere_radii: Radii of the spheres *[batch_shape,num_spheres,1]*
    :type sphere_radii: array
    :param query_positions: Points for which to query the signed distances *[batch_shape,num_points,3]*
    :type query_positions: array
    :return: The distances of the query points from the closest sphere surface *[batch_shape,num_points,1]*
    """

    # BS x NS x 1 x 3
    sphere_positions = _ivy.expand_dims(sphere_positions, -2)

    # BS x 1 x NP x 3
    query_positions = _ivy.expand_dims(query_positions, -3)

    # BS x NS x NP x 1
    distances_to_centre = _ivy.reduce_sum(
        (query_positions - sphere_positions)**2, -1, keepdims=True)**0.5

    # BS x NS x NP x 1
    all_sdfs = distances_to_centre - _ivy.expand_dims(sphere_radii, -2)

    # BS x NP x 1
    return _ivy.reduce_min(all_sdfs, -3)
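A minimal usage sketch for the function above, with hypothetical values (assumes an ivy backend has already been set, e.g. via ivy.set_framework('numpy'), and that the function is in scope):

import ivy
ivy.set_framework('numpy')
sphere_positions = ivy.array([[0., 0., 0.], [2., 0., 0.]])  # [2,3]: two spheres
sphere_radii = ivy.array([[1.], [0.5]])                     # [2,1]
query_positions = ivy.array([[0., 0., 2.], [3., 0., 0.]])   # [2,3]: two query points
sdfs = sphere_signed_distances(sphere_positions, sphere_radii, query_positions)
# sdfs: [[1.], [0.5]] -- positive outside the closest sphere, negative inside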
Example #2
def _pairwise_distance(x, y):

    # BS x NX x 1 x 1
    x = _ivy.expand_dims(x, -2)

    # BS x 1 x NY x 1
    y = _ivy.expand_dims(y, -3)

    # BS x NX x NY
    return _ivy.reduce_sum((x - y)**2, -1)
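A standalone plain-numpy sketch of the same broadcasting trick, with hypothetical point sets:

import numpy as np
x = np.array([[0.], [1.]])        # NX x 1
y = np.array([[0.], [1.], [2.]])  # NY x 1
# NX x NY: squared distance between every pair of points
dists = np.sum((x[:, None, :] - y[None, :, :]) ** 2, -1)
# dists: [[0., 1., 4.],
#         [1., 0., 1.]]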
Example #3
def main(interactive=True, try_use_sim=True, f=None):

    # config
    this_dir = os.path.dirname(os.path.realpath(__file__))
    f = choose_random_framework(excluded=['numpy']) if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    lr = 0.5
    num_anchors = 3
    num_sample_points = 100

    # spline start
    anchor_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, 2 + num_anchors), -1), 'float32')
    query_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, num_sample_points), -1), 'float32')

    # learnable parameters
    robot_start_config = ivy.array(ivy.cast(sim.robot_start_config, 'float32'))
    robot_target_config = ivy.array(
        ivy.cast(sim.robot_target_config, 'float32'))
    learnable_anchor_vals = ivy.variable(
        ivy.cast(
            ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config,
                             2 + num_anchors)[..., 1:-1], (1, 0)), 'float32'))

    # optimizer
    optimizer = ivy.SGD(lr=lr)

    # optimize
    it = 0
    colliding = True
    clearance = 0
    joint_query_vals = None
    while colliding:
        total_cost, grads, joint_query_vals, link_positions, sdf_vals = ivy.execute_with_gradients(
            lambda xs: compute_cost_and_sdfs(
                xs['w'], anchor_points, robot_start_config,
                robot_target_config, query_points, sim),
            Container({'w': learnable_anchor_vals}))
        colliding = ivy.reduce_min(sdf_vals[2:]) < clearance
        sim.update_path_visualization(
            link_positions, sdf_vals,
            os.path.join(this_dir, 'msp_no_sim', 'path_{}.png'.format(it)))
        learnable_anchor_vals = optimizer.step(
            Container({'w': learnable_anchor_vals}), grads)['w']
        it += 1
    sim.execute_motion(joint_query_vals)
    sim.close()
    unset_framework()
Example #4
    def _addressing(self, k, beta, g, s, gamma, prev_M, prev_w):

        # Sec 3.3.1 Focusing by Content

        # Cosine Similarity

        k = ivy.expand_dims(k, axis=2)
        inner_product = ivy.matmul(prev_M, k)
        k_norm = ivy.reduce_sum(k**2, axis=1, keepdims=True)**0.5
        M_norm = ivy.reduce_sum(prev_M**2, axis=2, keepdims=True)**0.5
        norm_product = M_norm * k_norm
        K = ivy.squeeze(inner_product / (norm_product + 1e-8))  # eq (6)

        # Calculating w^c

        K_amplified = ivy.exp(ivy.expand_dims(beta, axis=1) * K)
        w_c = K_amplified / ivy.reduce_sum(K_amplified, axis=1,
                                           keepdims=True)  # eq (5)

        if self._addressing_mode == 'content':  # Only focus on content
            return w_c

        # Sec 3.3.2 Focusing by Location

        g = ivy.expand_dims(g, axis=1)
        w_g = g * w_c + (1 - g) * prev_w  # eq (7)

        s = ivy.concatenate(
            [s[:, :self._shift_range + 1],
             ivy.zeros([s.shape[0],
                        self._memory_size - (self._shift_range * 2 + 1)]),
             s[:, -self._shift_range:]], axis=1)
        t = ivy.concatenate(
            [ivy.flip(s, axis=[1]), ivy.flip(s, axis=[1])], axis=1)
        s_matrix = ivy.stack(
            [t[:, self._memory_size - i - 1:self._memory_size * 2 - i - 1]
             for i in range(self._memory_size)], axis=1)
        w_ = ivy.reduce_sum(
            ivy.expand_dims(w_g, axis=1) * s_matrix, axis=2)  # eq (8)
        w_sharpen = w_ ** ivy.expand_dims(gamma, axis=1)
        w = w_sharpen / ivy.reduce_sum(
            w_sharpen, axis=1, keepdims=True)  # eq (9)

        return w
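The s_matrix construction above is the least obvious step: flipping the padded shift vector twice and taking strided windows yields a matrix whose rows are rotations of s, so eq (8) becomes a circular convolution. A standalone plain-numpy sketch with hypothetical sizes:

import numpy as np
memory_size = 5
s = np.array([0.1, 0.8, 0.1, 0., 0.])  # shift weights, padded to memory_size
t = np.concatenate([s[::-1], s[::-1]])
s_matrix = np.stack([t[memory_size - i - 1:memory_size * 2 - i - 1]
                     for i in range(memory_size)])
# s_matrix[i, j] == s[(i - j) % memory_size]
w_g = np.array([0., 1., 0., 0., 0.])  # gated weighting focused on slot 1
w_ = (w_g[None, :] * s_matrix).sum(axis=1)  # eq (8)
# w_: [0., 0.1, 0.8, 0.1, 0.] -- the focus has shifted by one slot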
Example #5
def compute_cost_and_sdfs(learnable_anchor_vals, anchor_points,
                          start_anchor_val, end_anchor_val, query_points, sim):
    anchor_vals = ivy.concatenate(
        (ivy.expand_dims(start_anchor_val, 0), learnable_anchor_vals,
         ivy.expand_dims(end_anchor_val, 0)), 0)
    joint_angles = ivy_robot.sample_spline_path(anchor_points, anchor_vals,
                                                query_points)
    link_positions = ivy.transpose(
        sim.ivy_manipulator.sample_links(joint_angles), (1, 0, 2))
    length_cost = compute_length(link_positions)
    sdf_vals = sim.sdf(ivy.reshape(link_positions, (-1, 3)))
    coll_cost = -ivy.reduce_mean(sdf_vals)
    total_cost = length_cost + coll_cost * 10
    return total_cost[0], joint_angles, link_positions, ivy.reshape(
        sdf_vals, (-1, 100, 1))
Example #6
    def get_reward(self):
        """
        Get reward based on current state

        :return: Reward array
        """
        # Goal proximity.
        x = ivy.reduce_sum(ivy.cos(self.angles), -1)
        y = ivy.reduce_sum(ivy.sin(self.angles), -1)
        xy = ivy.concatenate([ivy.expand_dims(x, 0),
                              ivy.expand_dims(y, 0)],
                             axis=0)
        rew = ivy.reshape(
            ivy.exp(-1 * ivy.reduce_sum((xy - self.goal_xy)**2, -1)), (-1, ))
        return ivy.reduce_mean(rew, axis=0, keepdims=True)
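A standalone plain-numpy sketch of the reward above, with hypothetical angles and goal: the end-effector xy position is the summed cos/sin of the joint angles, and the reward decays exponentially with squared distance to the goal.

import numpy as np
angles = np.array([0., np.pi / 2])  # two joint angles
goal_xy = np.array([1., 1.])
xy = np.array([np.cos(angles).sum(), np.sin(angles).sum()])
rew = np.exp(-np.sum((xy - goal_xy) ** 2))
# rew: 1.0, since the end effector sits exactly on the goal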
Example #7
def euler_pose_to_mat_pose(euler_pose, convention='zyx', batch_shape=None):
    """
    Convert Euler angle pose
    :math:`\mathbf{p}_{abc} = [\mathbf{x}_c, \mathbf{θ}_{xyz}] = [x, y, z, ϕ_a, ϕ_b, ϕ_c]` to matrix pose
    :math:`\mathbf{P}\in\mathbb{R}^{3×4}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix>`_

    :param euler_pose: Euler angle pose *[batch_shape,6]*
    :type euler_pose: array
    :param convention: The axes for euler rotation, in order of L.H.S. matrix multiplication.
    :type convention: str, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Matrix pose *[batch_shape,3,4]*
    """

    if batch_shape is None:
        batch_shape = euler_pose.shape[:-1]

    # BS x 3 x 3
    rot_mat = _ivy_rot_mat.euler_to_rot_mat(euler_pose[..., 3:], convention,
                                            batch_shape)

    # BS x 3 x 4
    return _ivy.concatenate(
        (rot_mat, _ivy.expand_dims(euler_pose[..., 0:3], -1)), -1)
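A hypothetical usage sketch (assumes a backend has been set and the function is in scope): with all-zero Euler angles the rotation block is the identity for any convention, so the matrix pose is just the translation appended to I.

import ivy
euler_pose = ivy.array([1., 2., 3., 0., 0., 0.])  # translation + zero rotation
mat_pose = euler_pose_to_mat_pose(euler_pose)
# mat_pose: [[1., 0., 0., 1.],
#            [0., 1., 0., 2.],
#            [0., 0., 1., 3.]]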
Example #8
    def __init__(self,
                 img_meas: Dict[str, ESMCamMeasurement],
                 agent_rel_mat: ivy.Array,
                 control_mean: ivy.Array = None,
                 control_cov: ivy.Array = None):
        """
        Create esm observation container

        :param img_meas: dict of ESMCamMeasurement objects, with keys for camera names.
        :type img_meas: dict of ESMCamMeasurement
        :param agent_rel_mat: The pose of the agent relative to the previous pose, in matrix form
                                *[batch_size, timesteps, 3, 4]*.
        :type agent_rel_mat: array
        :param control_mean: The pose of the agent relative to the previous pose, in rotation vector pose form.
                                Inferred from agent_rel_mat if None. *[batch_size, timesteps, 6]*
        :type control_mean: array, optional
        :param control_cov: The covariance of the agent relative pose, in rotation vector form.
                             Assumed all zero if None. *[batch_size, timesteps, 6, 6]*.
        :type control_cov: array, optional
        """
        self['img_meas'] = Container(img_meas)
        agent_rel_mat = _pad_to_batch_n_time_dims(agent_rel_mat, 4)
        self['agent_rel_mat'] = agent_rel_mat
        if control_mean is None:
            control_mean = ivy_mech.mat_pose_to_rot_vec_pose(agent_rel_mat)
        else:
            control_mean = _pad_to_batch_n_time_dims(control_mean, 3)
        self['control_mean'] = control_mean
        if control_cov is None:
            control_cov = ivy.tile(ivy.expand_dims(ivy.zeros_like(control_mean), -1), (1, 1, 1, 6))
        else:
            control_cov = _pad_to_batch_n_time_dims(control_cov, 4)
        self['control_cov'] = control_cov
Example #9
    def _group_tensor_into_windowed_tensor(self, x, valid_first_frame):
        if self._window_size == 1:
            valid_first_frame_pruned = ivy.cast(valid_first_frame[:, 0],
                                                'bool')
        else:
            valid_first_frame_pruned = ivy.cast(
                valid_first_frame[:1 - self._window_size, 0], 'bool')
        if ivy.reduce_sum(ivy.cast(valid_first_frame_pruned, 'int32'))[0] == 0:
            valid_first_frame_pruned =\
                ivy.cast(ivy.one_hot(0, self._sequence_lengths[0] - self._window_size + 1), 'bool')
        window_idxs_single = ivy.indices_where(valid_first_frame_pruned)

        gather_idxs_list = list()
        for w_idx in window_idxs_single:
            gather_idxs_list.append(
                ivy.expand_dims(
                    ivy.arange(w_idx[0] + self._window_size, w_idx[0], 1), 0))
        gather_idxs = ivy.concatenate(gather_idxs_list, 0)
        gather_idxs = ivy.reshape(gather_idxs, (-1, 1))
        num_valid_windows_for_seq = ivy.shape(window_idxs_single)[0:1]
        return ivy.reshape(
            ivy.gather_nd(x, gather_idxs),
            ivy.concatenate(
                (num_valid_windows_for_seq, ivy.array(
                    [self._window_size]), ivy.shape(x)[1:]), 0))
Example #10
def compute_cost_and_sdfs(learnable_anchor_vals, anchor_points,
                          start_anchor_val, end_anchor_val, query_points, sim):
    anchor_vals = ivy.concatenate(
        (ivy.expand_dims(start_anchor_val, 0), learnable_anchor_vals,
         ivy.expand_dims(end_anchor_val, 0)), 0)
    poses = ivy_robot.sample_spline_path(anchor_points, anchor_vals,
                                         query_points)
    inv_ext_mat_query_vals = ivy_mech.rot_vec_pose_to_mat_pose(poses)
    body_positions = ivy.transpose(
        sim.ivy_drone.sample_body(inv_ext_mat_query_vals), (1, 0, 2))
    length_cost = compute_length(body_positions)
    sdf_vals = sim.sdf(ivy.reshape(body_positions, (-1, 3)))
    coll_cost = -ivy.reduce_mean(sdf_vals)
    total_cost = length_cost + coll_cost * 10
    return total_cost[0], poses, body_positions, ivy.reshape(
        sdf_vals, (-1, 100, 1))
Example #11
def batch_array(x, _):
    return [
        ivy.concatenate([
            ivy.expand_dims(item, 0)
            for item in x[i * batch_size:i * batch_size + batch_size]
        ], 0) for i in range(int(len(x) / batch_size))
    ]
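A plain-numpy sketch of the same batching logic with a hypothetical list: items are stacked along a new leading axis in groups of batch_size, and any remainder that does not fill a whole batch is dropped by the integer division.

import numpy as np
batch_size = 2
x = [np.full((3,), float(i)) for i in range(5)]
batches = [
    np.concatenate([item[None] for item in x[i * batch_size:
                                             (i + 1) * batch_size]], 0)
    for i in range(len(x) // batch_size)
]
# two batches of shape [2, 3]; the fifth item is dropped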
Example #12
def focal_lengths_and_pp_offsets_to_calib_mat(focal_lengths, pp_offsets, batch_shape=None, dev_str=None):
    """
    Compute calibration matrix :math:`\mathbf{K}\in\mathbb{R}^{3×3}` from focal lengths :math:`f_x, f_y` and
    principal-point offsets :math:`p_x, p_y`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=173>`_
    page 155, section 6.1, equation 6.4

    :param focal_lengths: Focal lengths *[batch_shape,2]*
    :type focal_lengths: array
    :param pp_offsets: Principal-point offsets *[batch_shape,2]*
    :type pp_offsets: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Inferred from focal_lengths if None.
    :type dev_str: str, optional
    :return: Calibration matrix *[batch_shape,3,3]*
    """

    if batch_shape is None:
        batch_shape = focal_lengths.shape[:-1]

    if dev_str is None:
        dev_str = _ivy.dev_str(focal_lengths)

    # shapes as list
    batch_shape = list(batch_shape)

    # BS x 1 x 1
    zeros = _ivy.zeros(batch_shape + [1, 1], dev_str=dev_str)
    ones = _ivy.ones(batch_shape + [1, 1], dev_str=dev_str)

    # BS x 2 x 1
    focal_lengths_reshaped = _ivy.expand_dims(focal_lengths, -1)
    pp_offsets_reshaped = _ivy.expand_dims(pp_offsets, -1)

    # BS x 1 x 3
    row1 = _ivy.concatenate((focal_lengths_reshaped[..., 0:1, :], zeros, pp_offsets_reshaped[..., 0:1, :]), -1)
    row2 = _ivy.concatenate((zeros, focal_lengths_reshaped[..., 1:2, :], pp_offsets_reshaped[..., 1:2, :]), -1)
    row3 = _ivy.concatenate((zeros, zeros, ones), -1)

    # BS x 3 x 3
    return _ivy.concatenate((row1, row2, row3), -2)
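A hypothetical usage sketch (assumes a backend has been set): the rows assembled above give the standard pinhole calibration matrix.

import ivy
K = focal_lengths_and_pp_offsets_to_calib_mat(
    ivy.array([500., 500.]), ivy.array([320., 240.]))
# K: [[500.,   0., 320.],
#     [  0., 500., 240.],
#     [  0.,   0.,   1.]]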
Example #13
def _se_to_mask(se: ivy.Array) -> ivy.Array:
    se_h, se_w = se.shape
    se_flat = ivy.reshape(se, (-1,))
    num_feats = se_h * se_w
    i_s = ivy.expand_dims(ivy.arange(num_feats, dev_str=ivy.dev_str(se)), -1)
    y_s = i_s % se_h
    x_s = i_s // se_h
    indices = ivy.concatenate((i_s, ivy.zeros_like(i_s, dtype_str='int32'), x_s, y_s), -1)
    out = ivy.scatter_nd(
        indices, ivy.cast(se_flat >= 0, ivy.dtype_str(se)), (num_feats, 1, se_h, se_w), dev_str=ivy.dev_str(se))
    return out
Example #14
    def __init__(self,
                 img_mean: ivy.Array,
                 cam_rel_mat: ivy.Array,
                 img_var: ivy.Array = None,
                 validity_mask: ivy.Array = None,
                 pose_mean: ivy.Array = None,
                 pose_cov: ivy.Array = None):
        """
        Create esm image measurement container

        :param img_mean: Camera-relative co-ordinates and image features
                            *[batch_size, timesteps, height, width, 3 + feat]*
        :type img_mean: array
        :param cam_rel_mat: The pose of the camera relative to the current agent pose, in matrix form
                            *[batch_size, timesteps, 3, 4]*
        :type cam_rel_mat: array
        :param img_var: Image depth and feature variance values, assumed all zero if None.
                        *[batch_size, timesteps, height, width, 1 + feat]*
        :type img_var: array, optional
        :param validity_mask: Validity mask, for which pixels should be considered. Assumed all valid if None
                                *[batch_size, timesteps, height, width, 1]*
        :type validity_mask: array, optional
        :param pose_mean: The pose of the camera relative to the current agent pose, in rotation vector pose form.
                            Inferred from cam_rel_mat if None. *[batch_size, timesteps, 6]*
        :type pose_mean: array, optional
        :param pose_cov: The covariance of the camera relative pose, in rotation vector form. Assumed all zero if None.
                            *[batch_size, timesteps, 6, 6]*
        :type pose_cov: array, optional
        """
        img_mean = _pad_to_batch_n_time_dims(img_mean, 5)
        cam_rel_mat = _pad_to_batch_n_time_dims(cam_rel_mat, 4)
        self['img_mean'] = img_mean
        self['cam_rel_mat'] = cam_rel_mat
        if img_var is None:
            img_var = ivy.zeros_like(img_mean)
        else:
            img_var = _pad_to_batch_n_time_dims(img_var, 5)
        self['img_var'] = img_var
        if validity_mask is None:
            validity_mask = ivy.ones_like(img_mean[..., 0:1])
        else:
            validity_mask = _pad_to_batch_n_time_dims(validity_mask, 5)
        self['validity_mask'] = validity_mask
        if pose_mean is None:
            pose_mean = ivy_mech.mat_pose_to_rot_vec_pose(cam_rel_mat)
        else:
            pose_mean = _pad_to_batch_n_time_dims(pose_mean, 3)
        self['pose_mean'] = pose_mean
        if pose_cov is None:
            pose_cov = ivy.tile(ivy.expand_dims(ivy.zeros_like(pose_mean), -1), (1, 1, 1, 6))
        else:
            pose_cov = _pad_to_batch_n_time_dims(pose_cov, 4)
        self['pose_cov'] = pose_cov
Example #15
def render_rays_via_termination_probabilities(ray_term_probs,
                                              features,
                                              render_variance=False):
    """
    Render features onto the image plane, given rays sampled at radial depths with readings of
    feature values and densities at these sample points.

    :param ray_term_probs: The ray termination probabilities *[batch_shape,num_samples_per_ray]*
    :type ray_term_probs: array
    :param features: Feature values at the sample points *[batch_shape,num_samples_per_ray,feat_dim]*
    :type features: array
    :param render_variance: Whether to also render the feature variance. Default is False.
    :type render_variance: bool, optional
    :return: The feature renderings along the rays, computed via the termination probabilities *[batch_shape,feat_dim]*
    """

    # BS x NSPR
    rendering = ivy.reduce_sum(
        ivy.expand_dims(ray_term_probs, -1) * features, -2)
    if not render_variance:
        return rendering
    var = ivy.reduce_sum(
        ray_term_probs * (ivy.expand_dims(rendering, -2) - features)**2, -2)
    return rendering, var
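A hypothetical usage sketch (assumes a backend has been set): two rays with three samples each; each rendering is the termination-probability-weighted sum of the sampled features.

import ivy
ray_term_probs = ivy.array([[0.2, 0.5, 0.3],
                            [1.0, 0.0, 0.0]])  # [2,3]
features = ivy.random_uniform(shape=(2, 3, 4))  # [2,3,4]
rendering = render_rays_via_termination_probabilities(ray_term_probs, features)
# rendering: [2,4]; the second row equals features[1, 0], since all of that
# ray's termination mass sits at its first sample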
Example #16
    def expand_dims(self, axis):
        """
        Expand dims of all sub-arrays of container object.

        :param axis: Axis along which to expand dimensions of the sub-arrays.
        :type axis: int
        :return: Container object with all sub-array dimensions expanded along the axis.
        """
        return_dict = dict()
        for key, value in sorted(self.items()):
            if isinstance(value, Container):
                return_dict[key] = value.expand_dims(axis)
            else:
                return_dict[key] = _ivy.expand_dims(value, axis)
        return Container(return_dict)
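A hypothetical usage sketch (assumes a backend has been set, and that nested dicts are converted to sub-containers on construction):

import ivy
from ivy import Container
c = Container({'a': ivy.array([1., 2.]),
               'b': {'c': ivy.array([3., 4.])}})
c_expanded = c.expand_dims(0)
# both c_expanded['a'] and c_expanded['b']['c'] now have shape [1, 2]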
Example #17
def create_trimesh_indices_for_image(batch_shape, image_dims, dev_str='cpu:0'):
    """
    Create triangle mesh indices for an image with the given dimensions

    :param batch_shape: Shape of batch.
    :type batch_shape: sequence of ints
    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
    :type dev_str: str, optional
    :return: Triangle mesh indices for image *[batch_shape,2x(h-1)x(w-1),3]*
    """

    # shapes as lists
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # other shape specs
    num_batch_dims = len(batch_shape)
    tri_dim = 2 * (image_dims[0] - 1) * (image_dims[1] - 1)
    flat_shape = [1] * num_batch_dims + [tri_dim] + [3]
    tile_shape = batch_shape + [1] * 2

    # 1 x W-1
    t00_ = _ivy.reshape(_ivy.arange(image_dims[1] - 1, dtype_str='float32', dev_str=dev_str), (1, -1))

    # H-1 x 1
    k_ = _ivy.reshape(_ivy.arange(image_dims[0] - 1, dtype_str='float32', dev_str=dev_str), (-1, 1)) * image_dims[1]

    # H-1 x W-1
    t00_ = _ivy.matmul(_ivy.ones((image_dims[0] - 1, 1), dev_str=dev_str), t00_)
    k_ = _ivy.matmul(k_, _ivy.ones((1, image_dims[1] - 1), dev_str=dev_str))

    # (H-1xW-1) x 1
    t00 = _ivy.expand_dims(t00_ + k_, -1)
    t01 = t00 + 1
    t02 = t00 + image_dims[1]
    t10 = t00 + image_dims[1] + 1
    t11 = t01
    t12 = t02

    # (H-1xW-1) x 3
    t0 = _ivy.concatenate((t00, t01, t02), -1)
    t1 = _ivy.concatenate((t10, t11, t12), -1)

    # BS x 2x(H-1xW-1) x 3
    return _ivy.tile(_ivy.reshape(_ivy.concatenate((t0, t1), 0),
                                  flat_shape), tile_shape)
Example #18
def axis_angle_pose_to_mat_pose(axis_angle_pose):
    """
    Convert axis-angle pose :math:`\mathbf{p}_{aa} = [\mathbf{x}_c, \mathbf{e}, θ] = [x, y, z, e_x, e_y, e_z, θ]` to
    matrix pose :math:`\mathbf{P}\in\mathbb{R}^{3×4}`.

    :param axis_angle_pose: Axis-angle pose *[batch_shape,7]*
    :type axis_angle_pose: array
    :return: Matrix pose *[batch_shape,3,4]*
    """

    # BS x 3 x 3
    rot_mat = _ivy_rot_mat.axis_angle_to_rot_mat(axis_angle_pose[..., 3:])

    # BS x 3 x 4
    return _ivy.concatenate(
        (rot_mat, _ivy.expand_dims(axis_angle_pose[..., :3], -1)), -1)
Example #19
def quaternion_pose_to_mat_pose(quat_pose):
    """
    Convert quaternion pose :math:`\mathbf{p}_{q} = [\mathbf{x}_c, \mathbf{q}] = [x, y, z, q_i, q_j, q_k, q_r]` to
    matrix pose :math:`\mathbf{P}\in\mathbb{R}^{3×4}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Conversion_to_and_from_the_matrix_representation>`_

    :param quat_pose: Quaternion pose *[batch_shape,7]*
    :type quat_pose: array
    :return: Matrix pose *[batch_shape,3,4]*
    """

    # BS x 3 x 3
    rot_mat = _ivy_rot_mat.quaternion_to_rot_mat(quat_pose[..., 3:])

    # BS x 3 x 1
    rhs = _ivy.expand_dims(quat_pose[..., 0:3], -1)

    # BS x 3 x 4
    return _ivy.concatenate((rot_mat, rhs), -1)
Example #20
def rot_vec_pose_to_mat_pose(rot_vec_pose):
    """
    Convert rotation vector pose :math:`\mathbf{p}_{rv} = [\mathbf{x}_c, \mathbf{θ}_{rv}] = [x, y, z, θe_x, θe_y, θe_z]`
    to matrix pose :math:`\mathbf{P}\in\mathbb{R}^{3×4}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Rotation_formalisms_in_three_dimensions#Euler_axis_and_angle_(rotation_vector)>`_

    :param rot_vec_pose: Rotation vector pose *[batch_shape,6]*
    :type rot_vec_pose: array
    :return: Matrix pose *[batch_shape,3,4]*
    """

    # BS x 4
    quaternion = _ivy_quat.rotation_vector_to_quaternion(rot_vec_pose[..., 3:])

    # BS x 3 x 3
    rot_mat = _ivy_rot_mat.quaternion_to_rot_mat(quaternion)

    # BS x 3 x 4
    return _ivy.concatenate(
        (rot_mat, _ivy.expand_dims(rot_vec_pose[..., 0:3], -1)), -1)
Example #21
    def _group_tensor_into_windowed_tensor_simple(self, x, seq_info):
        seq_info = self._update_seq_info_for_window(seq_info)
        if self._fixed_sequence_length:
            return ivy.reshape(ivy.gather_nd(x, ivy.array(self._gather_idxs)),
                               (self._windows_per_seq, self._window_size) +
                               x.shape[1:])
        else:
            num_windows_in_seq = int(
                ivy.to_numpy(
                    ivy.maximum(seq_info.length[0] - self._window_size + 1,
                                1)))
            window_idxs_in_seq = ivy.arange(num_windows_in_seq, 0, 1)
            gather_idxs = ivy.tile(
                ivy.reshape(ivy.arange(self._window_size, 0, 1),
                            (1, self._window_size)),
                (num_windows_in_seq, 1)) + ivy.expand_dims(
                    window_idxs_in_seq, -1)
            gather_idxs_flat = ivy.reshape(
                gather_idxs, (self._window_size * num_windows_in_seq, 1))
            return ivy.reshape(ivy.gather_nd(x, gather_idxs_flat),
                               (num_windows_in_seq, self._window_size) +
                               x.shape[1:])
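A standalone plain-numpy sketch of the gather-index construction in the else branch, with hypothetical lengths: tiling the in-window offsets and adding the window start indices yields one row of source indices per sliding window.

import numpy as np
seq_len, window_size = 6, 3
num_windows = max(seq_len - window_size + 1, 1)
gather_idxs = (np.arange(window_size)[None, :] +
               np.arange(num_windows)[:, None])
# gather_idxs: [[0, 1, 2],
#               [1, 2, 3],
#               [2, 3, 4],
#               [3, 4, 5]]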
Example #22
def stratified_sample(starts, ends, num_samples, batch_shape=None):
    """
    Perform stratified sampling, between start and end arrays. This operation divides each range into equally sized
    bins, and uniformly samples a value within each bin.

    :param starts: Start values *[batch_shape]*
    :type starts: array
    :param ends: End values *[batch_shape]*
    :type ends: array
    :param num_samples: The number of samples to generate between starts and ends
    :type num_samples: int
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: The stratified samples, with each randomly placed in uniformly spaced bins *[batch_shape,num_samples]*
    """

    # shapes
    if batch_shape is None:
        batch_shape = starts.shape

    # shapes as lists
    batch_shape = list(batch_shape)

    # BS
    bin_sizes = (ends - starts) / num_samples

    # BS x NS
    linspace_vals = ivy.linspace(starts, ends - bin_sizes, num_samples)

    # BS x NS
    random_uniform = ivy.random_uniform(shape=batch_shape + [num_samples],
                                        dev_str=ivy.dev_str(starts))

    # BS x NS
    random_offsets = random_uniform * ivy.expand_dims(bin_sizes, -1)

    # BS x NS
    return linspace_vals + random_offsets
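A hypothetical usage sketch (assumes a backend has been set): four samples between 0 and 1, one drawn uniformly from each of the bins [0, 0.25), [0.25, 0.5), [0.5, 0.75) and [0.75, 1).

import ivy
samples = stratified_sample(ivy.array([0.]), ivy.array([1.]), 4)
# samples: shape [1, 4], e.g. [[0.13, 0.49, 0.58, 0.94]]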
Example #23
    def cap(self):
        self._pr_obj.handle_explicitly()
        return ivy.expand_dims(ivy.array(self._pr_obj.capture_depth(True).tolist()), -1),\
               ivy.array(self._pr_obj.capture_rgb().tolist())
Example #24
def main(f=None):

    # Framework Setup #
    # ----------------#

    # choose random framework

    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Orientation #
    # ------------#

    # rotation representations

    # 3
    rot_vec = ivy.array([0., 1., 0.])

    # 3 x 3
    rot_mat = ivy_mech.rot_vec_to_rot_mat(rot_vec)

    # 3
    euler_angles = ivy_mech.rot_mat_to_euler(rot_mat, 'zyx')

    # 4
    quat = ivy_mech.euler_to_quaternion(euler_angles)

    # 4
    axis_and_angle = ivy_mech.quaternion_to_axis_angle(quat)

    # 3
    rot_vec_again = axis_and_angle[..., :-1] * axis_and_angle[..., -1:]

    # Pose #
    # -----#

    # pose representations

    # 3
    position = ivy.ones_like(rot_vec)

    # 6
    rot_vec_pose = ivy.concatenate((position, rot_vec), 0)

    # 3 x 4
    mat_pose = ivy_mech.rot_vec_pose_to_mat_pose(rot_vec_pose)

    # 6
    euler_pose = ivy_mech.mat_pose_to_euler_pose(mat_pose)

    # 7
    quat_pose = ivy_mech.euler_pose_to_quaternion_pose(euler_pose)

    # 6
    rot_vec_pose_again = ivy_mech.quaternion_pose_to_rot_vec_pose(quat_pose)

    # Position #
    # ---------#

    # conversions of positional representation

    # 3
    cartesian_coord = ivy.random_uniform(0., 1., (3, ))

    # 3
    polar_coord = ivy_mech.cartesian_to_polar_coords(cartesian_coord)

    # 3
    cartesian_coord_again = ivy_mech.polar_to_cartesian_coords(polar_coord)

    # cartesian co-ordinate frame-of-reference transformations

    # 3 x 4
    trans_mat = ivy.random_uniform(0., 1., (3, 4))

    # 4
    cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        cartesian_coord)

    # 3
    trans_cartesian_coord = ivy.matmul(
        trans_mat, ivy.expand_dims(cartesian_coord_homo, -1))[:, 0]

    # 4
    trans_cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        trans_cartesian_coord)

    # 4 x 4
    trans_mat_homo = ivy_mech.make_transformation_homogeneous(trans_mat)

    # 3 x 4
    inv_trans_mat = ivy.inv(trans_mat_homo)[0:3]

    # 3
    cartesian_coord_again = ivy.matmul(
        inv_trans_mat, ivy.expand_dims(trans_cartesian_coord_homo, -1))[:, 0]

    # message
    print('End of Run Through Demo!')
Example #25
File: esm.py Project: wx-b/memory
    def _forward(self,
                 obs: ESMObservation,
                 memory: ESMMemory = None,
                 batch_size=None,
                 num_timesteps=None,
                 num_cams=None,
                 image_dims=None):
        """
        Perform ESM update step.

        :param obs: Observations
        :type obs: ESMObservation
        :param memory: Memory from the previous time-step, uses internal parameter if None.
        :type memory: ESMMemory, optional.
        :param batch_size: Size of batch, inferred from inputs if None.
        :type batch_size: int, optional
        :param num_timesteps: Number of timesteps, inferred from inputs if None.
        :type num_timesteps: int, optional
        :param num_cams: Number of cameras, inferred from inputs if None.
        :type num_cams: int, optional
        :param image_dims: Image dimensions of captured images, inferred from inputs if None.
        :type image_dims: sequence of ints, optional
        :return: New memory of type ESMMemory
        """

        # get shapes
        img_meas = (next(iter(obs.img_meas.values()))).img_mean
        if batch_size is None:
            batch_size = int(img_meas.shape[0])
        if num_timesteps is None:
            num_timesteps = int(img_meas.shape[1])
        if num_cams is None:
            num_cams = len(obs.img_meas.values())
        if image_dims is None:
            image_dims = list(img_meas.shape[2:4])

        # get only previous memory

        # extract from memory #
        # --------------------#

        if memory:
            prev_mem = memory.slice((slice(None, None, None), -1))
        elif self._stateful and self._memory is not None:
            prev_mem = self._memory.slice((slice(None, None, None), -1))
        else:
            prev_mem = self.empty_memory(batch_size, 1).slice(
                (slice(None, None, None), -1))

        # holes prior #
        # ------------#

        # B x N x OH x OW x 1
        self._sphere_depth_prior = \
            ivy.ones([batch_size, num_timesteps] + self._sphere_img_dims + [1], dev_str=self._dev_str) * \
            self._sphere_depth_prior_val

        # B x N x OH x OW x F
        self._sphere_feat_prior = \
            ivy.ones([batch_size, num_timesteps] + self._sphere_img_dims + [self._feat_dim], dev_str=self._dev_str) * \
            self._sphere_feat_prior_val

        # B x N x OH x OW x (1+F)
        holes_prior = ivy.concatenate([self._sphere_depth_prior] +
                                      [self._sphere_feat_prior], -1)

        # holes prior variances #
        # ----------------------#

        # B x N x OH x OW x 2
        sphere_ang_pix_prior_var = \
            ivy.ones([batch_size, num_timesteps] + self._sphere_img_dims + [2], dev_str=self._dev_str) * self._ang_pix_prior_var_val

        # B x N x OH x OW x 1
        sphere_depth_prior_var = \
            ivy.ones([batch_size, num_timesteps] + self._sphere_img_dims + [1], dev_str=self._dev_str) * self._depth_prior_var_val

        # B x N x OH x OW x F
        sphere_feat_prior_var = \
            ivy.ones([batch_size, num_timesteps] + self._sphere_img_dims + [self._feat_dim], dev_str=self._dev_str) * \
            self._feat_prior_var_val

        # B x N x OH x OW x (3+F)
        holes_prior_var = ivy.concatenate([sphere_ang_pix_prior_var] +
                                          [sphere_depth_prior_var] +
                                          [sphere_feat_prior_var], -1)

        # variance threshold #
        # -------------------#

        # B x N x (3+F) x 1
        var_threshold_min = ivy.tile(
            ivy.reshape(
                ivy.stack([self._min_ang_pix_var] * 2 + [self._min_depth_var] +
                          [self._min_feat_var] * self._feat_dim),
                [1, 1, 3 + self._feat_dim, 1]),
            [batch_size, num_timesteps, 1, 1])
        var_threshold_max = ivy.tile(
            ivy.reshape(
                ivy.stack([self._ang_pix_var_threshold] * 2 +
                          [self._depth_var_threshold] +
                          [self._feat_var_threshold] * self._feat_dim),
                [1, 1, 3 + self._feat_dim, 1]),
            [batch_size, num_timesteps, 1, 1])
        self._var_threshold = ivy.concatenate(
            (var_threshold_min, var_threshold_max), -1)

        # measurements #
        # -------------#

        # B x N x OH x OW x 3
        uniform_sphere_pixel_coords = ivy_vision.create_uniform_pixel_coords_image(
            self._sphere_img_dims, (batch_size, num_timesteps),
            dev_str=self._dev_str)

        # B x N x OH x OW x (3+F),    B x N x OH x OW x (3+F)
        meas_means, meas_vars = self._convert_images_to_omni_observations(
            obs.img_meas, uniform_sphere_pixel_coords, holes_prior, batch_size,
            num_timesteps, num_cams, image_dims)

        # filtering #
        # ----------#

        # list of B x OH x OW x (3+F),    list of B x OH x OW x (3+F)
        fused_measurements_list, fused_variances_list = \
            self._kalman_filter_on_measurement_sequence(
                prev_mem.mean, prev_mem.var, holes_prior[:, 0], holes_prior_var[:, 0], meas_means, meas_vars,
                uniform_sphere_pixel_coords[:, 0], obs.control_mean, obs.control_cov, obs.agent_rel_mat,
                batch_size, num_timesteps)

        # new variance #
        # -------------#

        # B x N x OH x OW x (3+F)
        fused_variance = ivy.concatenate(
            [ivy.expand_dims(item, 1) for item in fused_variances_list], 1)

        # variance clipping

        # B x N x OH x OW x 1
        fused_depth_variance = ivy.clip(fused_variance[..., 0:1],
                                        self._min_depth_var,
                                        self._depth_prior_var_val)

        # B x N x OH x OW x 3
        fused_feat_variance = ivy.clip(
            fused_variance[..., 1:], self._min_feat_var,
            self._feat_prior_var_val)

        # B x N x OH x OW x (3+F)
        fused_variance = ivy.concatenate([fused_depth_variance] +
                                         [fused_feat_variance], -1)

        # new mean #
        # ---------#

        # B x N x OH x OW x (3+F)
        fused_measurement = ivy.concatenate(
            [ivy.expand_dims(item, 1) for item in fused_measurements_list], 1)

        # value clipping

        # B x N x OH x OW x 2
        fused_pixel_coords = fused_measurement[..., 0:2]

        # B x N x OH x OW x 1
        fused_depth = ivy.clip(fused_measurement[..., 2:3], self._min_depth,
                               self._max_depth)

        # B x N x OH x OW x 3
        fused_feat = fused_measurement[..., 3:]

        # B x N x OH x OW x (3+F)
        fused_measurement = ivy.concatenate([fused_pixel_coords] +
                                            [fused_depth] + [fused_feat], -1)

        # update memory #
        # --------------#

        # B x N x OH x OW x (3+F),    B x N x OH x OW x (3+F)
        self._memory = ESMMemory(mean=fused_measurement, var=fused_variance)

        # return #
        # -------#

        return self._memory
Example #26
File: esm.py Project: wx-b/memory
    def _kalman_filter_on_measurement_sequence(
            self, prev_fused_val, prev_fused_variance, hole_prior,
            hole_prior_var, meas, meas_vars, uniform_sphere_pixel_coords,
            agent_rel_poses, agent_rel_pose_covs, agent_rel_mats, batch_size,
            num_timesteps):
        """
        Perform Kalman filtering on the measurement sequence

        :param prev_fused_val: Fused value from previous timestamp *[batch_size, oh, ow, (3+f)]*
        :param prev_fused_variance: Fused variance from previous timestamp *[batch_size, oh, ow, (3+f)]*
        :param hole_prior: Prior for holes in quantization *[batch_size, oh, ow, (1+f)]*
        :param hole_prior_var: Prior variance for holes in quantization *[batch_size, oh, ow, (3+f)]*
        :param meas: Measurements *[batch_size, num_timesteps, oh, ow, (3+f)]*
        :param meas_vars: Measurement variances *[batch_size, num_timesteps, oh, ow, (3+f)]*
        :param uniform_sphere_pixel_coords: Uniform sphere pixel co-ordinates *[batch_size, oh, ow, 3]*
        :param agent_rel_poses: Relative poses of agents to the previous step *[batch_size, num_timesteps, 6]*
        :param agent_rel_pose_covs: Agent relative pose covariances *[batch_size, num_timesteps, 6, 6]*
        :param agent_rel_mats: Relative transformations matrix of agents to the previous step
                                *[batch_size, num_timesteps, 3, 4]*
        :param batch_size: Size of batch
        :param num_timesteps: Number of frames
        :return: list of *[batch_size, oh, ow, (3+f)]*,    list of *[batch_size, oh, ow, (3+f)]*
        """

        fused_list = list()
        fused_variances_list = list()

        for i in range(num_timesteps):
            # project prior from previous frame #
            # ----------------------------------#

            # B x OH x OW x (3+F)
            prev_prior = prev_fused_val
            prev_prior_variance = prev_fused_variance

            # B x 3 x 4
            agent_rel_mat = agent_rel_mats[:, i]

            # B x 6
            agent_rel_pose = agent_rel_poses[:, i]

            # B x 6 x 6
            agent_rel_pose_cov = agent_rel_pose_covs[:, i]

            # B x OH x OW x (3+F)   B x OH x OW x (3+F)
            fused_projected, fused_projected_variance = self._omni_frame_to_omni_frame_projection(
                agent_rel_pose, agent_rel_mat, uniform_sphere_pixel_coords,
                prev_prior[..., 0:2], prev_prior[..., 2:3],
                prev_prior[..., 3:], agent_rel_pose_cov, prev_prior_variance,
                hole_prior, hole_prior_var, batch_size)

            # reset prior

            # B x OH x OW x (3+F)
            prior = fused_projected
            prior_var = fused_projected_variance

            # per-pixel fusion with measurements #
            # -----------------------------------#

            # extract slice for frame

            # B x OH x OW x (3+F)
            measurement = meas[:, i]
            measurement_variance = meas_vars[:, i]

            # fuse prior and measurement

            # B x 2 x OH x OW x (3+F)
            prior_and_meas = ivy.concatenate(
                (ivy.expand_dims(prior, 1), ivy.expand_dims(measurement, 1)),
                1)
            prior_and_meas_variance = ivy.concatenate(
                (ivy.expand_dims(prior_var, 1),
                 ivy.expand_dims(measurement_variance, 1)), 1)

            # B x OH x OW x (3+F)
            low_var_mask = ivy.reduce_sum(
                ivy.cast(
                    prior_and_meas_variance <
                    ivy.expand_dims(hole_prior_var, 1) *
                    self._threshold_var_factor, 'int32'), 1) > 0

            # B x 1 x OH x OW x (3+F)    B x 1 x OH x OW x (3+F)
            # ToDo: handle this properly once re-implemented with a single scatter operation only
            #  currently depth values are fused even if these are clearly far apart
            fused_val_unsmoothed, fused_variance_unsmoothed = \
                self._fuse_measurements_with_uncertainty(prior_and_meas, prior_and_meas_variance, 1)

            # B x OH x OW x (3+F)
            # This prevents accumulating certainty from duplicate re-projections from prior measurements
            fused_variance_unsmoothed = ivy.where(
                low_var_mask, fused_variance_unsmoothed[:, 0], hole_prior_var)

            # B x OH x OW x (3+F)
            fused_val = fused_val_unsmoothed[:, 0]
            fused_variance = fused_variance_unsmoothed
            low_var_mask = fused_variance < hole_prior_var

            # B x OH x OW x (3+F)    B x OH x OW x (3+F)
            fused_val, fused_variance = self.smooth(fused_val, fused_variance,
                                                    low_var_mask,
                                                    self._smooth_mean,
                                                    self._smooth_kernel_size,
                                                    True, True, batch_size)

            # append to list for returning

            # B x OH x OW x (3+F)
            fused_list.append(fused_val)

            # B x OH x OW x (3+F)
            fused_variances_list.append(fused_variance)

            # update for next time step
            prev_fused_val = fused_val
            prev_fused_variance = fused_variance

        # list of *[batch_size, oh, ow, (3+f)]*,    list of *[batch_size, oh, ow, (3+f)]*
        return fused_list, fused_variances_list
Example #27
File: esm.py Project: wx-b/memory
    def _convert_images_to_omni_observations(self, measurements,
                                             uniform_sphere_pixel_coords,
                                             holes_prior, batch_size,
                                             num_timesteps, num_cams,
                                             image_dims):
        """
        Convert images to omni-directional measurements

        :param measurements: perspective captured images and relative poses container
        :param uniform_sphere_pixel_coords: Uniform sphere pixel coords *[batch_size, num_timesteps, oh, ow, 3]*
        :param holes_prior: Prior for quantization holes *[batch_size, num_timesteps, oh, ow, 1+f]*
        :param batch_size: Size of batch
        :param num_timesteps: Number of frames
        :param num_cams: Number of cameras
        :param image_dims: Image dimensions
        :return: *[batch_size, n, oh, ow, 3+f]*    *[batch_size, n, oh, ow, 3+f]*
        """

        # coords from all scene cameras wrt world

        images_list = list()
        images_var_list = list()
        cam_rel_poses_list = list()
        cam_rel_poses_cov_list = list()
        cam_rel_mats_list = list()
        validity_mask_list = list()
        for key, item in measurements.to_iterator():
            if key == 'img_mean':
                # B x N x 1 x H x W x (3+f)
                images_list.append(ivy.expand_dims(item, 2))
            elif key == 'img_var':
                # B x N x 1 x H x W x (3+f)
                images_var_list.append(ivy.expand_dims(item, 2))
            elif key == 'pose_mean':
                # B x N x 1 x 6
                cam_rel_poses_list.append(ivy.expand_dims(item, 2))
            elif key == 'pose_cov':
                # B x N x 1 x 6 x 6
                cam_rel_poses_cov_list.append(ivy.expand_dims(item, 2))
            elif key == 'cam_rel_mat':
                # B x N x 1 x 3 x 4
                cam_rel_mats_list.append(ivy.expand_dims(item, 2))
            elif key == 'validity_mask':
                validity_mask_list.append(ivy.expand_dims(item, 2))
            else:
                raise Exception('Invalid image key: {}'.format(key))

        # B x N x C x H x W x (3+f)
        images = ivy.concatenate(images_list, 2)

        # B x N x C x H x W x (3+f)
        var_to_project = ivy.concatenate(images_var_list, 2)

        # B x N x C x 6
        cam_to_cam_poses = ivy.concatenate(cam_rel_poses_list, 2)

        # B x N x C x 3 x 4
        cam_to_cam_mats = ivy.concatenate(cam_rel_mats_list, 2)

        # B x N x C x 6 x 6
        cam_to_cam_pose_covs = ivy.concatenate(cam_rel_poses_cov_list, 2)

        # B x N x C x 1
        validity_masks = ivy.concatenate(validity_mask_list, 2) > 0

        # B x N x OH x OW x (3+f)
        holes_prior_var = ivy.ones(
            [batch_size, num_timesteps] + self._sphere_img_dims +
            [3 + self._feat_dim],
            dev_str=self._dev_str) * 1e12

        # reset invalid regions to prior

        # B x N x C x H x W x (3+f)
        images = ivy.where(
            validity_masks, images,
            ivy.concatenate(
                (images[..., 0:2],
                 ivy.zeros_like(images[..., 2:], dev_str=self._dev_str)), -1))

        # B x N x C x H x W x (3+f)
        var_to_project = ivy.where(
            validity_masks, var_to_project,
            ivy.ones_like(var_to_project, dev_str=self._dev_str) * 1e12)

        # B x N x OH x OW x (3+f)    # B x N x OH x OW x (3+f)
        return self._frame_to_omni_frame_projection(
            cam_to_cam_poses, cam_to_cam_mats, uniform_sphere_pixel_coords,
            images[..., 0:3], images[..., 3:], cam_to_cam_pose_covs,
            var_to_project, holes_prior, holes_prior_var, batch_size,
            num_timesteps, num_cams, image_dims)
Example #28
def lstm_update(x, init_h, init_c, kernel, recurrent_kernel, bias=None, recurrent_bias=None):
    """
    Perform long short-term memory (LSTM) update by unrolling the time dimension of the input array.

    :param x: input tensor of LSTM layer *[batch_shape, t, in]*.
    :type x: array
    :param init_h: initial state tensor for the cell output *[batch_shape, out]*.
    :type init_h: array
    :param init_c: initial state tensor for the cell hidden state *[batch_shape, out]*.
    :type init_c: array
    :param kernel: weights for cell kernel *[in, 4 x out]*.
    :type kernel: array
    :param recurrent_kernel: weights for cell recurrent kernel *[out, 4 x out]*.
    :type recurrent_kernel: array
    :param bias: bias for cell kernel *[4 x out]*.
    :type bias: array
    :param recurrent_bias: bias for cell recurrent kernel *[4 x out]*.
    :type recurrent_bias: array
    :return: hidden state for all timesteps *[batch_shape,t,out]* and cell state for last timestep *[batch_shape,out]*
    """

    # get shapes
    x_shape = list(x.shape)
    batch_shape = x_shape[:-2]
    timesteps = x_shape[-2]
    input_channels = x_shape[-1]
    x_flat = ivy.reshape(x, (-1, input_channels))

    # input kernel
    Wi = kernel
    Wi_x = ivy.reshape(ivy.matmul(x_flat, Wi) + (bias if bias is not None else 0),
                        batch_shape + [timesteps, -1])
    Wii_x, Wif_x, Wig_x, Wio_x = ivy.split(Wi_x, 4, -1)

    # recurrent kernel
    Wh = recurrent_kernel

    # lstm states
    ht = init_h
    ct = init_c

    # lstm outputs
    ot = x
    hts_list = list()

    # unrolled time dimension with lstm steps
    for Wii_xt, Wif_xt, Wig_xt, Wio_xt in zip(ivy.unstack(Wii_x, axis=-2), ivy.unstack(Wif_x, axis=-2),
                                              ivy.unstack(Wig_x, axis=-2), ivy.unstack(Wio_x, axis=-2)):
        htm1 = ht
        ctm1 = ct

        Wh_htm1 = ivy.matmul(htm1, Wh) + (recurrent_bias if recurrent_bias is not None else 0)
        Whi_htm1, Whf_htm1, Whg_htm1, Who_htm1 = ivy.split(Wh_htm1, num_sections=4, axis=-1)

        it = ivy.sigmoid(Wii_xt + Whi_htm1)
        ft = ivy.sigmoid(Wif_xt + Whf_htm1)
        gt = ivy.tanh(Wig_xt + Whg_htm1)
        ot = ivy.sigmoid(Wio_xt + Who_htm1)
        ct = ft * ctm1 + it * gt
        ht = ot * ivy.tanh(ct)

        hts_list.append(ivy.expand_dims(ht, -2))

    return ivy.concatenate(hts_list, -2), ct
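A hypothetical usage sketch (random weights, assumes a backend has been set): a single LSTM layer unrolled over five timesteps.

import ivy
b, t, in_ch, out_ch = 2, 5, 8, 16
x = ivy.random_uniform(shape=(b, t, in_ch))
kernel = ivy.random_uniform(shape=(in_ch, 4 * out_ch))
recurrent_kernel = ivy.random_uniform(shape=(out_ch, 4 * out_ch))
init_h = ivy.zeros((b, out_ch))
init_c = ivy.zeros((b, out_ch))
hts, ct = lstm_update(x, init_h, init_c, kernel, recurrent_kernel)
# hts: [2, 5, 16] hidden states for every step; ct: [2, 16] final cell state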
Example #29
    def _forward(self, x):
        for layer in self._layers:
            x = layer(x)
        return ivy.expand_dims(x, 0)
Example #30
def cuboid_signed_distances(cuboid_ext_mats,
                            cuboid_dims,
                            query_positions,
                            batch_shape=None):
    """
    Return the signed distances of a set of query points from the cuboid surfaces.\n
    `[reference] <https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm>`_

    :param cuboid_ext_mats: Extrinsic matrices of the cuboids *[batch_shape,num_cuboids,3,4]*
    :type cuboid_ext_mats: array
    :param cuboid_dims: Dimensions of the cuboids, in the order x, y, z *[batch_shape,num_cuboids,3]*
    :type cuboid_dims: array
    :param query_positions: Points for which to query the signed distances *[batch_shape,num_points,3]*
    :type query_positions: array
    :param batch_shape: Shape of batch. Assumed no batches if None.
    :type batch_shape: sequence of ints, optional
    :return: The distances of the query points from the closest cuboid surface *[batch_shape,num_points,1]*
    """

    if batch_shape is None:
        batch_shape = cuboid_ext_mats.shape[:-3]

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)
    batch_dims_for_trans = list(range(num_batch_dims))
    num_cuboids = cuboid_ext_mats.shape[-3]
    num_points = query_positions.shape[-2]

    # BS x 3 x NP
    query_positions_trans = _ivy.transpose(
        query_positions,
        batch_dims_for_trans + [num_batch_dims + 1, num_batch_dims])

    # BS x 1 x NP
    ones = _ivy.ones_like(query_positions_trans[..., 0:1, :])

    # BS x 4 x NP
    query_positions_trans_homo = _ivy.concatenate(
        (query_positions_trans, ones), -2)

    # BS x NCx3 x 4
    cuboid_ext_mats_flat = _ivy.reshape(cuboid_ext_mats, batch_shape + [-1, 4])

    # BS x NCx3 x NP
    rel_query_positions_trans_flat = _ivy.matmul(cuboid_ext_mats_flat,
                                                 query_positions_trans_homo)

    # BS x NC x 3 x NP
    rel_query_positions_trans = _ivy.reshape(
        rel_query_positions_trans_flat,
        batch_shape + [num_cuboids, 3, num_points])

    # BS x NC x NP x 3
    rel_query_positions = _ivy.transpose(
        rel_query_positions_trans, batch_dims_for_trans +
        [num_batch_dims, num_batch_dims + 2, num_batch_dims + 1])
    q = _ivy.abs(rel_query_positions) - _ivy.expand_dims(cuboid_dims / 2, -2)
    q_max_clipped = _ivy.maximum(q, 1e-12)

    # BS x NC x NP x 1
    q_min_clipped = _ivy.minimum(_ivy.reduce_max(q, -1, keepdims=True), 0.)
    q_max_clipped_len = _ivy.reduce_sum(q_max_clipped**2, -1,
                                        keepdims=True)**0.5
    sdfs = q_max_clipped_len + q_min_clipped

    # BS x NP x 1
    return _ivy.reduce_min(sdfs, -3)
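A hypothetical usage sketch (assumes a backend has been set): a single unit cube centred at the origin, with identity extrinsics, queried at a point 1.0 outside its +x face.

import ivy
cuboid_ext_mats = ivy.array([[[1., 0., 0., 0.],
                              [0., 1., 0., 0.],
                              [0., 0., 1., 0.]]])  # [1,3,4]
cuboid_dims = ivy.array([[1., 1., 1.]])            # [1,3]
query_positions = ivy.array([[1.5, 0., 0.]])       # [1,3]
sdfs = cuboid_signed_distances(cuboid_ext_mats, cuboid_dims, query_positions)
# sdfs: [[1.0]]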