def _rot_mat_to_zxz_euler(rot_mat):
    # BS x 1
    euler_angles_1 = _ivy.acos(rot_mat[..., 2, 2:3])
    gimbal_validity = _ivy.abs(rot_mat[..., 0, 2:3]) > GIMBAL_TOL
    r12 = rot_mat[..., 0, 1:2]
    r11 = rot_mat[..., 0, 0:1]
    gimbal_euler_angles_0 = _ivy.atan2(-r12, r11)
    gimbal_euler_angles_2 = _ivy.zeros_like(gimbal_euler_angles_0)

    # BS x 3
    gimbal_euler_angles = _ivy.concatenate(
        (gimbal_euler_angles_0, euler_angles_1, gimbal_euler_angles_2), -1)

    # BS x 1
    r31 = rot_mat[..., 2, 0:1]
    r32 = rot_mat[..., 2, 1:2]
    r13 = rot_mat[..., 0, 2:3]
    r23 = rot_mat[..., 1, 2:3]
    normal_euler_angles_0 = _ivy.atan2(r31, r32)
    normal_euler_angles_2 = _ivy.atan2(r13, -r23)

    # BS x 3
    normal_euler_angles = _ivy.concatenate(
        (normal_euler_angles_0, euler_angles_1, normal_euler_angles_2), -1)

    return _ivy.where(gimbal_validity, normal_euler_angles, gimbal_euler_angles)
def _group_tensor_into_windowed_tensor(self, x, valid_first_frame):
    if self._window_size == 1:
        valid_first_frame_pruned = ivy.cast(valid_first_frame[:, 0], 'bool')
    else:
        valid_first_frame_pruned = ivy.cast(
            valid_first_frame[:1 - self._window_size, 0], 'bool')
    # if no valid window-start frames exist, fall back to a single window starting at frame 0
    if ivy.reduce_sum(ivy.cast(valid_first_frame_pruned, 'int32'))[0] == 0:
        valid_first_frame_pruned = ivy.cast(
            ivy.one_hot(0, self._sequence_lengths[0] - self._window_size + 1), 'bool')
    window_idxs_single = ivy.indices_where(valid_first_frame_pruned)
    # build gather indices for each valid window
    gather_idxs_list = list()
    for w_idx in window_idxs_single:
        gather_idxs_list.append(
            ivy.expand_dims(
                ivy.arange(w_idx[0] + self._window_size, w_idx[0], 1), 0))
    gather_idxs = ivy.concatenate(gather_idxs_list, 0)
    gather_idxs = ivy.reshape(gather_idxs, (-1, 1))
    num_valid_windows_for_seq = ivy.shape(window_idxs_single)[0:1]
    # num_windows x window_size x (frame dims)
    return ivy.reshape(
        ivy.gather_nd(x, gather_idxs),
        ivy.concatenate(
            (num_valid_windows_for_seq, ivy.array([self._window_size]),
             ivy.shape(x)[1:]), 0))
def _rot_mat_to_zyx_euler(rot_mat):
    # BS x 1
    euler_angles_1 = _ivy.asin(rot_mat[..., 0, 2:3])
    gimbal_validity = _ivy.abs(rot_mat[..., 1, 1:2]) > GIMBAL_TOL
    r21 = rot_mat[..., 1, 0:1]
    r22 = rot_mat[..., 1, 1:2]
    gimbal_euler_angles_0 = _ivy.atan2(r21, r22)
    gimbal_euler_angles_2 = _ivy.zeros_like(gimbal_euler_angles_0)

    # BS x 3
    gimbal_euler_angles = _ivy.concatenate(
        (gimbal_euler_angles_0, euler_angles_1, gimbal_euler_angles_2), -1)

    # BS x 1
    r12 = rot_mat[..., 0, 1:2]
    r11 = rot_mat[..., 0, 0:1]
    r23 = rot_mat[..., 1, 2:3]
    r33 = rot_mat[..., 2, 2:3]
    normal_euler_angles_0 = _ivy.atan2(-r12, r11)
    normal_euler_angles_2 = _ivy.atan2(-r23, r33)

    # BS x 3
    normal_euler_angles = _ivy.concatenate(
        (normal_euler_angles_0, euler_angles_1, normal_euler_angles_2), -1)

    return _ivy.where(gimbal_validity, normal_euler_angles, gimbal_euler_angles)
def concat(containers, dim):
    """
    Concatenate containers together along the specified dimension.

    :param containers: containers to concatenate
    :type containers: sequence of Container objects
    :param dim: dimension along which to concatenate
    :type dim: int
    :return: Concatenated containers
    """
    container0 = containers[0]
    if isinstance(container0, dict):
        return_dict = dict()
        for key in container0.keys():
            return_dict[key] = Container.concat(
                [container[key] for container in containers], dim)
        return Container(return_dict)
    else:
        # noinspection PyBroadException
        try:
            if len(containers[0].shape) == 0:
                return _ivy.concatenate(
                    [_ivy.reshape(item, [1] * (dim + 1)) for item in containers], dim)
            else:
                return _ivy.concatenate(containers, dim)
        except Exception as e:
            raise Exception(
                str(e) + '\nContainer concat operation only valid for containers of arrays')
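# Hedged usage sketch (not part of the original source): concatenating two simple
# containers of arrays along the leading dimension. Constructing Container from a
# plain dict and the _ivy.array call are assumptions based on how these names are
# used elsewhere in this file.
c1 = Container({'x': _ivy.array([[1.]])})
c2 = Container({'x': _ivy.array([[3.]])})
c12 = Container.concat([c1, c2], 0)  # c12['x'] -> [[1.], [3.]]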
def concat(containers, dim, f=None):
    """
    Concatenate containers together along the specified dimension.

    :param containers: containers to concatenate
    :type containers: sequence of Container objects
    :param dim: dimension along which to concatenate
    :type dim: int
    :param f: Machine learning framework. Inferred from inputs if None.
    :type f: ml_framework, optional
    :return: Concatenated containers
    """
    container0 = containers[0]
    if isinstance(container0, dict):
        return_dict = dict()
        for key in container0.keys():
            return_dict[key] = Container.concat(
                [container[key] for container in containers], dim)
        return Container(return_dict)
    else:
        f = _get_framework(container0, f=f)
        # noinspection PyBroadException
        try:
            if len(containers[0].shape) == 0:
                return _ivy.concatenate(
                    [_ivy.reshape(item, [1] * (dim + 1)) for item in containers], dim, f=f)
            else:
                return _ivy.concatenate(containers, dim, f=f)
        except Exception as e:
            raise Exception(
                str(e) + '\nContainer concat operation only valid for containers of arrays')
def calib_mat_to_intrinsics_object(calib_mat, image_dims, batch_shape=None):
    """
    Create camera intrinsics object from calibration matrix.

    :param calib_mat: Calibration matrices *[batch_shape,3,3]*
    :type calib_mat: array
    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Camera intrinsics object
    """
    if batch_shape is None:
        batch_shape = calib_mat.shape[:-2]

    # shapes as list
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # BS x 2
    focal_lengths = _ivy.concatenate((calib_mat[..., 0, 0:1], calib_mat[..., 1, 1:2]), -1)

    # BS x 2
    persp_angles = focal_lengths_to_persp_angles(focal_lengths, image_dims)

    # BS x 2
    pp_offsets = _ivy.concatenate((calib_mat[..., 0, -1:], calib_mat[..., 1, -1:]), -1)

    # BS x 3 x 3
    inv_calib_mat = _ivy.inv(calib_mat)

    # intrinsics object
    intrinsics = _Intrinsics(focal_lengths, persp_angles, pp_offsets, calib_mat, inv_calib_mat)

    return intrinsics
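# Hedged usage sketch (not part of the original source): a single un-batched pinhole
# calibration matrix, from which the intrinsics object is built; the _ivy.array call
# is an assumption about the available namespace. The resulting object bundles the
# focal lengths [50., 50.], the principal point offsets [32., 24.], the calibration
# matrix and its inverse (field meanings taken from the constructor arguments above).
calib_mat = _ivy.array([[50., 0., 32.],
                        [0., 50., 24.],
                        [0., 0., 1.]])
intrinsics = calib_mat_to_intrinsics_object(calib_mat, image_dims=[48, 64])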
def empty_memory(self, batch_size, timesteps):
    uniform_pixel_coords = \
        ivy_vision.create_uniform_pixel_coords_image(
            self._sphere_img_dims, [batch_size, timesteps], dev_str=self._dev_str)[..., 0:2]
    empty_memory = {
        'mean': ivy.concatenate(
            [uniform_pixel_coords] +
            [ivy.ones([batch_size, timesteps] + self._sphere_img_dims + [1],
                      dev_str=self._dev_str) * self._sphere_depth_prior_val] +
            [ivy.ones([batch_size, timesteps] + self._sphere_img_dims + [self._feat_dim],
                      dev_str=self._dev_str) * self._sphere_feat_prior_val], -1),
        'var': ivy.concatenate(
            [ivy.ones([batch_size, timesteps] + self._sphere_img_dims + [2],
                      dev_str=self._dev_str) * self._ang_pix_prior_var_val] +
            [ivy.ones([batch_size, timesteps] + self._sphere_img_dims + [1],
                      dev_str=self._dev_str) * self._depth_prior_var_val] +
            [ivy.ones([batch_size, timesteps] + self._sphere_img_dims + [self._feat_dim],
                      dev_str=self._dev_str) * self._feat_prior_var_val], -1)}
    return ESMMemory(**empty_memory)
def setup_primitive_scene(self):
    # shape matrices
    shape_matrices = ivy.concatenate(
        [ivy.reshape(ivy.array(obj.get_matrix(), 'float32'), (1, 3, 4))
         for obj in self._objects], 0)

    # shape dims
    x_dims = ivy.concatenate(
        [ivy.reshape(ivy.array(
            obj.get_bounding_box()[1] - obj.get_bounding_box()[0], 'float32'), (1, 1))
         for obj in self._objects], 0)
    y_dims = ivy.concatenate(
        [ivy.reshape(ivy.array(
            obj.get_bounding_box()[3] - obj.get_bounding_box()[2], 'float32'), (1, 1))
         for obj in self._objects], 0)
    z_dims = ivy.concatenate(
        [ivy.reshape(ivy.array(
            obj.get_bounding_box()[5] - obj.get_bounding_box()[4], 'float32'), (1, 1))
         for obj in self._objects], 0)
    shape_dims = ivy.concatenate((x_dims, y_dims, z_dims), -1)

    # primitive scene visualization
    if self._with_primitive_scene_vis:
        scene_vis = [Shape.create(PrimitiveShape.CUBOID, ivy.to_numpy(shape_dim).tolist())
                     for shape_dim in shape_dims]
        [obj.set_matrix(ivy.to_numpy(shape_mat).reshape(-1).tolist())
         for shape_mat, obj in zip(shape_matrices, scene_vis)]
        [obj.set_transparency(0.5) for obj in scene_vis]

    # sdf
    primitive_scene = PrimitiveScene(
        cuboid_ext_mats=ivy.inv(ivy_mech.make_transformation_homogeneous(
            shape_matrices))[..., 0:3, :],
        cuboid_dims=shape_dims)
    self.sdf = primitive_scene.sdf
def angular_pixel_to_sphere_coords(angular_pixel_coords, pixels_per_degree):
    """
    Convert angular pixel co-ordinates image :math:`\mathbf{A}_p\in\mathbb{R}^{h×w×3}` to
    camera-centric ego-sphere polar co-ordinates image :math:`\mathbf{S}_c\in\mathbb{R}^{h×w×3}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates>`_

    :param angular_pixel_coords: Angular pixel co-ordinates image *[batch_shape,h,w,3]*
    :type angular_pixel_coords: array
    :param pixels_per_degree: Number of pixels per angular degree
    :type pixels_per_degree: float
    :return: Camera-centric ego-sphere polar co-ordinates image *[batch_shape,h,w,3]*
    """
    # BS x H x W x 1
    sphere_x_coords = angular_pixel_coords[..., 0:1]
    sphere_y_coords = angular_pixel_coords[..., 1:2]
    radius_values = angular_pixel_coords[..., 2:3]

    sphere_x_angle_coords_in_degs = (180 - sphere_x_coords / (pixels_per_degree + MIN_DENOMINATOR) % 360)
    sphere_y_angle_coords_in_degs = (sphere_y_coords / (pixels_per_degree + MIN_DENOMINATOR) % 180)

    # BS x H x W x 2
    sphere_angle_coords_in_degs = _ivy.concatenate(
        (sphere_x_angle_coords_in_degs, sphere_y_angle_coords_in_degs), -1)
    sphere_angle_coords = sphere_angle_coords_in_degs * np.pi / 180

    # BS x H x W x 3
    return _ivy.concatenate((sphere_angle_coords, radius_values), -1)
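# Hedged usage sketch (not part of the original source): a single-pixel "image"
# holding angular x/y pixel coordinates plus a radius channel, converted to
# ego-sphere polar coordinates in radians; the _ivy.array call is an assumption.
angular_pixel_coords = _ivy.array([[[180., 90., 1.]]])  # 1 x 1 x 3
sphere_coords = angular_pixel_to_sphere_coords(angular_pixel_coords, pixels_per_degree=1.)
# -> approx [[[0., pi/2, 1.]]]: polar angles in radians, radius passed through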
def polar_axis_angle_to_quaternion(polar_axis_angle):
    """
    Convert polar axis-angle representation, which constitutes the elevation and azimuth angles of the
    axis as well as the rotation angle :math:`\mathbf{θ}_{paa} = [ϕ_e, ϕ_a, θ]`, to quaternion form
    :math:`\mathbf{q} = [q_i, q_j, q_k, q_r]`.\n
    `[reference] <https://en.wikipedia.org/wiki/Axis%E2%80%93angle_representation>`_

    :param polar_axis_angle: Polar axis angle representation *[batch_shape,3]*
    :type polar_axis_angle: array
    :return: Quaternion *[batch_shape,4]*
    """
    # BS x 1
    theta = polar_axis_angle[..., 0:1]
    phi = polar_axis_angle[..., 1:2]
    angle = polar_axis_angle[..., 2:3]
    x = _ivy.sin(theta) * _ivy.cos(phi)
    y = _ivy.sin(theta) * _ivy.sin(phi)
    z = _ivy.cos(theta)

    # BS x 3
    vector = _ivy.concatenate((x, y, z), -1)

    # BS x 4
    return axis_angle_to_quaternion(_ivy.concatenate([vector, angle], -1))
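# Hedged usage sketch (not part of the original source): with the first two angles
# zero the conversion above selects the z-axis, and a zero rotation angle should map
# to the identity quaternion; the _ivy.array call is an assumption about the namespace.
polar_axis_angle = _ivy.array([0., 0., 0.])
quat = polar_axis_angle_to_quaternion(polar_axis_angle)  # -> approx [0., 0., 0., 1.]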
def _rot_mat_to_yzy_euler(rot_mat):
    # BS x 1
    euler_angles_1 = _ivy.acos(rot_mat[..., 1, 1:2])
    gimbal_validity = _ivy.abs(rot_mat[..., 1, 0:1]) > GIMBAL_TOL
    r31 = rot_mat[..., 2, 0:1]
    r33 = rot_mat[..., 2, 2:3]
    gimbal_euler_angles_0 = _ivy.atan2(-r31, r33)
    gimbal_euler_angles_2 = _ivy.zeros_like(gimbal_euler_angles_0)

    # BS x 3
    gimbal_euler_angles = _ivy.concatenate(
        (gimbal_euler_angles_0, euler_angles_1, gimbal_euler_angles_2), -1)

    # BS x 1
    r23 = rot_mat[..., 1, 2:3]
    r21 = rot_mat[..., 1, 0:1]
    r32 = rot_mat[..., 2, 1:2]
    r12 = rot_mat[..., 0, 1:2]
    normal_euler_angles_0 = _ivy.atan2(r23, r21)
    normal_euler_angles_2 = _ivy.atan2(r32, r12)

    # BS x 3
    normal_euler_angles = _ivy.concatenate(
        (normal_euler_angles_0, euler_angles_1, normal_euler_angles_2), -1)

    return _ivy.where(gimbal_validity, normal_euler_angles, gimbal_euler_angles)
def _fit_spline(train_points, train_values, order):

    # shapes
    train_points_shape = train_points.shape
    batch_shape = list(train_points_shape[:-2])
    num_batch_dims = len(batch_shape)
    n = train_points_shape[-2]
    pd = train_values.shape[-1]

    # BS x N x 1
    c = train_points

    # BS x N x PD
    f_ = train_values

    matrix_a = _phi(_pairwise_distance(c, c), order)

    # BS x N x 1
    ones = _ivy.ones_like(c[..., :1])

    # BS x N x 2
    matrix_b = _ivy.concatenate([c, ones], -1)

    # BS x 2 x N
    matrix_b_trans = _ivy.transpose(
        matrix_b, list(range(num_batch_dims)) + [num_batch_dims + 1, num_batch_dims])

    # BS x N+2 x N
    left_block = _ivy.concatenate([matrix_a, matrix_b_trans], -2)

    # BS x 2 x 2
    lhs_zeros = _ivy.zeros(batch_shape + [2, 2])

    # BS x N+2 x 2
    right_block = _ivy.concatenate([matrix_b, lhs_zeros], -2)

    # BS x N+2 x N+2
    lhs = _ivy.concatenate([left_block, right_block], -1)

    # BS x 2 x PD
    rhs_zeros = _ivy.zeros(batch_shape + [2, pd])

    # BS x N+2 x PD
    rhs = _ivy.concatenate([f_, rhs_zeros], -2)

    # BS x N+2 x PD
    w_v = _ivy.matmul(_ivy.pinv(lhs), rhs)

    # BS x N x PD
    w = w_v[..., :n, :]

    # BS x 2 x PD
    v = w_v[..., n:, :]

    # BS x N x PD,    BS x 2 x PD
    return w, v
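# Hedged usage sketch (not part of the original source): fits spline interpolation
# weights for 5 random 1-D training points with 2-D values at order 2, assuming the
# same _ivy namespace and the _phi / _pairwise_distance helpers referenced above.
train_points = _ivy.random_uniform(0., 1., [1, 5, 1])
train_values = _ivy.random_uniform(0., 1., [1, 5, 2])
w, v = _fit_spline(train_points, train_values, order=2)  # shapes: 1 x 5 x 2 and 1 x 2 x 2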
def _addressing(self, k, beta, g, s, gamma, prev_M, prev_w):

    # Sec 3.3.1 Focusing by Content

    # Cosine Similarity
    k = ivy.expand_dims(k, axis=2)
    inner_product = ivy.matmul(prev_M, k)
    k_norm = ivy.reduce_sum(k**2, axis=1, keepdims=True)**0.5
    M_norm = ivy.reduce_sum(prev_M**2, axis=2, keepdims=True)**0.5
    norm_product = M_norm * k_norm
    K = ivy.squeeze(inner_product / (norm_product + 1e-8))  # eq (6)

    # Calculating w^c
    K_amplified = ivy.exp(ivy.expand_dims(beta, axis=1) * K)
    w_c = K_amplified / ivy.reduce_sum(K_amplified, axis=1, keepdims=True)  # eq (5)

    if self._addressing_mode == 'content':  # Only focus on content
        return w_c

    # Sec 3.3.2 Focusing by Location
    g = ivy.expand_dims(g, axis=1)
    w_g = g * w_c + (1 - g) * prev_w  # eq (7)
    s = ivy.concatenate([
        s[:, :self._shift_range + 1],
        ivy.zeros([s.shape[0], self._memory_size - (self._shift_range * 2 + 1)]),
        s[:, -self._shift_range:]
    ], axis=1)
    t = ivy.concatenate([ivy.flip(s, axis=[1]), ivy.flip(s, axis=[1])], axis=1)
    s_matrix = ivy.stack([
        t[:, self._memory_size - i - 1:self._memory_size * 2 - i - 1]
        for i in range(self._memory_size)
    ], axis=1)
    w_ = ivy.reduce_sum(ivy.expand_dims(w_g, axis=1) * s_matrix, axis=2)  # eq (8)
    w_sharpen = w_**ivy.expand_dims(gamma, axis=1)
    w = w_sharpen / ivy.reduce_sum(w_sharpen, axis=1, keepdims=True)  # eq (9)

    return w
def create_trimesh_indices_for_image(batch_shape, image_dims, dev_str='cpu:0'):
    """
    Create triangle mesh for image with given image dimensions.

    :param batch_shape: Shape of batch.
    :type batch_shape: sequence of ints
    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc.
    :type dev_str: str, optional
    :return: Triangle mesh indices for image *[batch_shape,2x(h-1)x(w-1),3]*
    """
    # shapes as lists
    batch_shape = list(batch_shape)
    image_dims = list(image_dims)

    # other shape specs
    num_batch_dims = len(batch_shape)
    tri_dim = 2 * (image_dims[0] - 1) * (image_dims[1] - 1)
    flat_shape = [1] * num_batch_dims + [tri_dim] + [3]
    tile_shape = batch_shape + [1] * 2

    # 1 x W-1
    t00_ = _ivy.reshape(_ivy.arange(image_dims[1] - 1, dtype_str='float32', dev_str=dev_str), (1, -1))

    # H-1 x 1
    k_ = _ivy.reshape(_ivy.arange(image_dims[0] - 1, dtype_str='float32', dev_str=dev_str), (-1, 1)) * image_dims[1]

    # H-1 x W-1
    t00_ = _ivy.matmul(_ivy.ones((image_dims[0] - 1, 1), dev_str=dev_str), t00_)
    k_ = _ivy.matmul(k_, _ivy.ones((1, image_dims[1] - 1), dev_str=dev_str))

    # (H-1xW-1) x 1
    t00 = _ivy.expand_dims(t00_ + k_, -1)
    t01 = t00 + 1
    t02 = t00 + image_dims[1]
    t10 = t00 + image_dims[1] + 1
    t11 = t01
    t12 = t02

    # (H-1xW-1) x 3
    t0 = _ivy.concatenate((t00, t01, t02), -1)
    t1 = _ivy.concatenate((t10, t11, t12), -1)

    # BS x 2x(H-1xW-1) x 3
    return _ivy.tile(_ivy.reshape(_ivy.concatenate((t0, t1), 0), flat_shape), tile_shape)
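# Hedged usage sketch (not part of the original source): for a 3 x 4 image there are
# 2 * (3 - 1) * (4 - 1) = 12 triangles, each row holding three flattened pixel indices.
trimesh_idxs = create_trimesh_indices_for_image([1], [3, 4])  # shape: 1 x 12 x 3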
def get_observation(self):
    """
    Get observation from environment.

    :return: observation array
    """
    return ivy.concatenate([self.x, self.x_vel], axis=-1)
def rot_mat_and_cam_center_to_ext_mat(rotation_mat, camera_center, batch_shape=None):
    """
    Get extrinsic matrix :math:`\mathbf{E}\in\mathbb{R}^{3×4}` from rotation matrix
    :math:`\mathbf{R}\in\mathbb{R}^{3×3}` and camera centers :math:`\overset{\sim}{\mathbf{C}}\in\mathbb{R}^{3×1}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=175>`_
    page 157, section 6.1, equation 6.11

    :param rotation_mat: Rotation matrix *[batch_shape,3,3]*
    :type rotation_mat: array
    :param camera_center: Camera center *[batch_shape,3,1]*
    :type camera_center: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Extrinsic matrix *[batch_shape,3,4]*
    """
    if batch_shape is None:
        batch_shape = rotation_mat.shape[:-2]

    # shapes as list
    batch_shape = list(batch_shape)

    # num batch dims
    num_batch_dims = len(batch_shape)

    # BS x 3 x 3
    identity = _ivy.tile(_ivy.reshape(_ivy.identity(3), [1] * num_batch_dims + [3, 3]),
                         batch_shape + [1, 1])

    # BS x 3 x 4
    identity_w_cam_center = _ivy.concatenate((identity, -camera_center), -1)

    # BS x 3 x 4
    return _ivy.matmul(rotation_mat, identity_w_cam_center)
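# Hedged usage sketch (not part of the original source): with an identity rotation the
# extrinsic matrix reduces to [I | -C]; _ivy.identity and _ivy.array usage mirrors the
# calls elsewhere in this file and is otherwise an assumption.
rot_mat = _ivy.identity(3)
cam_center = _ivy.array([[1.], [2.], [3.]])
ext_mat = rot_mat_and_cam_center_to_ext_mat(rot_mat, cam_center)
# -> [[1., 0., 0., -1.], [0., 1., 0., -2.], [0., 0., 1., -3.]]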
def hamilton_product(quaternion1, quaternion2):
    """
    Compute hamilton product :math:`\mathbf{h}_p = \mathbf{q}_1 × \mathbf{q}_2` between
    :math:`\mathbf{q}_1 = [q_{1i}, q_{1j}, q_{1k}, q_{1r}]` and
    :math:`\mathbf{q}_2 = [q_{2i}, q_{2j}, q_{2k}, q_{2r}]`.\n
    `[reference] <https://en.wikipedia.org/wiki/Quaternion#Hamilton_product>`_

    :param quaternion1: Quaternion 1 *[batch_shape,4]*
    :type quaternion1: array
    :param quaternion2: Quaternion 2 *[batch_shape,4]*
    :type quaternion2: array
    :return: New quaternion after product *[batch_shape,4]*
    """
    # BS x 1
    a1 = quaternion1[..., 3:4]
    a2 = quaternion2[..., 3:4]
    b1 = quaternion1[..., 0:1]
    b2 = quaternion2[..., 0:1]
    c1 = quaternion1[..., 1:2]
    c2 = quaternion2[..., 1:2]
    d1 = quaternion1[..., 2:3]
    d2 = quaternion2[..., 2:3]

    term_r = a1 * a2 - b1 * b2 - c1 * c2 - d1 * d2
    term_i = a1 * b2 + b1 * a2 + c1 * d2 - d1 * c2
    term_j = a1 * c2 - b1 * d2 + c1 * a2 + d1 * b2
    term_k = a1 * d2 + b1 * c2 - c1 * b2 + d1 * a2

    # BS x 4
    return _ivy.concatenate((term_i, term_j, term_k, term_r), -1)
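# Hedged usage sketch (not part of the original source): multiplying by the identity
# quaternion [0., 0., 0., 1.] leaves the other operand unchanged, which follows
# directly from the product terms above; the _ivy.array call is an assumption.
q = _ivy.array([0.2, 0.1, -0.3, 0.927])
identity_q = _ivy.array([0., 0., 0., 1.])
hamilton_product(q, identity_q)  # -> [0.2, 0.1, -0.3, 0.927]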
def get_random_quaternion(max_rot_ang=_math.pi, batch_shape=None):
    """
    Generate random quaternion :math:`\mathbf{q} = [q_i, q_j, q_k, q_r]`, adhering to maximum
    absolute rotation angle.\n
    `[reference] <https://en.wikipedia.org/wiki/Quaternion>`_

    :param max_rot_ang: Absolute value of maximum rotation angle for quaternion. Default value of :math:`π`.
    :type max_rot_ang: float, optional
    :param batch_shape: Shape of batch. An un-batched (empty) shape is assumed if None.
    :type batch_shape: sequence of ints, optional
    :return: Random quaternion *[batch_shape,4]*
    """
    if batch_shape is None:
        batch_shape = []

    # BS x 3
    quaternion_vector = _ivy.random_uniform(0, 1, list(batch_shape) + [3])
    vec_len = _ivy.norm(quaternion_vector)
    quaternion_vector /= (vec_len + MIN_DENOMINATOR)

    # BS x 1
    theta = _ivy.random_uniform(-max_rot_ang, max_rot_ang, list(batch_shape) + [1])

    # BS x 4
    return axis_angle_to_quaternion(_ivy.concatenate([quaternion_vector, theta], -1))
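# Hedged usage sketch (not part of the original source): draws a single random
# quaternion whose rotation angle is limited to +/- pi/4; _math.pi is taken from the
# default argument above.
rand_quat = get_random_quaternion(max_rot_ang=_math.pi / 4)  # shape: [4]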
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    pix_per_deg = 2
    om_pix = sim.get_pix_coords()
    plr_degs = om_pix / pix_per_deg
    plr_rads = plr_degs * math.pi / 180
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        depth, rgb = sim.omcam.cap()
        plr = ivy.concatenate([plr_rads, depth], -1)
        xyz_wrt_cam = ivy_mech.polar_to_cartesian_coords(plr)
        xyz_wrt_cam = ivy.reshape(xyz_wrt_cam, (-1, 3))
        xyz_wrt_cam_homo = ivy_mech.make_coordinates_homogeneous(xyz_wrt_cam)
        inv_ext_mat_trans = ivy.transpose(sim.omcam.get_inv_ext_mat(), (1, 0))
        xyz_wrt_world = ivy.matmul(xyz_wrt_cam_homo, inv_ext_mat_trans)[..., 0:3]
        with ivy.numpy.use:
            omni_cam_inv_ext_mat = ivy_mech.make_transformation_homogeneous(
                ivy.to_numpy(sim.omcam.get_inv_ext_mat()))
        vis.show_point_cloud(xyz_wrt_world, rgb, interactive,
                             sphere_inv_ext_mats=[omni_cam_inv_ext_mat],
                             sphere_radii=[0.025])
        if not interactive:
            sim.omcam.set_pos(sim.omcam.get_pos() + ivy.array([-0.01, 0.01, 0.]))
    sim.close()
    unset_framework()
def sphere_to_angular_pixel_coords(sphere_coords, pixels_per_degree):
    """
    Convert camera-centric ego-sphere polar co-ordinates image :math:`\mathbf{S}_c\in\mathbb{R}^{h×w×3}` to
    angular pixel co-ordinates image :math:`\mathbf{A}_p\in\mathbb{R}^{h×w×3}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Spherical_coordinate_system#Cartesian_coordinates>`_

    :param sphere_coords: Camera-centric ego-sphere polar co-ordinates image *[batch_shape,h,w,3]*
    :type sphere_coords: array
    :param pixels_per_degree: Number of pixels per angular degree
    :type pixels_per_degree: float
    :return: Angular pixel co-ordinates image *[batch_shape,h,w,3]*
    """
    # BS x H x W x 1
    sphere_radius_vals = sphere_coords[..., -1:]

    # BS x H x W x 2
    sphere_angle_coords = sphere_coords[..., 0:2]

    # BS x H x W x 2
    sphere_angle_coords_in_degs = sphere_angle_coords * 180 / np.pi

    # BS x H x W x 1
    sphere_x_coords = ((180 - sphere_angle_coords_in_degs[..., 0:1]) % 360) * pixels_per_degree
    sphere_y_coords = (sphere_angle_coords_in_degs[..., 1:2] % 180) * pixels_per_degree

    # BS x H x W x 3
    return _ivy.concatenate((sphere_x_coords, sphere_y_coords, sphere_radius_vals), -1)
def projection_matrix_inverse(proj_mat):
    """
    Given projection matrix :math:`\mathbf{P}\in\mathbb{R}^{3×4}`, compute its inverse
    :math:`\mathbf{P}^{-1}\in\mathbb{R}^{3×4}`.\n
    `[reference] <https://github.com/pranjals16/cs676/blob/master/Hartley%2C%20Zisserman%20-%20Multiple%20View%20Geometry%20in%20Computer%20Vision.pdf#page=174>`_
    middle of page 156, section 6.1, eq 6.6

    :param proj_mat: Projection matrix *[batch_shape,3,4]*
    :type proj_mat: array
    :return: Projection matrix inverse *[batch_shape,3,4]*
    """
    # BS x 3 x 3
    rotation_matrix = proj_mat[..., 0:3]

    # BS x 3 x 3
    rotation_matrix_inverses = _ivy.inv(rotation_matrix)

    # BS x 3 x 1
    translations = proj_mat[..., 3:4]

    # BS x 3 x 1
    translation_inverses = -_ivy.matmul(rotation_matrix_inverses, translations)

    # BS x 3 x 4
    return _ivy.concatenate((rotation_matrix_inverses, translation_inverses), -1)
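# Hedged usage sketch (not part of the original source): for an identity pose [I | 0]
# the inverse is again [I | 0]; _ivy.identity / _ivy.zeros / _ivy.concatenate usage
# mirrors the calls elsewhere in this file.
proj_mat = _ivy.concatenate((_ivy.identity(3), _ivy.zeros([3, 1])), -1)  # 3 x 4
proj_mat_inv = projection_matrix_inverse(proj_mat)  # -> [I | 0] again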
def increment_quaternion_pose_with_velocity(quat_pose, quat_vel, delta_t):
    """
    Increment a quaternion pose :math:`\mathbf{p}_{q} = [\mathbf{x}_c, \mathbf{q}] = [x, y, z, q_i, q_j, q_k, q_r]`
    forward by one time-increment :math:`Δt`, given the velocity represented in quaternion form
    :math:`\mathbf{V}_q`. Where :math:`\mathbf{P}_q (t+1) = \mathbf{P}_q(t) × \mathbf{V}_q`,
    :math:`t` is in seconds, and :math:`×` denotes the hamilton product.\n
    `[reference] <https://en.wikipedia.org/wiki/Quaternion>`_

    :param quat_pose: Quaternion pose. *[batch_shape,7]*
    :type quat_pose: array
    :param quat_vel: Quaternion velocity. *[batch_shape,7]*
    :type quat_vel: array
    :param delta_t: Time step in seconds, for incrementing the quaternion pose by the velocity quaternion.
    :type delta_t: float
    :return: Incremented quaternion pose *[batch_shape,7]*
    """
    # BS x 4
    current_quaternion = quat_pose[..., 3:7]
    quaternion_vel = quat_vel[..., 3:7]
    quaternion_transform = _ivy_quat.scale_quaternion_rotation_angle(quaternion_vel, delta_t)
    new_quaternion = _ivy_quat.hamilton_product(current_quaternion, quaternion_transform)

    # BS x 7
    return _ivy.concatenate(
        (quat_pose[..., 0:3] + quat_vel[..., 0:3] * delta_t, new_quaternion), -1)
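# Hedged usage sketch (not part of the original source): an identity-quaternion
# rotational velocity with zero translational velocity should leave the pose
# unchanged for any delta_t; the _ivy.array call is an assumption about the namespace.
quat_pose = _ivy.array([0., 0., 0., 0., 0., 0., 1.])
quat_vel = _ivy.array([0., 0., 0., 0., 0., 0., 1.])
increment_quaternion_pose_with_velocity(quat_pose, quat_vel, delta_t=0.05)
# -> approx [0., 0., 0., 0., 0., 0., 1.]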
def euler_pose_to_mat_pose(euler_pose, convention='zyx', batch_shape=None):
    """
    Convert Euler angle pose :math:`\mathbf{p}_{abc} = [\mathbf{x}_c, \mathbf{θ}_{xyz}] = [x, y, z, ϕ_a, ϕ_b, ϕ_c]`
    to matrix pose :math:`\mathbf{P}\in\mathbb{R}^{3×4}`.\n
    `[reference] <https://en.wikipedia.org/wiki/Euler_angles#Rotation_matrix>`_

    :param euler_pose: Euler angle pose *[batch_shape,6]*
    :type euler_pose: array
    :param convention: The axes for euler rotation, in order of L.H.S. matrix multiplication.
    :type convention: str, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Matrix pose *[batch_shape,3,4]*
    """
    if batch_shape is None:
        batch_shape = euler_pose.shape[:-1]

    # BS x 3 x 3
    rot_mat = _ivy_rot_mat.euler_to_rot_mat(euler_pose[..., 3:], convention, batch_shape)

    # BS x 3 x 4
    return _ivy.concatenate(
        (rot_mat, _ivy.expand_dims(euler_pose[..., 0:3], -1)), -1)
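# Hedged usage sketch (not part of the original source): a pose with zero Euler angles
# maps to [I | t] for any convention; the _ivy.array call is an assumption.
euler_pose = _ivy.array([1., 2., 3., 0., 0., 0.])
mat_pose = euler_pose_to_mat_pose(euler_pose)
# -> [[1., 0., 0., 1.], [0., 1., 0., 2.], [0., 0., 1., 3.]]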
def make_transformation_homogeneous(matrices, batch_shape=None, dev_str=None):
    """
    Append a homogeneous bottom row [0, 0, 0, 1] to a set of 3x4 non-homogeneous matrices,
    making them 4x4 homogeneous.

    :param matrices: set of 3x4 non-homogeneous matrices *[batch_shape,3,4]*
    :type matrices: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev_str: str, optional
    :return: 4x4 Homogeneous matrices *[batch_shape,4,4]*
    """
    if batch_shape is None:
        batch_shape = matrices.shape[:-2]

    if dev_str is None:
        dev_str = _ivy.dev_str(matrices)

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)

    # BS x 1 x 4
    last_row = _ivy.tile(
        _ivy.reshape(_ivy.array([0., 0., 0., 1.], dev_str=dev_str), [1] * num_batch_dims + [1, 4]),
        batch_shape + [1, 1])

    # BS x 4 x 4
    return _ivy.concatenate((matrices, last_row), -2)
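# Hedged usage sketch (not part of the original source): appends the [0., 0., 0., 1.]
# row to a single un-batched 3x4 pose matrix; _ivy.identity / _ivy.zeros usage mirrors
# the calls elsewhere in this file.
ext_mat = _ivy.concatenate((_ivy.identity(3), _ivy.zeros([3, 1])), -1)  # 3 x 4
ext_mat_homo = make_transformation_homogeneous(ext_mat)                 # 4 x 4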
def batch_array(x, _):
    return [
        ivy.concatenate([
            ivy.expand_dims(item, 0)
            for item in x[i * batch_size:i * batch_size + batch_size]
        ], 0) for i in range(int(len(x) / batch_size))
    ]
def get_fundamental_matrix(full_mat1, full_mat2, camera_center1=None, pinv_full_mat1=None,
                           batch_shape=None, dev_str=None):
    """
    Compute fundamental matrix :math:`\mathbf{F}\in\mathbb{R}^{3×3}` between two cameras, given their extrinsic
    matrices :math:`\mathbf{E}_1\in\mathbb{R}^{3×4}` and :math:`\mathbf{E}_2\in\mathbb{R}^{3×4}`.\n
    `[reference] <localhost:63342/ivy/docs/source/references/mvg_textbook.pdf#page=262>`_
    bottom of page 244, section 9.2.2, equation 9.1

    :param full_mat1: Frame 1 full projection matrix *[batch_shape,3,4]*
    :type full_mat1: array
    :param full_mat2: Frame 2 full projection matrix *[batch_shape,3,4]*
    :type full_mat2: array
    :param camera_center1: Frame 1 camera center, inferred from full_mat1 if None *[batch_shape,3,1]*
    :type camera_center1: array, optional
    :param pinv_full_mat1: Frame 1 full projection matrix pseudo-inverse, inferred from full_mat1 if None *[batch_shape,4,3]*
    :type pinv_full_mat1: array, optional
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev_str: str, optional
    :return: Fundamental matrix connecting frames 1 and 2 *[batch_shape,3,3]*
    """
    if batch_shape is None:
        batch_shape = full_mat1.shape[:-2]

    if dev_str is None:
        dev_str = _ivy.dev_str(full_mat1)

    # shapes as list
    batch_shape = list(batch_shape)

    if camera_center1 is None:
        inv_full_mat1 = _ivy.inv(
            _ivy_mech.make_transformation_homogeneous(full_mat1, batch_shape, dev_str))[..., 0:3, :]
        camera_center1 = _ivy_svg.inv_ext_mat_to_camera_center(inv_full_mat1)

    if pinv_full_mat1 is None:
        pinv_full_mat1 = _ivy.pinv(full_mat1)

    # BS x 4 x 1
    camera_center1_homo = _ivy.concatenate(
        (camera_center1, _ivy.ones(batch_shape + [1, 1], dev_str=dev_str)), -2)

    # BS x 3
    e2 = _ivy.matmul(full_mat2, camera_center1_homo)[..., -1]

    # BS x 3 x 3
    e2_skew_symmetric = _ivy.linalg.vector_to_skew_symmetric_matrix(e2)

    # BS x 3 x 3
    return _ivy.matmul(e2_skew_symmetric, _ivy.matmul(full_mat2, pinv_full_mat1))
def _get_dummy_obs(batch_size, num_frames, num_cams, image_dims, num_feature_channels, dev_str='cpu',
                   ones=False, empty=False):

    uniform_pixel_coords = \
        ivy_vision.create_uniform_pixel_coords_image(image_dims, [batch_size, num_frames], dev_str=dev_str)

    img_meas = dict()
    for i in range(num_cams):
        validity_mask = ivy.ones([batch_size, num_frames] + image_dims + [1], dev_str=dev_str)
        if ones:
            img_mean = ivy.concatenate(
                (uniform_pixel_coords[..., 0:2],
                 ivy.ones([batch_size, num_frames] + image_dims + [1 + num_feature_channels],
                          dev_str=dev_str)), -1)
            img_var = ivy.ones(
                [batch_size, num_frames] + image_dims + [3 + num_feature_channels], dev_str=dev_str) * 1e-3
            pose_mean = ivy.zeros([batch_size, num_frames, 6], dev_str=dev_str)
            pose_cov = ivy.ones([batch_size, num_frames, 6, 6], dev_str=dev_str) * 1e-3
        else:
            img_mean = ivy.concatenate(
                (uniform_pixel_coords[..., 0:2],
                 ivy.random_uniform(1e-3, 1,
                                    [batch_size, num_frames] + image_dims + [1 + num_feature_channels],
                                    dev_str=dev_str)), -1)
            img_var = ivy.random_uniform(
                1e-3, 1, [batch_size, num_frames] + image_dims + [3 + num_feature_channels], dev_str=dev_str)
            pose_mean = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6], dev_str=dev_str)
            pose_cov = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6, 6], dev_str=dev_str)
        if empty:
            img_var = ivy.ones_like(img_var) * 1e12
            validity_mask = ivy.zeros_like(validity_mask)
        img_meas['dummy_cam_{}'.format(i)] = \
            {'img_mean': img_mean,
             'img_var': img_var,
             'validity_mask': validity_mask,
             'pose_mean': pose_mean,
             'pose_cov': pose_cov,
             'cam_rel_mat': ivy.identity(4, batch_shape=[batch_size, num_frames],
                                         dev_str=dev_str)[..., 0:3, :]}

    if ones:
        control_mean = ivy.zeros([batch_size, num_frames, 6], dev_str=dev_str)
        control_cov = ivy.ones([batch_size, num_frames, 6, 6], dev_str=dev_str) * 1e-3
    else:
        control_mean = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6], dev_str=dev_str)
        control_cov = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6, 6], dev_str=dev_str)

    return Container({'img_meas': img_meas,
                      'control_mean': control_mean,
                      'control_cov': control_cov,
                      'agent_rel_mat': ivy.identity(4, batch_shape=[batch_size, num_frames],
                                                    dev_str=dev_str)[..., 0:3, :]})
def _fuse_measurements_with_uncertainty(measurements, measurement_uncertainties, axis):
    measurements_shape = measurements.shape
    batch_shape = measurements_shape[0:axis]
    num_batch_dims = len(batch_shape)
    num_measurements = measurements_shape[axis]

    # BS x 1 x RS
    sum_of_variances = ivy.reduce_sum(measurement_uncertainties, axis, keepdims=True)
    prod_of_variances = ivy.reduce_prod(measurement_uncertainties, axis, keepdims=True)

    # BS x 1 x RS
    new_variance = prod_of_variances / sum_of_variances

    # dim size list of BS x (dim-1) x RS
    # for each measurement i, gather the variances of all other measurements
    # (parenthesised so both leave-one-out slices are concatenated for every i)
    batch_slices = [slice(None, None, None)] * num_batch_dims
    concat_lists = \
        [([measurement_uncertainties[tuple(batch_slices + [slice(0, i, None)])]] if i != 0 else []) +
         [measurement_uncertainties[tuple(batch_slices + [slice(i + 1, None, None)])]]
         for i in range(num_measurements)]
    partial_variances_list = [ivy.concatenate(concat_list, axis) for concat_list in concat_lists]

    # dim size list of BS x 1 x RS
    partial_prod_of_variances_list = \
        [ivy.reduce_prod(partial_variance, axis, True) for partial_variance in partial_variances_list]

    # BS x dim x RS
    partial_prod_of_variances = ivy.concatenate(partial_prod_of_variances_list, axis)

    # BS x 1 x RS
    new_mean = ivy.reduce_sum(
        (partial_prod_of_variances * measurements) / sum_of_variances, axis, keepdims=True)

    # BS x 1 x RS,    BS x 1 x RS
    return new_mean, new_variance
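# Hedged usage sketch (not part of the original source): fusing two equally-uncertain
# scalar measurements along axis 0 gives their average and half the variance, matching
# standard inverse-variance fusion; the ivy.array call is an assumption.
measurements = ivy.array([[1.], [3.]])       # 2 x 1
uncertainties = ivy.array([[4.], [4.]])      # 2 x 1
mean, var = _fuse_measurements_with_uncertainty(measurements, uncertainties, 0)
# mean -> [[2.]], var -> [[2.]]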
def get_observation(self):
    """
    Get observation from environment.

    :return: observation array
    """
    return ivy.concatenate(
        (ivy.cos(self.angle), ivy.sin(self.angle), self.angle_vel), axis=-1)
def pixel_cost_volume(image1, image2, search_range, batch_shape=None):
    """
    Compute cost volume from image feature patch comparisons between first image
    :math:`\mathbf{X}_1\in\mathbb{R}^{h×w×d}` and second image :math:`\mathbf{X}_2\in\mathbb{R}^{h×w×d}`,
    as used in the FlowNet paper.\n
    `[reference] <https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Dosovitskiy_FlowNet_Learning_Optical_ICCV_2015_paper.pdf>`_

    :param image1: Image 1 *[batch_shape,h,w,D]*
    :type image1: array
    :param image2: Image 2 *[batch_shape,h,w,D]*
    :type image2: array
    :param search_range: Search range for patch comparisons.
    :type search_range: int
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: Cost volume between the images *[batch_shape,h,w,(search_range*2+1)^2]*
    """
    if batch_shape is None:
        batch_shape = image1.shape[:-3]

    # shapes as list
    batch_shape = list(batch_shape)

    # shape info
    shape = image1.shape
    h = shape[-3]
    w = shape[-2]
    max_offset = search_range * 2 + 1

    # pad dims
    pad_dims = [[0, 0]] * len(batch_shape) + [[search_range, search_range]] * 2 + [[0, 0]]

    # BS x (H+2*SR) x (W+2*SR) x D
    padded_lvl = _ivy.zero_pad(image2, pad_dims)

    # create list
    cost_vol = []

    # iterate through patches
    for y in range(0, max_offset):
        for x in range(0, max_offset):

            # BS x H x W x D
            tensor_slice = padded_lvl[..., y:y + h, x:x + w, :]

            # BS x H x W x 1
            cost = _ivy.reduce_mean(image1 * tensor_slice, axis=-1, keepdims=True)

            # append to list
            cost_vol.append(cost)

    # BS x H x W x (max_offset^2)
    return _ivy.concatenate(cost_vol, -1)
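# Hedged usage sketch (not part of the original source): two random 8 x 8 feature maps
# with 16 channels and a search range of 2 give a cost volume with (2*2+1)^2 = 25
# channels; _ivy.random_uniform usage mirrors the calls elsewhere in this file.
image1 = _ivy.random_uniform(0., 1., [1, 8, 8, 16])
image2 = _ivy.random_uniform(0., 1., [1, 8, 8, 16])
cost_vol = pixel_cost_volume(image1, image2, search_range=2)  # shape: 1 x 8 x 8 x 25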