def test_reduce_min(x, axis, kd, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    x = tensor_fn(x, dtype_str, dev_str)
    ret = ivy.reduce_min(x, axis, kd)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    if axis is None:
        expected_shape = [1] * len(x.shape) if kd else []
    else:
        axis_ = [axis] if isinstance(axis, int) else axis
        axis_ = [item % len(x.shape) for item in axis_]
        expected_shape = list(x.shape)
        if kd:
            expected_shape = [
                1 if i % len(x.shape) in axis_ else item
                for i, item in enumerate(expected_shape)
            ]
        else:
            # pop in descending index order, so earlier pops don't shift the
            # indices of later pops when reducing over multiple axes
            for item in sorted(axis_, reverse=True):
                expected_shape.pop(item)
    expected_shape = [1] if expected_shape == [] else expected_shape
    assert ret.shape == tuple(expected_shape)
    # value test
    assert np.allclose(call(ivy.reduce_min, x, axis, kd),
                       ivy.numpy.reduce_min(ivy.to_numpy(x), axis, kd))
    # compilation test
    helpers.assert_compilable(ivy.reduce_min)
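# A minimal, self-contained numpy sketch of the cardinality logic exercised
# above: reducing with keepdims retains singleton axes, reducing without
# keepdims drops them, and axis=None reduces over everything.
import numpy as np

x = np.random.uniform(size=(2, 3, 4))
assert np.min(x, axis=1, keepdims=True).shape == (2, 1, 4)
assert np.min(x, axis=(0, 1), keepdims=False).shape == (4,)
assert np.min(x, axis=None, keepdims=True).shape == (1, 1, 1)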
def sphere_signed_distances(sphere_positions, sphere_radii, query_positions):
    """
    Return the signed distances of a set of query points from the sphere surfaces.\n
    `[reference] <https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm>`_

    :param sphere_positions: Positions of the spheres *[batch_shape,num_spheres,3]*
    :type sphere_positions: array
    :param sphere_radii: Radii of the spheres *[batch_shape,num_spheres,1]*
    :type sphere_radii: array
    :param query_positions: Points for which to query the signed distances *[batch_shape,num_points,3]*
    :type query_positions: array
    :return: The distances of the query points from the closest sphere surface *[batch_shape,num_points,1]*
    """

    # BS x NS x 1 x 3
    sphere_positions = _ivy.expand_dims(sphere_positions, -2)

    # BS x 1 x NP x 3
    query_positions = _ivy.expand_dims(query_positions, -3)

    # BS x NS x NP x 1
    distances_to_centre = _ivy.reduce_sum(
        (query_positions - sphere_positions)**2, -1, keepdims=True)**0.5

    # BS x NS x NP x 1
    all_sdfs = distances_to_centre - _ivy.expand_dims(sphere_radii, -2)

    # BS x NP x 1
    return _ivy.reduce_min(all_sdfs, -3)
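# A minimal numpy mirror of the function body above, showing the sign
# convention (negative inside a sphere, zero on a surface, positive outside);
# the sphere and query values below are illustrative.
import numpy as np

sphere_positions = np.array([[0., 0., 0.], [5., 0., 0.]])  # 2 x 3
sphere_radii = np.array([[1.], [2.]])                      # 2 x 1
query_positions = np.array([[0., 0., 0.], [4., 0., 0.]])   # 2 x 3

dists = np.linalg.norm(query_positions[None] - sphere_positions[:, None],
                       axis=-1, keepdims=True)
sdfs = np.min(dists - sphere_radii[:, None], axis=0)
# query 1 sits at the centre of the unit sphere: sdf = -1
# query 2 sits 1m inside the second sphere: sdf = -1
assert np.allclose(sdfs, np.array([[-1.], [-1.]]))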
def main(interactive=True, try_use_sim=True, f=None):

    # config
    this_dir = os.path.dirname(os.path.realpath(__file__))
    f = choose_random_framework(excluded=['numpy']) if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    lr = 0.5
    num_anchors = 3
    num_sample_points = 100

    # spline start
    anchor_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, 2 + num_anchors), -1), 'float32')
    query_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, num_sample_points), -1), 'float32')

    # learnable parameters
    robot_start_config = ivy.array(ivy.cast(sim.robot_start_config, 'float32'))
    robot_target_config = ivy.array(
        ivy.cast(sim.robot_target_config, 'float32'))
    learnable_anchor_vals = ivy.variable(
        ivy.cast(
            ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config,
                             2 + num_anchors)[..., 1:-1], (1, 0)), 'float32'))

    # optimizer
    optimizer = ivy.SGD(lr=lr)

    # optimize
    it = 0
    colliding = True
    clearance = 0
    joint_query_vals = None
    while colliding:
        total_cost, grads, joint_query_vals, link_positions, sdf_vals = ivy.execute_with_gradients(
            lambda xs: compute_cost_and_sdfs(
                xs['w'], anchor_points, robot_start_config,
                robot_target_config, query_points, sim),
            Container({'w': learnable_anchor_vals}))
        colliding = ivy.reduce_min(sdf_vals[2:]) < clearance
        sim.update_path_visualization(
            link_positions, sdf_vals,
            os.path.join(this_dir, 'msp_no_sim', 'path_{}.png'.format(it)))
        learnable_anchor_vals = optimizer.step(
            Container({'w': learnable_anchor_vals}), grads)['w']
        it += 1
    sim.execute_motion(joint_query_vals)
    sim.close()
    unset_framework()
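# A small numpy sketch (with hypothetical values) of the loop's termination
# test above: optimization continues while the smallest signed distance among
# the sampled spline configurations is below the required clearance.
import numpy as np

clearance = 0.
sdf_vals = np.array([0.30, 0.12, -0.05, 0.21])  # hypothetical per-sample SDFs
colliding = np.min(sdf_vals[2:]) < clearance    # True: one sample penetrates
assert colliding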
def _log_nested(self, nest, global_step, name_hierarchy, spec):
    if not ivy.exists(self._writer):
        raise Exception('torch must be installed in order to use the file writer for tensorboard logging.')
    if 'global_vector_norm' in spec:
        self._writer.add_scalar(name_hierarchy + '/global vector norm',
                                ivy.to_scalar(ivy.to_native(nest.vector_norm(global_norm=True))),
                                global_step)
    for k, v in nest.items():
        new_name_hierarchy = name_hierarchy + '/' + k
        if isinstance(v, dict):
            self._log_nested(v, global_step, new_name_hierarchy, spec)
        else:
            if 'mean' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/mean',
                                        ivy.to_scalar(ivy.to_native(ivy.reduce_mean(v))),
                                        global_step)
            if 'abs_mean' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/abs mean',
                                        ivy.to_scalar(ivy.to_native(ivy.reduce_mean(ivy.abs(v)))),
                                        global_step)
            if 'var' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/var',
                                        ivy.to_scalar(ivy.to_native(ivy.reduce_var(v))),
                                        global_step)
            if 'abs_var' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/abs var',
                                        ivy.to_scalar(ivy.to_native(ivy.reduce_var(ivy.abs(v)))),
                                        global_step)
            if 'min' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/min',
                                        ivy.to_scalar(ivy.to_native(ivy.reduce_min(v))),
                                        global_step)
            if 'abs_min' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/abs min',
                                        ivy.to_scalar(ivy.to_native(ivy.reduce_min(ivy.abs(v)))),
                                        global_step)
            if 'max' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/max',
                                        ivy.to_scalar(ivy.to_native(ivy.reduce_max(v))),
                                        global_step)
            if 'abs_max' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/abs max',
                                        ivy.to_scalar(ivy.to_native(ivy.reduce_max(ivy.abs(v)))),
                                        global_step)
            if 'vector_norm' in spec:
                self._writer.add_scalar(new_name_hierarchy + '/vector norm',
                                        ivy.to_scalar(ivy.to_native(ivy.vector_norm(v))),
                                        global_step)
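# The spec-driven logging above reduces each leaf array with a different
# statistic before writing it. A condensed sketch of the same dispatch
# pattern, assuming plain numpy leaves and any writer exposing an
# add_scalar(tag, value, step) method (e.g. a tensorboard SummaryWriter);
# the log_nested name and STATS table below are illustrative, not part of
# the class above.
import numpy as np

STATS = {
    'mean': np.mean,
    'abs_mean': lambda v: np.mean(np.abs(v)),
    'var': np.var,
    'abs_var': lambda v: np.var(np.abs(v)),
    'min': np.min,
    'abs_min': lambda v: np.min(np.abs(v)),
    'max': np.max,
    'abs_max': lambda v: np.max(np.abs(v)),
    'vector_norm': np.linalg.norm,
}

def log_nested(writer, nest, global_step, name_hierarchy, spec):
    # recurse through the nested dict, writing one scalar per leaf per stat
    for k, v in nest.items():
        tag = name_hierarchy + '/' + k
        if isinstance(v, dict):
            log_nested(writer, v, global_step, tag, spec)
            continue
        for stat in spec:
            if stat in STATS:
                writer.add_scalar(tag + '/' + stat.replace('_', ' '),
                                  float(STATS[stat](v)), global_step)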
def erosion(tensor: ivy.Array, kernel: ivy.Array) -> ivy.Array:
    r"""Returns the eroded image, applying the same kernel in each channel.

    The kernel must have 2 dimensions, each one defined by an odd number.

    Args:
       tensor (ivy.Array): Image with shape :math:`(B, C, H, W)`.
       kernel (ivy.Array): Structuring element with shape :math:`(H, W)`.

    Returns:
       ivy.Array: Eroded image with shape :math:`(B, C, H, W)`.

    Example:
        >>> tensor = ivy.random_uniform(shape=(1, 3, 5, 5))
        >>> kernel = ivy.ones((5, 5))
        >>> output = erosion(tensor, kernel)
    """
    if ivy.backend == 'torch' and not isinstance(tensor, ivy.Array):
        raise TypeError("Input type is not an ivy.Array. Got {}".format(type(tensor)))

    if len(tensor.shape) != 4:
        raise ValueError("Input size must have 4 dimensions. Got {}".format(
            ivy.get_num_dims(tensor)))

    if ivy.backend == 'torch' and not isinstance(kernel, ivy.Array):
        raise TypeError("Kernel type is not an ivy.Array. Got {}".format(type(kernel)))

    if len(kernel.shape) != 2:
        raise ValueError("Kernel size must have 2 dimensions. Got {}".format(
            ivy.get_num_dims(kernel)))

    # prepare kernel
    se_e: ivy.Array = kernel - 1.
    kernel_e: ivy.Array = ivy.transpose(_se_to_mask(se_e), (2, 3, 1, 0))

    # pad
    se_h, se_w = kernel.shape
    pad_e: List[List[int]] = [[0] * 2, [0] * 2,
                              [se_h // 2, se_w // 2], [se_h // 2, se_w // 2]]

    output: ivy.Array = ivy.reshape(
        tensor,
        (tensor.shape[0] * tensor.shape[1], 1, tensor.shape[2], tensor.shape[3]))
    output = ivy.constant_pad(output, pad_e, value=1.)
    output = ivy.reduce_min(
        ivy.conv2d(output, kernel_e, 1, 'VALID', data_format='NCHW') -
        ivy.reshape(se_e, (1, -1, 1, 1)), [1])

    return ivy.reshape(output, tensor.shape)
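# A worked numpy check of the erosion semantics above, assuming a flat
# (all-ones) 3x3 structuring element: each output pixel is then simply the
# minimum over its 3x3 neighbourhood, with borders padded by 1. as in the
# constant_pad call above.
import numpy as np

img = np.random.uniform(size=(5, 5)).astype('float32')
padded = np.pad(img, 1, constant_values=1.)
eroded = np.array([[padded[i:i + 3, j:j + 3].min() for j in range(5)]
                   for i in range(5)])
assert eroded.shape == img.shape
assert (eroded <= img).all()  # erosion never increases intensity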
def sdf(self, query_positions):
    """
    Return signed distance function for the scene

    :param query_positions: Points for which to query the signed distance *[batch_shape,num_points,3]*
    :type query_positions: array
    :return: The signed distance values for each of the query points in the scene *[batch_shape,num_points,1]*
    """

    # BS x NP x 1
    all_sdfs_list = list()
    if self.sphere_positions is not None:
        sphere_sdfs = ivy_sdf.sphere_signed_distances(
            self.sphere_positions[..., 0:3, -1], self.sphere_radii,
            query_positions)
        all_sdfs_list.append(sphere_sdfs)
    if self.cuboid_ext_mats is not None:
        cuboid_sdfs = ivy_sdf.cuboid_signed_distances(
            self.cuboid_ext_mats, self.cuboid_dims, query_positions)
        all_sdfs_list.append(cuboid_sdfs)
    sdfs_concatted = _ivy.concatenate(
        all_sdfs_list, -1) if len(all_sdfs_list) > 1 else all_sdfs_list[0]
    return _ivy.reduce_min(sdfs_concatted, -1, keepdims=True)
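# A small numpy sketch of the combination rule above: the SDF of a scene
# union is the pointwise minimum over the per-primitive SDF channels
# (hypothetical per-point values below).
import numpy as np

sphere_sdfs = np.array([[0.5], [-0.2], [1.0]])  # NP x 1
cuboid_sdfs = np.array([[0.1], [0.3], [-0.4]])  # NP x 1
scene_sdf = np.min(np.concatenate((sphere_sdfs, cuboid_sdfs), -1),
                   -1, keepdims=True)
assert np.allclose(scene_sdf, np.array([[0.1], [-0.2], [-0.4]]))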
def cuboid_signed_distances(cuboid_ext_mats, cuboid_dims, query_positions, batch_shape=None):
    """
    Return the signed distances of a set of query points from the cuboid surfaces.\n
    `[reference] <https://www.iquilezles.org/www/articles/distfunctions/distfunctions.htm>`_

    :param cuboid_ext_mats: Extrinsic matrices of the cuboids *[batch_shape,num_cuboids,3,4]*
    :type cuboid_ext_mats: array
    :param cuboid_dims: Dimensions of the cuboids, in the order x, y, z *[batch_shape,num_cuboids,3]*
    :type cuboid_dims: array
    :param query_positions: Points for which to query the signed distances *[batch_shape,num_points,3]*
    :type query_positions: array
    :param batch_shape: Shape of batch. Assumed no batches if None.
    :type batch_shape: sequence of ints, optional
    :return: The distances of the query points from the closest cuboid surface *[batch_shape,num_points,1]*
    """

    if batch_shape is None:
        batch_shape = cuboid_ext_mats.shape[:-3]

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)
    batch_dims_for_trans = list(range(num_batch_dims))
    num_cuboids = cuboid_ext_mats.shape[-3]
    num_points = query_positions.shape[-2]

    # BS x 3 x NP
    query_positions_trans = _ivy.transpose(
        query_positions,
        batch_dims_for_trans + [num_batch_dims + 1, num_batch_dims])

    # BS x 1 x NP
    ones = _ivy.ones_like(query_positions_trans[..., 0:1, :])

    # BS x 4 x NP
    query_positions_trans_homo = _ivy.concatenate(
        (query_positions_trans, ones), -2)

    # BS x NCx3 x 4
    cuboid_ext_mats_flat = _ivy.reshape(cuboid_ext_mats, batch_shape + [-1, 4])

    # BS x NCx3 x NP
    rel_query_positions_trans_flat = _ivy.matmul(cuboid_ext_mats_flat,
                                                 query_positions_trans_homo)

    # BS x NC x 3 x NP
    rel_query_positions_trans = _ivy.reshape(
        rel_query_positions_trans_flat,
        batch_shape + [num_cuboids, 3, num_points])

    # BS x NC x NP x 3
    rel_query_positions = _ivy.transpose(
        rel_query_positions_trans,
        batch_dims_for_trans + [num_batch_dims, num_batch_dims + 2, num_batch_dims + 1])
    q = _ivy.abs(rel_query_positions) - _ivy.expand_dims(cuboid_dims / 2, -2)
    q_max_clipped = _ivy.maximum(q, 1e-12)

    # BS x NC x NP x 1
    q_min_clipped = _ivy.minimum(_ivy.reduce_max(q, -1, keepdims=True), 0.)
    q_max_clipped_len = _ivy.reduce_sum(q_max_clipped**2, -1, keepdims=True)**0.5
    sdfs = q_max_clipped_len + q_min_clipped

    # BS x NP x 1
    return _ivy.reduce_min(sdfs, -3)
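# A worked numpy check of the box SDF formula above, assuming a single
# axis-aligned 2x2x2 cuboid centred at the origin (identity extrinsics),
# and keeping the same 1e-12 clipping as the function body.
import numpy as np

def box_sdf(rel_query, dims):
    q = np.abs(rel_query) - dims / 2
    outside = np.linalg.norm(np.maximum(q, 1e-12), axis=-1)
    inside = np.minimum(np.max(q, axis=-1), 0.)
    return outside + inside

dims = np.array([2., 2., 2.])
assert np.isclose(box_sdf(np.array([0., 0., 0.]), dims), -1., atol=1e-6)  # centre
assert np.isclose(box_sdf(np.array([2., 0., 0.]), dims), 1., atol=1e-6)   # 1m outside a face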
def _kalman_filter_on_measurement_sequence(
        self, prev_fused_val, prev_fused_variance, hole_prior, hole_prior_var,
        meas, meas_vars, uniform_sphere_pixel_coords, agent_rel_poses,
        agent_rel_pose_covs, agent_rel_mats, batch_size, num_timesteps):
    """
    Perform kalman filter on measurement sequence

    :param prev_fused_val: Fused value from previous timestamp *[batch_size, oh, ow, (3+f)]*
    :param prev_fused_variance: Fused variance from previous timestamp *[batch_size, oh, ow, (3+f)]*
    :param hole_prior: Prior for holes in quantization *[batch_size, oh, ow, (1+f)]*
    :param hole_prior_var: Prior variance for holes in quantization *[batch_size, oh, ow, (3+f)]*
    :param meas: Measurements *[batch_size, num_timesteps, oh, ow, (3+f)]*
    :param meas_vars: Measurement variances *[batch_size, num_timesteps, oh, ow, (3+f)]*
    :param uniform_sphere_pixel_coords: Uniform sphere pixel co-ordinates *[batch_size, oh, ow, 3]*
    :param agent_rel_poses: Relative poses of agents to the previous step *[batch_size, num_timesteps, 6]*
    :param agent_rel_pose_covs: Agent relative pose covariances *[batch_size, num_timesteps, 6, 6]*
    :param agent_rel_mats: Relative transformation matrices of agents to the previous step
                           *[batch_size, num_timesteps, 3, 4]*
    :param batch_size: Size of batch
    :param num_timesteps: Number of frames
    :return: list of *[batch_size, oh, ow, (3+f)]*, list of *[batch_size, oh, ow, (3+f)]*
    """

    fused_list = list()
    fused_variances_list = list()
    for i in range(num_timesteps):

        # project prior from previous frame #
        # ----------------------------------#

        # B x OH x OW x (3+F)
        prev_prior = prev_fused_val
        prev_prior_variance = prev_fused_variance

        # B x 3 x 4
        agent_rel_mat = agent_rel_mats[:, i]

        # B x 6
        agent_rel_pose = agent_rel_poses[:, i]

        # B x 6 x 6
        agent_rel_pose_cov = agent_rel_pose_covs[:, i]

        # B x OH x OW x (3+F)    B x OH x OW x (3+F)
        fused_projected, fused_projected_variance = self._omni_frame_to_omni_frame_projection(
            agent_rel_pose, agent_rel_mat, uniform_sphere_pixel_coords,
            prev_prior[..., 0:2], prev_prior[..., 2:3], prev_prior[..., 3:],
            agent_rel_pose_cov, prev_prior_variance, hole_prior,
            hole_prior_var, batch_size)

        # reset prior

        # B x OH x OW x (3+F)
        prior = fused_projected
        prior_var = fused_projected_variance

        # per-pixel fusion with measurements #
        # -----------------------------------#

        # extract slice for frame

        # B x OH x OW x (3+F)
        measurement = meas[:, i]
        measurement_variance = meas_vars[:, i]

        # fuse prior and measurement

        # B x 2 x OH x OW x (3+F)
        prior_and_meas = ivy.concatenate(
            (ivy.expand_dims(prior, 1), ivy.expand_dims(measurement, 1)), 1)
        prior_and_meas_variance = ivy.concatenate(
            (ivy.expand_dims(prior_var, 1),
             ivy.expand_dims(measurement_variance, 1)), 1)

        # B x OH x OW x (3+F)
        low_var_mask = ivy.reduce_sum(
            ivy.cast(
                prior_and_meas_variance <
                ivy.expand_dims(hole_prior_var, 1) * self._threshold_var_factor,
                'int32'), 1) > 0

        # B x 1 x OH x OW x (3+F)    B x 1 x OH x OW x (3+F)
        if self._with_depth_buffer:
            # ToDo: make this more efficient
            prior_low_var_mask = ivy.reduce_max(ivy.cast(
                prior_var >= hole_prior_var * self._threshold_var_factor,
                'int32'), -1, keepdims=True) == 0
            meas_low_var_mask = ivy.reduce_max(ivy.cast(
                measurement_variance >= hole_prior_var * self._threshold_var_factor,
                'int32'), -1, keepdims=True) == 0
            neither_low_var_mask = ivy.logical_and(
                ivy.logical_not(prior_low_var_mask),
                ivy.logical_not(meas_low_var_mask))
            prior_w_large = ivy.where(prior_low_var_mask, prior,
                                      ivy.ones_like(prior) * 1e12)
            meas_w_large = ivy.where(meas_low_var_mask, measurement,
                                     ivy.ones_like(measurement) * 1e12)
            prior_and_meas_w_large = ivy.concatenate(
                (ivy.expand_dims(prior_w_large, 1),
                 ivy.expand_dims(meas_w_large, 1)), 1)
            fused_val_unsmoothed = ivy.reduce_min(
                prior_and_meas_w_large, 1, keepdims=True)
            fused_val_unsmoothed = ivy.where(
                neither_low_var_mask, prior, fused_val_unsmoothed)
            # ToDo: solve this variance correspondence properly,
            #  rather than assuming the most certain
            fused_variance_unsmoothed = ivy.reduce_min(
                prior_and_meas_variance, 1, keepdims=True)
        else:
            fused_val_unsmoothed, fused_variance_unsmoothed = \
                self._fuse_measurements_with_uncertainty(
                    prior_and_meas, prior_and_meas_variance, 1)

        # B x OH x OW x (3+F)
        # This prevents accumulating certainty from duplicate re-projections from prior measurements
        fused_variance_unsmoothed = ivy.where(
            low_var_mask, fused_variance_unsmoothed[:, 0], hole_prior_var)

        # B x OH x OW x (3+F)
        fused_val = fused_val_unsmoothed[:, 0]
        fused_variance = fused_variance_unsmoothed
        low_var_mask = fused_variance < hole_prior_var

        # B x OH x OW x (3+F)    B x OH x OW x (3+F)
        fused_val, fused_variance = self.smooth(
            fused_val, fused_variance, low_var_mask, self._smooth_mean,
            self._smooth_kernel_size, True, True, batch_size)

        # append to list for returning

        # B x OH x OW x (3+F)
        fused_list.append(fused_val)

        # B x OH x OW x (3+F)
        fused_variances_list.append(fused_variance)

        # update for next time step
        prev_fused_val = fused_val
        prev_fused_variance = fused_variance

    # list of *[batch_size, oh, ow, (3+f)]*,  list of *[batch_size, oh, ow, (3+f)]*
    return fused_list, fused_variances_list
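# _fuse_measurements_with_uncertainty is defined elsewhere in the class; a
# standard inverse-variance (product-of-Gaussians) fusion sketch of the kind
# that call suggests, in numpy, with illustrative values below:
import numpy as np

def fuse_measurements_with_uncertainty(means, variances, axis):
    # weight each hypothesis by its reciprocal variance, then renormalize
    recip_vars = 1 / variances
    fused_variance = 1 / np.sum(recip_vars, axis=axis, keepdims=True)
    fused_mean = np.sum(means * recip_vars, axis=axis, keepdims=True) * fused_variance
    return fused_mean, fused_variance

means = np.stack((np.zeros((2, 2)), np.ones((2, 2))), 0)  # prior, measurement
variances = np.stack((np.ones((2, 2)), np.ones((2, 2))), 0)
fused_mean, fused_var = fuse_measurements_with_uncertainty(means, variances, 0)
assert np.allclose(fused_mean, 0.5) and np.allclose(fused_var, 0.5)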
def quantize_to_image(pixel_coords, final_image_dims, feat=None, feat_prior=None,
                      with_db=False, pixel_coords_var=1e-3, feat_var=1e-3,
                      pixel_coords_prior_var=1e12, feat_prior_var=1e12,
                      var_threshold=(1e-3, 1e12), uniform_pixel_coords=None,
                      batch_shape=None, dev_str=None):
    """
    Quantize pixel co-ordinates with d feature channels (for depth, rgb, normals etc.),
    from images :math:`\mathbf{X}\in\mathbb{R}^{input\_images\_shape×(2+d)}`, which may
    have been reprojected from a host of different cameras (leading to non-integer pixel
    values), to a new quantized pixel co-ordinate image with the same feature channels
    :math:`\mathbf{X}\in\mathbb{R}^{h×w×(2+d)}`, and with integer pixel co-ordinates.
    Duplicates during the quantization are either probabilistically fused based on
    variance, or the minimum depth is chosen when using depth buffer mode.

    :param pixel_coords: Pixel co-ordinates *[batch_shape,input_size,2]*
    :type pixel_coords: array
    :param final_image_dims: Image dimensions of the final image.
    :type final_image_dims: sequence of ints
    :param feat: Features (i.e. depth, rgb, encoded), default is None. *[batch_shape,input_size,d]*
    :type feat: array, optional
    :param feat_prior: Prior feature image mean, default is None. *[batch_shape,input_size,d]*
    :type feat_prior: array or float to fill with
    :param with_db: Whether or not to use depth buffer in rendering, default is false
    :type with_db: bool, optional
    :param pixel_coords_var: Pixel coords variance *[batch_shape,input_size,2]*
    :type pixel_coords_var: array or float to fill with
    :param feat_var: Feature variance *[batch_shape,input_size,d]*
    :type feat_var: array or float to fill with
    :param pixel_coords_prior_var: Pixel coords prior variance *[batch_shape,h,w,2]*
    :type pixel_coords_prior_var: array or float to fill with
    :param feat_prior_var: Features prior variance *[batch_shape,h,w,3]*
    :type feat_prior_var: array or float to fill with
    :param var_threshold: Variance threshold, for projecting valid coords and clipping *[batch_shape,2+d,2]*
    :type var_threshold: array or sequence of floats to fill with
    :param uniform_pixel_coords: Homogeneous uniform (integer) pixel co-ordinate images,
                                 inferred from final_image_dims if None *[batch_shape,h,w,3]*
    :type uniform_pixel_coords: array, optional
    :param batch_shape: Shape of batch. Assumed no batches if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev_str: str, optional
    :return: Quantized pixel co-ordinates image with d feature channels (for depth, rgb, normals etc.)
             *[batch_shape,h,w,2+d]*, maybe the quantized variance *[batch_shape,h,w,2+d]*,
             and scatter counter image *[batch_shape,h,w,1]*
    """

    # ToDo: make variance fully optional. If not specified,
    #  then do not compute and scatter during function call for better efficiency.
    # config
    if batch_shape is None:
        batch_shape = pixel_coords.shape[:-2]

    if dev_str is None:
        dev_str = _ivy.dev_str(pixel_coords)

    if feat is None:
        d = 0
    else:
        d = feat.shape[-1]
    min_depth_diff = _ivy.array([MIN_DEPTH_DIFF], dev_str=dev_str)
    red = 'min' if with_db else 'sum'

    # shapes as list
    batch_shape = list(batch_shape)
    final_image_dims = list(final_image_dims)
    num_batch_dims = len(batch_shape)

    # variance threshold
    if isinstance(var_threshold, tuple) or isinstance(var_threshold, list):
        ones = _ivy.ones(batch_shape + [1, 2 + d, 1])
        var_threshold = _ivy.concatenate(
            (ones * var_threshold[0], ones * var_threshold[1]), -1)
    else:
        var_threshold = _ivy.reshape(var_threshold, batch_shape + [1, 2 + d, 2])

    # uniform pixel coords
    if uniform_pixel_coords is None:
        uniform_pixel_coords = \
            _ivy_svg.create_uniform_pixel_coords_image(final_image_dims, batch_shape,
                                                       dev_str=dev_str)
    uniform_pixel_coords = uniform_pixel_coords[..., 0:2]

    # Extract Values #

    feat_prior = _ivy.ones_like(feat) * feat_prior if isinstance(
        feat_prior, float) else feat_prior
    pixel_coords_var = _ivy.ones_like(pixel_coords) * pixel_coords_var \
        if isinstance(pixel_coords_var, float) else pixel_coords_var
    feat_var = _ivy.ones_like(feat) * feat_var if isinstance(
        feat_var, float) else feat_var
    pixel_coords_prior_var = _ivy.ones(batch_shape + final_image_dims + [2]) * pixel_coords_prior_var \
        if isinstance(pixel_coords_prior_var, float) else pixel_coords_prior_var
    feat_prior_var = _ivy.ones(batch_shape + final_image_dims + [d]) * feat_prior_var \
        if isinstance(feat_prior_var, float) else feat_prior_var

    # Quantize #

    # BS x N x 2
    quantized_pixel_coords = _ivy.reshape(
        _ivy.cast(_ivy.round(pixel_coords), 'int32'), batch_shape + [-1, 2])

    # Combine #

    # BS x N x (2+D)
    pc_n_feat = _ivy.reshape(_ivy.concatenate((pixel_coords, feat), -1),
                             batch_shape + [-1, 2 + d])
    pc_n_feat_var = _ivy.reshape(
        _ivy.concatenate((pixel_coords_var, feat_var), -1),
        batch_shape + [-1, 2 + d])

    # BS x H x W x (2+D)
    prior = _ivy.concatenate((uniform_pixel_coords, feat_prior), -1)
    prior_var = _ivy.concatenate((pixel_coords_prior_var, feat_prior_var), -1)

    # Validity Mask #

    # BS x N x 1
    var_validity_mask = \
        _ivy.reduce_sum(_ivy.cast(pc_n_feat_var < var_threshold[..., 1], 'int32'),
                        -1, keepdims=True) == 2 + d
    bounds_validity_mask = _ivy.logical_and(
        _ivy.logical_and(quantized_pixel_coords[..., 0:1] >= 0,
                         quantized_pixel_coords[..., 1:2] >= 0),
        _ivy.logical_and(
            quantized_pixel_coords[..., 0:1] <= final_image_dims[1] - 1,
            quantized_pixel_coords[..., 1:2] <= final_image_dims[0] - 1))
    validity_mask = _ivy.logical_and(var_validity_mask, bounds_validity_mask)

    # num_valid_indices x len(BS)+2
    validity_indices = _ivy.reshape(
        _ivy.cast(_ivy.indices_where(validity_mask), 'int32'),
        [-1, num_batch_dims + 2])
    num_valid_indices = validity_indices.shape[-2]

    if num_valid_indices == 0:
        return _ivy.concatenate((uniform_pixel_coords[..., 0:2], feat_prior), -1), \
               _ivy.concatenate((pixel_coords_prior_var, feat_prior_var), -1), \
               _ivy.zeros_like(feat[..., 0:1], dev_str=dev_str)

    # Depth Based Scaling #

    mean_depth_min = None
    mean_depth_range = None
    pc_n_feat_wo_depth_range = None
    pc_n_feat_wo_depth_min = None
    var_vals_range = None
    var_vals_min = None
    if with_db:

        # BS x N x 1
        mean_depth = pc_n_feat[..., 2:3]

        # BS x 1 x 1
        mean_depth_min = _ivy.reduce_min(mean_depth, -2, keepdims=True)
        mean_depth_max = _ivy.reduce_max(mean_depth, -2, keepdims=True)
        mean_depth_range = mean_depth_max - mean_depth_min

        # BS x N x 1
        scaled_depth = (mean_depth - mean_depth_min) / \
                       (mean_depth_range * min_depth_diff + MIN_DENOMINATOR)

        if d == 1:

            # BS x 1 x 1+D
            pc_n_feat_wo_depth_min = _ivy.zeros(batch_shape + [1, 0], dev_str=dev_str)
            pc_n_feat_wo_depth_range = _ivy.ones(batch_shape + [1, 0], dev_str=dev_str)

        else:

            # feat without depth

            # BS x N x 1+D
            pc_n_feat_wo_depth = _ivy.concatenate(
                (pc_n_feat[..., 0:2], pc_n_feat[..., 3:]), -1)

            # find the min and max of each value

            # BS x 1 x 1+D
            pc_n_feat_wo_depth_max = _ivy.reduce_max(
                pc_n_feat_wo_depth, -2, keepdims=True) + 1
            pc_n_feat_wo_depth_min = _ivy.reduce_min(
                pc_n_feat_wo_depth, -2, keepdims=True) - 1
            pc_n_feat_wo_depth_range = pc_n_feat_wo_depth_max - pc_n_feat_wo_depth_min

            # BS x N x 1+D
            normed_pc_n_feat_wo_depth = (pc_n_feat_wo_depth - pc_n_feat_wo_depth_min) / \
                                        (pc_n_feat_wo_depth_range + MIN_DENOMINATOR)

            # combine with scaled depth

            # BS x N x 1+D
            pc_n_feat_wo_depth_scaled = normed_pc_n_feat_wo_depth + scaled_depth

            # BS x N x (2+D)
            pc_n_feat = _ivy.concatenate(
                (pc_n_feat_wo_depth_scaled[..., 0:2], mean_depth,
                 pc_n_feat_wo_depth_scaled[..., 2:]), -1)

        # scale variance

        # BS x 1 x (2+D)
        var_vals_max = _ivy.reduce_max(pc_n_feat_var, -2, keepdims=True) + 1
        var_vals_min = _ivy.reduce_min(pc_n_feat_var, -2, keepdims=True) - 1
        var_vals_range = var_vals_max - var_vals_min

        # BS x N x (2+D)
        normed_var_vals = (pc_n_feat_var - var_vals_min) / \
                          (var_vals_range + MIN_DENOMINATOR)
        pc_n_feat_var = normed_var_vals + scaled_depth

        # ready for later reversal with full image dimensions

        # BS x 1 x 1 x D
        var_vals_min = _ivy.expand_dims(var_vals_min, -2)
        var_vals_range = _ivy.expand_dims(var_vals_range, -2)

    # Validity Pruning #

    # num_valid_indices x (2+D)
    pc_n_feat = _ivy.gather_nd(pc_n_feat,
                               validity_indices[..., 0:num_batch_dims + 1])
    pc_n_feat_var = _ivy.gather_nd(pc_n_feat_var,
                                   validity_indices[..., 0:num_batch_dims + 1])

    # num_valid_indices x 2
    quantized_pixel_coords = _ivy.gather_nd(
        quantized_pixel_coords, validity_indices[..., 0:num_batch_dims + 1])

    if with_db:
        means_to_scatter = pc_n_feat
        vars_to_scatter = pc_n_feat_var
    else:
        # num_valid_indices x (2+D)
        vars_to_scatter = 1 / (pc_n_feat_var + MIN_DENOMINATOR)
        means_to_scatter = pc_n_feat * vars_to_scatter

    # Scatter #

    # num_valid_indices x 1
    counter = _ivy.ones_like(pc_n_feat[..., 0:1], dev_str=dev_str)
    if with_db:
        counter *= -1

    # num_valid_indices x 2(2+D)+1
    values_to_scatter = _ivy.concatenate(
        (means_to_scatter, vars_to_scatter, counter), -1)

    # num_valid_indices x (num_batch_dims + 2)
    all_indices = _ivy.flip(quantized_pixel_coords, -1)
    if num_batch_dims > 0:
        all_indices = _ivy.concatenate(
            (validity_indices[..., :-2], all_indices), -1)

    # BS x H x W x (2(2+D) + 1)
    quantized_img = _ivy.scatter_nd(
        _ivy.reshape(all_indices, [-1, num_batch_dims + 2]),
        _ivy.reshape(values_to_scatter, [-1, 2 * (2 + d) + 1]),
        batch_shape + final_image_dims + [2 * (2 + d) + 1],
        reduction='replace' if _ivy.backend == 'mxnd' else red)

    # BS x H x W x 1
    quantized_counter = quantized_img[..., -1:]
    if with_db:
        invalidity_mask = quantized_counter != -1
    else:
        invalidity_mask = quantized_counter == 0

    if with_db:

        # BS x H x W x (2+D)
        quantized_mean_scaled = quantized_img[..., 0:2 + d]
        quantized_var_scaled = quantized_img[..., 2 + d:2 * (2 + d)]

        # BS x H x W x 1
        quantized_depth_mean = quantized_mean_scaled[..., 2:3]

        # BS x 1 x 1 x 1
        mean_depth_min = _ivy.expand_dims(mean_depth_min, -2)
        mean_depth_range = _ivy.expand_dims(mean_depth_range, -2)

        # BS x 1 x 1 x (1+D)
        pc_n_feat_wo_depth_min = _ivy.expand_dims(pc_n_feat_wo_depth_min, -2)
        pc_n_feat_wo_depth_range = _ivy.expand_dims(pc_n_feat_wo_depth_range, -2)

        # BS x 1 x 1 x (2+D) x 2
        var_threshold = _ivy.expand_dims(var_threshold, -3)

        # BS x H x W x (1+D)
        quantized_mean_wo_depth_scaled = _ivy.concatenate(
            (quantized_mean_scaled[..., 0:2], quantized_mean_scaled[..., 3:]), -1)
        quantized_mean_wo_depth_normed = quantized_mean_wo_depth_scaled - \
            (quantized_depth_mean - mean_depth_min) / \
            (mean_depth_range * min_depth_diff + MIN_DENOMINATOR)
        quantized_mean_wo_depth = quantized_mean_wo_depth_normed * \
            pc_n_feat_wo_depth_range + pc_n_feat_wo_depth_min
        prior_wo_depth = _ivy.concatenate((prior[..., 0:2], prior[..., 3:]), -1)
        quantized_mean_wo_depth = _ivy.where(invalidity_mask, prior_wo_depth,
                                             quantized_mean_wo_depth)

        # BS x H x W x (2+D)
        quantized_mean = _ivy.concatenate(
            (quantized_mean_wo_depth[..., 0:2], quantized_depth_mean,
             quantized_mean_wo_depth[..., 2:]), -1)

        # BS x H x W x (2+D)
        quantized_var_normed = quantized_var_scaled - \
            (quantized_depth_mean - mean_depth_min) / \
            (mean_depth_range * min_depth_diff + MIN_DENOMINATOR)
        quantized_var = _ivy.maximum(
            quantized_var_normed * var_vals_range + var_vals_min,
            var_threshold[..., 0])
        quantized_var = _ivy.where(invalidity_mask, prior_var, quantized_var)
    else:

        # BS x H x W x (2+D)
        quantized_sum_mean_x_recip_var = quantized_img[..., 0:2 + d]
        quantized_var_wo_increase = _ivy.where(
            invalidity_mask, prior_var,
            (1 / (quantized_img[..., 2 + d:2 * (2 + d)] + MIN_DENOMINATOR)))
        quantized_var = _ivy.maximum(
            quantized_var_wo_increase * quantized_counter,
            _ivy.expand_dims(var_threshold[..., 0], -2))
        quantized_var = _ivy.where(invalidity_mask, prior_var, quantized_var)
        quantized_mean = _ivy.where(
            invalidity_mask, prior,
            quantized_var_wo_increase * quantized_sum_mean_x_recip_var)

    # BS x H x W x (2+D)    BS x H x W x (2+D)    BS x H x W x 1
    return quantized_mean, quantized_var, quantized_counter
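# A condensed numpy sketch of the duplicate rule implemented above when
# with_db=True: if several points quantize to the same pixel, the smallest
# (closest) depth wins, mirroring scatter_nd with a 'min' reduction. The
# coordinates and depths below are hypothetical.
import numpy as np

h, w = 2, 2
pixel_coords = np.array([[0, 0], [0, 0], [1, 1]])  # two points collide at (0, 0)
depths = np.array([3.0, 1.5, 2.0])
depth_img = np.full((h, w), np.inf)
for (x, y), dpth in zip(pixel_coords, depths):
    depth_img[y, x] = min(depth_img[y, x], dpth)   # scatter with 'min' reduction
assert depth_img[0, 0] == 1.5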
def coords_to_voxel_grid(coords, voxel_shape_spec, mode='DIMS', coord_bounds=None,
                         features=None, batch_shape=None, dev_str=None):
    """
    Create voxel grid :math:`\mathbf{X}_v\in\mathbb{R}^{x×y×z×(3+N+1)}` from homogeneous
    co-ordinates :math:`\mathbf{X}_w\in\mathbb{R}^{num\_coords×4}`. Each voxel contains
    3+N+1 values: the mean normalized co-ordinate inside the voxel for the projected
    pixels with :math:`0 < x, y, z < 1`, N coordinate features (optional), and also the
    number of projected pixels inside the voxel.
    Grid resolutions and dimensions are also returned separately for each entry in the
    batch. Note that the final batched voxel grid returned uses the maximum grid
    dimensions across the batch; this means some returned grids may contain redundant
    space, with all but the single largest batched grid occupying a subset of the grid
    space, originating from the corner of minimum :math:`x,y,z` values.\n
    `[reference] <https://en.wikipedia.org/wiki/Voxel>`_

    :param coords: Homogeneous co-ordinates *[batch_shape,c,4]*
    :type coords: array
    :param voxel_shape_spec: Either the number of voxels in x,y,z directions, or the
                             resolutions (metres) in x,y,z directions, depending on mode.
                             Batched or unbatched. *[batch_shape,3]* or *[3]*
    :type voxel_shape_spec: array
    :param mode: Shape specification mode, either "DIMS" or "RES"
    :type mode: str
    :param coord_bounds: Co-ordinate x, y, z boundaries *[batch_shape,6]* or *[6]*
    :type coord_bounds: array
    :param features: Co-ordinate features *[batch_shape,c,4]*.
                     E.g. RGB values, low-dimensional features, etc.
                     Features mapping to the same voxel are averaged.
    :type features: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev_str: str, optional
    :return: Voxel grid *[batch_shape,x_max,y_max,z_max,3+feature_size+1]*, dimensions *[batch_shape,3]*,
             resolutions *[batch_shape,3]*, voxel_grid_lower_corners *[batch_shape,3]*
    """

    if batch_shape is None:
        batch_shape = coords.shape[:-2]

    if dev_str is None:
        dev_str = _ivy.dev_str(coords)

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)
    num_coords_per_batch = coords.shape[-2]

    # voxel shape spec as array
    if len(voxel_shape_spec) == 3:

        # BS x 1 x 3
        voxel_shape_spec = _ivy.expand_dims(
            _ivy.tile(
                _ivy.reshape(_ivy.array(voxel_shape_spec),
                             [1] * num_batch_dims + [3]), batch_shape + [1]), -2)

    # coord bounds spec as array
    if coord_bounds is not None and len(coord_bounds) == 6:

        # BS x 1 x 6
        coord_bounds = _ivy.expand_dims(
            _ivy.tile(
                _ivy.reshape(_ivy.array(coord_bounds, dtype_str='float32'),
                             [1] * num_batch_dims + [6]), batch_shape + [1]), -2)

    # BS x N x 3
    coords = coords[..., 0:3]

    if coord_bounds is not None:

        # BS x 1
        x_min = coord_bounds[..., 0:1]
        y_min = coord_bounds[..., 1:2]
        z_min = coord_bounds[..., 2:3]
        x_max = coord_bounds[..., 3:4]
        y_max = coord_bounds[..., 4:5]
        z_max = coord_bounds[..., 5:6]

        # BS x N x 1
        x_coords = coords[..., 0:1]
        y_coords = coords[..., 1:2]
        z_coords = coords[..., 2:3]
        x_validity_mask = _ivy.logical_and(x_coords > x_min, x_coords < x_max)
        y_validity_mask = _ivy.logical_and(y_coords > y_min, y_coords < y_max)
        z_validity_mask = _ivy.logical_and(z_coords > z_min, z_coords < z_max)

        # BS x N
        full_validity_mask = _ivy.logical_and(
            _ivy.logical_and(x_validity_mask, y_validity_mask),
            z_validity_mask)[..., 0]

        # BS x 1 x 3
        bb_mins = coord_bounds[..., 0:3]
        bb_maxs = coord_bounds[..., 3:6]
        bb_ranges = bb_maxs - bb_mins
    else:

        # BS x N
        full_validity_mask = _ivy.cast(
            _ivy.ones(batch_shape + [num_coords_per_batch]), 'bool')

        # BS x 1 x 3
        bb_mins = _ivy.reduce_min(coords, axis=-2, keepdims=True)
        bb_maxs = _ivy.reduce_max(coords, axis=-2, keepdims=True)
        bb_ranges = bb_maxs - bb_mins

    # get voxel dimensions
    if mode == 'DIMS':
        # BS x 1 x 3
        dims = _ivy.cast(voxel_shape_spec, 'int32')
    elif mode == 'RES':
        # BS x 1 x 3
        res = _ivy.cast(voxel_shape_spec, 'float32')
        dims = _ivy.cast(_ivy.ceil(bb_ranges / (res + MIN_DENOMINATOR)), 'int32')
    else:
        raise Exception('Invalid mode selection. Must be either "DIMS" or "RES"')
    dims_m_one = _ivy.cast(dims - 1, 'int32')

    # BS x 1 x 3
    res = bb_ranges / (_ivy.cast(dims, 'float32') + MIN_DENOMINATOR)

    # BS x NC x 3
    voxel_indices = _ivy.minimum(
        _ivy.cast(_ivy.floor((coords - bb_mins) / (res + MIN_DENOMINATOR)), 'int32'),
        dims_m_one)

    # BS x NC x 3
    voxel_values = ((coords - bb_mins) % res) / (res + MIN_DENOMINATOR)
    feature_size = 0
    if features is not None:
        feature_size = features.shape[-1]
        voxel_values = _ivy.concatenate([voxel_values, features], axis=-1)

    # TNVC x len(BS)+1
    valid_coord_indices = _ivy.cast(_ivy.indices_where(full_validity_mask), 'int32')

    # scalar
    total_num_valid_coords = valid_coord_indices.shape[0]

    # TNVC x 3
    voxel_values_pruned_flat = _ivy.gather_nd(voxel_values, valid_coord_indices)
    voxel_indices_pruned_flat = _ivy.gather_nd(voxel_indices, valid_coord_indices)

    # TNVC x len(BS)+2
    if num_batch_dims == 0:
        all_indices_pruned_flat = voxel_indices_pruned_flat
    else:
        batch_indices = valid_coord_indices[..., :-1]
        all_indices_pruned_flat = _ivy.concatenate(
            [batch_indices] + [voxel_indices_pruned_flat], -1)

    # TNVC x 4
    voxel_values_pruned_flat = \
        _ivy.concatenate((voxel_values_pruned_flat,
                          _ivy.ones([total_num_valid_coords, 1], dev_str=dev_str)), -1)

    # get max dims list for scatter
    if num_batch_dims > 0:
        max_dims = _ivy.reduce_max(_ivy.reshape(dims, batch_shape + [3]),
                                   axis=list(range(num_batch_dims)))
    else:
        max_dims = _ivy.reshape(dims, batch_shape + [3])
    batch_shape_array_list = [_ivy.array(batch_shape, 'int32')] \
        if num_batch_dims != 0 else []
    total_dims_list = _ivy.to_list(
        _ivy.concatenate(
            batch_shape_array_list +
            [max_dims, _ivy.array([4 + feature_size], 'int32')], -1))

    # BS x x_max x y_max x z_max x 4
    scattered = _ivy.scatter_nd(
        all_indices_pruned_flat, voxel_values_pruned_flat, total_dims_list,
        reduction='replace' if _ivy.backend == 'mxnd' else 'sum')

    # BS x x_max x y_max x z_max x 4 + feature_size,    BS x 3,    BS x 3,    BS x 3
    return _ivy.concatenate(
        (scattered[..., :-1] /
         (_ivy.maximum(scattered[..., -1:], 1.) + MIN_DENOMINATOR),
         scattered[..., -1:]), -1), dims[..., 0, :], res[..., 0, :], bb_mins[..., 0, :]
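# A condensed numpy sketch of the voxelization rule above ('DIMS' mode, no
# batch dims): co-ordinates are binned by floor((x - min) / res), with an
# epsilon standing in for MIN_DENOMINATOR, and clamped to the last voxel.
import numpy as np

coords = np.random.uniform(0., 1., (100, 3)).astype('float32')
dims = np.array([4, 4, 4])
bb_mins = coords.min(0, keepdims=True)
bb_ranges = coords.max(0, keepdims=True) - bb_mins
res = bb_ranges / dims
voxel_indices = np.minimum(
    np.floor((coords - bb_mins) / (res + 1e-12)).astype('int32'), dims - 1)
assert voxel_indices.min() >= 0 and (voxel_indices < dims).all()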
def main(interactive=True, f=None):
    global INTERACTIVE
    INTERACTIVE = interactive

    # Framework Setup #
    # ----------------#

    # choose random framework
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Camera Geometry #
    # ----------------#

    # intrinsics

    # common intrinsic params
    img_dims = [512, 512]
    pp_offsets = ivy.array([dim / 2 - 0.5 for dim in img_dims], 'float32')
    cam_persp_angles = ivy.array([60 * np.pi / 180] * 2, 'float32')

    # ivy cam intrinsics container
    intrinsics = ivy_vision.persp_angles_and_pp_offsets_to_intrinsics_object(
        cam_persp_angles, pp_offsets, img_dims)

    # extrinsics

    # 3 x 4
    cam1_inv_ext_mat = ivy.array(np.load(data_dir + '/cam1_inv_ext_mat.npy'), 'float32')
    cam2_inv_ext_mat = ivy.array(np.load(data_dir + '/cam2_inv_ext_mat.npy'), 'float32')

    # full geometry

    # ivy cam geometry container
    cam1_geom = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
        cam1_inv_ext_mat, intrinsics)
    cam2_geom = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
        cam2_inv_ext_mat, intrinsics)
    cam_geoms = [cam1_geom, cam2_geom]

    # Camera Geometry Check #
    # ----------------------#

    # assert camera geometry shapes
    for cam_geom in cam_geoms:
        assert cam_geom.intrinsics.focal_lengths.shape == (2,)
        assert cam_geom.intrinsics.persp_angles.shape == (2,)
        assert cam_geom.intrinsics.pp_offsets.shape == (2,)
        assert cam_geom.intrinsics.calib_mats.shape == (3, 3)
        assert cam_geom.intrinsics.inv_calib_mats.shape == (3, 3)
        assert cam_geom.extrinsics.cam_centers.shape == (3, 1)
        assert cam_geom.extrinsics.Rs.shape == (3, 3)
        assert cam_geom.extrinsics.inv_Rs.shape == (3, 3)
        assert cam_geom.extrinsics.ext_mats_homo.shape == (4, 4)
        assert cam_geom.extrinsics.inv_ext_mats_homo.shape == (4, 4)
        assert cam_geom.full_mats_homo.shape == (4, 4)
        assert cam_geom.inv_full_mats_homo.shape == (4, 4)

    # Image Data #
    # -----------#

    # load images

    # h x w x 3
    color1 = ivy.array(
        cv2.imread(data_dir + '/rgb1.png').astype(np.float32) / 255)
    color2 = ivy.array(
        cv2.imread(data_dir + '/rgb2.png').astype(np.float32) / 255)

    # h x w x 1
    depth1 = ivy.array(
        np.reshape(
            np.frombuffer(
                cv2.imread(data_dir + '/depth1.png', -1).tobytes(),
                np.float32), img_dims + [1]))
    depth2 = ivy.array(
        np.reshape(
            np.frombuffer(
                cv2.imread(data_dir + '/depth2.png', -1).tobytes(),
                np.float32), img_dims + [1]))

    # depth scaled pixel coords

    # h x w x 3
    u_pix_coords = ivy_vision.create_uniform_pixel_coords_image(img_dims)
    ds_pixel_coords1 = u_pix_coords * depth1
    ds_pixel_coords2 = u_pix_coords * depth2

    # depth limits
    depth_min = ivy.reduce_min(ivy.concatenate((depth1, depth2), 0))
    depth_max = ivy.reduce_max(ivy.concatenate((depth1, depth2), 0))
    depth_limits = [depth_min, depth_max]

    # show images
    show_rgb_and_depth_images(color1, color2, depth1, depth2, depth_limits)

    # Flow and Depth Triangulation #
    # -----------------------------#

    # required mat formats
    cam1to2_full_mat_homo = ivy.matmul(cam2_geom.full_mats_homo,
                                       cam1_geom.inv_full_mats_homo)
    cam1to2_full_mat = cam1to2_full_mat_homo[..., 0:3, :]
    full_mats_homo = ivy.concatenate(
        (ivy.expand_dims(cam1_geom.full_mats_homo, 0),
         ivy.expand_dims(cam2_geom.full_mats_homo, 0)), 0)
    full_mats = full_mats_homo[..., 0:3, :]

    # flow
    flow1to2 = ivy_vision.flow_from_depth_and_cam_mats(ds_pixel_coords1,
                                                       cam1to2_full_mat)

    # depth again
    depth1_from_flow = ivy_vision.depth_from_flow_and_cam_mats(flow1to2, full_mats)

    # show images
    show_flow_and_depth_images(depth1, flow1to2, depth1_from_flow, depth_limits)

    # Inverse Warping #
    # ----------------#

    # inverse warp rendering
    warp = u_pix_coords[..., 0:2] + flow1to2
    color2_warp_to_f1 = ivy.reshape(ivy.bilinear_resample(color2, warp), color1.shape)

    # projected depth scaled pixel coords 2
    ds_pixel_coords1_wrt_f2 = ivy_vision.ds_pixel_to_ds_pixel_coords(
        ds_pixel_coords1, cam1to2_full_mat)

    # projected depth 2
    depth1_wrt_f2 = ds_pixel_coords1_wrt_f2[..., -1:]

    # inverse warp depth
    depth2_warp_to_f1 = ivy.reshape(ivy.bilinear_resample(depth2, warp), depth1.shape)

    # depth validity
    depth_validity = ivy.abs(depth1_wrt_f2 - depth2_warp_to_f1) < 0.01

    # inverse warp rendering with mask
    color2_warp_to_f1_masked = ivy.where(depth_validity, color2_warp_to_f1,
                                         ivy.zeros_like(color2_warp_to_f1))

    # show images
    show_inverse_warped_images(depth1_wrt_f2, depth2_warp_to_f1, depth_validity,
                               color1, color2_warp_to_f1, color2_warp_to_f1_masked,
                               depth_limits)

    # Forward Warping #
    # ----------------#

    # forward warp rendering
    ds_pixel_coords1_proj = ivy_vision.ds_pixel_to_ds_pixel_coords(
        ds_pixel_coords2, ivy.inv(cam1to2_full_mat_homo)[..., 0:3, :])
    depth1_proj = ds_pixel_coords1_proj[..., -1:]
    ds_pixel_coords1_proj = ds_pixel_coords1_proj[..., 0:2] / depth1_proj
    features_to_render = ivy.concatenate((depth1_proj, color2), -1)

    # without depth buffer
    f1_forward_warp_no_db, _, _ = ivy_vision.quantize_to_image(
        ivy.reshape(ds_pixel_coords1_proj, (-1, 2)), img_dims,
        ivy.reshape(features_to_render, (-1, 4)),
        ivy.zeros_like(features_to_render), with_db=False)

    # with depth buffer
    f1_forward_warp_w_db, _, _ = ivy_vision.quantize_to_image(
        ivy.reshape(ds_pixel_coords1_proj, (-1, 2)), img_dims,
        ivy.reshape(features_to_render, (-1, 4)),
        ivy.zeros_like(features_to_render),
        with_db=False if ivy.get_framework() == 'mxnd' else True)

    # show images
    show_forward_warped_images(depth1, color1, f1_forward_warp_no_db,
                               f1_forward_warp_w_db, depth_limits)

    # message
    print('End of Run Through Demo!')
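# A small numpy sketch of the occlusion test above: a warped pixel is only
# trusted where the depth of frame 1 projected into frame 2 agrees with the
# depth sampled from frame 2 at the warped location (hypothetical values).
import numpy as np

depth1_wrt_f2 = np.array([[1.00], [2.00], [3.00]])
depth2_warp_to_f1 = np.array([[1.005], [2.50], [2.995]])
depth_validity = np.abs(depth1_wrt_f2 - depth2_warp_to_f1) < 0.01
assert (depth_validity[:, 0] == np.array([True, False, True])).all()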
def rasterize_triangles(pixel_coords_triangles, image_dims, batch_shape=None, dev_str=None):
    """
    Rasterize image-projected triangles based on:
    https://www.scratchapixel.com/lessons/3d-basic-rendering/rasterization-practical-implementation/rasterization-stage
    and:
    https://www.scratchapixel.com/lessons/3d-basic-rendering/rasterization-practical-implementation/rasterization-practical-implementation

    :param pixel_coords_triangles: Projected image-space triangles to be rasterized
                                   *[batch_shape,input_size,3,3]*
    :type pixel_coords_triangles: array
    :param image_dims: Image dimensions.
    :type image_dims: sequence of ints
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as x if None.
    :type dev_str: str, optional
    :return: Rasterized triangles
    """

    if batch_shape is None:
        batch_shape = []

    if dev_str is None:
        dev_str = _ivy.dev_str(pixel_coords_triangles)

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)
    image_dims = list(image_dims)
    input_image_dims = pixel_coords_triangles.shape[num_batch_dims:-2]
    input_image_dims_prod = _reduce(_mul, input_image_dims, 1)

    # BS x 3 x 2
    pixel_xy_coords = pixel_coords_triangles[..., 0:2]

    # BS x 3 x 1
    pixel_x_coords = pixel_coords_triangles[..., 0:1]
    pixel_y_coords = pixel_coords_triangles[..., 1:2]

    # 1
    x_min = _ivy.reshape(_ivy.reduce_min(pixel_x_coords, keepdims=True), (-1,))
    x_max = _ivy.reshape(_ivy.reduce_max(pixel_x_coords, keepdims=True), (-1,))
    x_range = x_max - x_min
    y_min = _ivy.reshape(_ivy.reduce_min(pixel_y_coords, keepdims=True), (-1,))
    y_max = _ivy.reshape(_ivy.reduce_max(pixel_y_coords, keepdims=True), (-1,))
    y_range = y_max - y_min

    # 2
    bbox = _ivy.concatenate((x_range, y_range), 0)
    img_bbox_list = [int(item) for item in _ivy.to_list(
        _ivy.concatenate((y_range + 1, x_range + 1), 0))]

    # BS x 2
    v0 = pixel_xy_coords[..., 0, :]
    v1 = pixel_xy_coords[..., 1, :]
    v2 = pixel_xy_coords[..., 2, :]
    tri_centres = (v0 + v1 + v2) / 3

    # BS x 1
    v0x = v0[..., 0:1]
    v0y = v0[..., 1:2]
    v1x = v1[..., 0:1]
    v1y = v1[..., 1:2]
    v2x = v2[..., 0:1]
    v2y = v2[..., 1:2]

    # BS x BBX x BBY x 2
    uniform_sample_coords = _ivy_svg.create_uniform_pixel_coords_image(
        img_bbox_list, batch_shape)[..., 0:2]
    P = _ivy.round(uniform_sample_coords + tri_centres - bbox / 2)

    # BS x BBX x BBY x 1
    Px = P[..., 0:1]
    Py = P[..., 1:2]
    v0v1_edge_func = ((Px - v0x) * (v1y - v0y) - (Py - v0y) * (v1x - v0x)) >= 0
    v1v2_edge_func = ((Px - v1x) * (v2y - v1y) - (Py - v1y) * (v2x - v1x)) >= 0
    v2v0_edge_func = ((Px - v2x) * (v0y - v2y) - (Py - v2y) * (v0x - v2x)) >= 0
    edge_func = _ivy.logical_and(
        _ivy.logical_and(v0v1_edge_func, v1v2_edge_func), v2v0_edge_func)

    batch_indices_list = list()
    for i, batch_dim in enumerate(batch_shape):

        # get batch shape
        batch_dims_before = batch_shape[:i]
        num_batch_dims_before = len(batch_dims_before)
        batch_dims_after = batch_shape[i + 1:]
        num_batch_dims_after = len(batch_dims_after)

        # [batch_dim]
        batch_indices = _ivy.arange(batch_dim, dtype_str='int32', dev_str=dev_str)

        # [1]*num_batch_dims_before x batch_dim x [1]*num_batch_dims_after x 1 x 1
        reshaped_batch_indices = _ivy.reshape(
            batch_indices,
            [1] * num_batch_dims_before + [batch_dim] +
            [1] * num_batch_dims_after + [1, 1])

        # BS x N x 1
        tiled_batch_indices = _ivy.tile(
            reshaped_batch_indices,
            batch_dims_before + [1] + batch_dims_after +
            [input_image_dims_prod * 9, 1])
        batch_indices_list.append(tiled_batch_indices)

    # BS x N x (num_batch_dims + 2)
    all_indices = _ivy.concatenate(
        batch_indices_list +
        [_ivy.cast(_ivy.flip(_ivy.reshape(P, batch_shape + [-1, 2]), -1), 'int32')], -1)

    # offset uniform images
    return _ivy.cast(
        _ivy.flip(
            _ivy.scatter_nd(
                _ivy.reshape(all_indices, [-1, num_batch_dims + 2]),
                _ivy.reshape(_ivy.cast(edge_func, 'int32'), (-1, 1)),
                batch_shape + image_dims + [1],
                reduction='replace' if _ivy.backend == 'mxnd' else 'sum'), -3), 'bool')
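# A worked numpy check of the edge-function inside test used above: a sample
# point lies inside the triangle iff all three signed edge functions are
# non-negative, for the winding assumed by the >= 0 convention (the triangle
# below is wound to satisfy it; illustrative values only).
import numpy as np

def edge(p, a, b):
    # same expression as v0v1_edge_func above
    return (p[0] - a[0]) * (b[1] - a[1]) - (p[1] - a[1]) * (b[0] - a[0])

v0, v1, v2 = np.array([0., 0.]), np.array([0., 4.]), np.array([4., 0.])
inside, outside = np.array([1., 1.]), np.array([4., 4.])
assert all(edge(inside, a, b) >= 0 for a, b in ((v0, v1), (v1, v2), (v2, v0)))
assert not all(edge(outside, a, b) >= 0 for a, b in ((v0, v1), (v1, v2), (v2, v0)))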